from .pushservice import *
from copy import deepcopy
from hashlib import md5

from misago.core import threadstore

from .forms import get_permissions_forms


def fake_post_data(target, data_dict):
    """
    So that form submission doesn't fail, all permission fields need to
    receive values. This function populates the data dict with default
    values for permissions, making form validation pass.
    """
    for form in get_permissions_forms(target):
        for field in form:
            if field.value() is True:
                data_dict[field.html_name] = 1
            elif field.value() is False:
                data_dict[field.html_name] = 0
            else:
                data_dict[field.html_name] = field.value()
    return data_dict


def override_acl(user, new_acl):
    """Override user permissions with the specified ones."""
    final_cache = deepcopy(user.acl_cache)
    final_cache.update(new_acl)

    if user.is_authenticated:
        user._acl_cache = final_cache
        user.acl_key = md5(str(user.pk).encode()).hexdigest()[:8]
        user.save(update_fields=['acl_key'])

    threadstore.set('acl_%s' % user.acl_key, final_cache)
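# Hedged usage sketch for override_acl() above: a hypothetical Django test
# denies a single permission before hitting a view. The permission key, the
# URL and the way the test user is created are illustrative assumptions; only
# override_acl() comes from the helpers above.
from django.contrib.auth import get_user_model
from django.test import TestCase


class AttachmentAclTest(TestCase):
    def setUp(self):
        self.user = get_user_model().objects.create_user(
            'bob', 'bob@example.com', 'password')  # assumed user factory
        self.client.force_login(self.user)

    def test_cant_download_other_users_attachments(self):
        override_acl(self.user, {'can_download_other_users_attachments': 0})
        response = self.client.get('/attachment/123/')  # hypothetical URL
        self.assertEqual(response.status_code, 403)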
from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ValidationError from django.db import models from django.test import TestCase, TransactionTestCase from django_dynamic_fixture import G from mock import patch, MagicMock, call from activatable_model.models import BaseActivatableModel from activatable_model.signals import model_activations_changed, model_activations_updated from activatable_model.validation import get_activatable_models, validate_activatable_models from activatable_model.tests.models import ( ActivatableModel, ActivatableModelWRel, Rel, ActivatableModelWNonDefaultField, ActivatableModelWRelAndCascade, ) class BaseMockActivationsSignalHanderTest(TestCase): """ Connects a mock to the model_activations_changed signal so that it can be easily tested. """ def setUp(self): super(BaseMockActivationsSignalHanderTest, self).setUp() self.mock_model_activations_changed_handler = MagicMock() model_activations_changed.connect(self.mock_model_activations_changed_handler) self.mock_model_activations_updated_handler = MagicMock() model_activations_updated.connect(self.mock_model_activations_updated_handler) def tearDown(self): super(BaseMockActivationsSignalHanderTest, self).tearDown() model_activations_changed.disconnect(self.mock_model_activations_changed_handler) class CascadeTest(TransactionTestCase): """ Tests that cascade deletes cant happen on an activatable test model. """ def test_no_cascade(self): rel = G(Rel) G(ActivatableModelWRel, rel_field=rel) with self.assertRaises(models.ProtectedError): rel.delete() def test_allowed_cascade(self): rel = G(Rel) rel_id = rel.id G(ActivatableModelWRelAndCascade, rel_field=rel) rel.delete() self.assertEqual(ActivatableModelWRelAndCascade.objects.filter(id=rel_id).count(), 0) class ManagerQuerySetTest(BaseMockActivationsSignalHanderTest): """ Tests custom functionality in the manager and queryset for activatable models. Tests it on models that use the default is_active field and models that define their own custom activatable field. 
""" def test_update_no_is_active(self): G(ActivatableModel, is_active=False) G(ActivatableModel, is_active=False) ActivatableModel.objects.update(char_field='hi') self.assertEquals(ActivatableModel.objects.filter(char_field='hi', is_active=False).count(), 2) self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2) def test_update_no_is_active_custom(self): G(ActivatableModelWNonDefaultField, active=False) G(ActivatableModelWNonDefaultField, active=False) ActivatableModelWNonDefaultField.objects.update(char_field='hi') self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(char_field='hi', active=False).count(), 2) self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2) def test_update_w_is_active(self): m1 = G(ActivatableModel, is_active=False) m2 = G(ActivatableModel, is_active=False) ActivatableModel.objects.filter(is_active=False).update(char_field='hi', is_active=True) self.assertEquals(ActivatableModel.objects.filter(char_field='hi', is_active=True).count(), 2) self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3) call_args = self.mock_model_activations_changed_handler.call_args self.assertEquals(call_args[1]['is_active'], True) self.assertEquals(set(call_args[1]['instance_ids']), set([m1.id, m2.id])) self.assertEquals(call_args[1]['sender'], ActivatableModel) def test_update_w_is_active_custom(self): m1 = G(ActivatableModelWNonDefaultField, active=False) m2 = G(ActivatableModelWNonDefaultField, active=False) ActivatableModelWNonDefaultField.objects.update(char_field='hi', active=True) self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(char_field='hi', active=True).count(), 2) self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3) call_args = self.mock_model_activations_changed_handler.call_args self.assertEquals(call_args[1]['is_active'], True) self.assertEquals(set(call_args[1]['instance_ids']), set([m1.id, m2.id])) self.assertEquals(call_args[1]['sender'], ActivatableModelWNonDefaultField) def test_activate(self): models = [ G(ActivatableModel, is_active=False), G(ActivatableModel, is_active=True), ] ActivatableModel.objects.activate() self.assertEquals(ActivatableModel.objects.filter(is_active=True).count(), 2) static_kwargs = { 'sender': ActivatableModel, 'signal': model_activations_changed, } self.mock_model_activations_changed_handler.assert_has_calls([ call(instance_ids=[models[0].id], is_active=False, **static_kwargs), call(instance_ids=[models[1].id], is_active=True, **static_kwargs), call(instance_ids=[models[0].id], is_active=True, **static_kwargs), ]) static_kwargs['signal'] = model_activations_updated self.mock_model_activations_updated_handler.assert_has_calls([ call(instance_ids=[models[0].id], is_active=False, **static_kwargs), call(instance_ids=[models[1].id], is_active=True, **static_kwargs), call(instance_ids=[models[0].id, models[1].id], is_active=True, **static_kwargs), ]) def test_activate_custom(self): models = [ G(ActivatableModelWNonDefaultField, active=False), G(ActivatableModelWNonDefaultField, active=True), ] ActivatableModelWNonDefaultField.objects.activate() self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(active=True).count(), 2) static_kwargs = { 'sender': ActivatableModelWNonDefaultField, 'signal': model_activations_changed, } self.mock_model_activations_changed_handler.assert_has_calls([ call(instance_ids=[models[0].id], is_active=False, **static_kwargs), call(instance_ids=[models[1].id], is_active=True, **static_kwargs), 
call(instance_ids=[models[0].id], is_active=True, **static_kwargs), ]) static_kwargs['signal'] = model_activations_updated self.mock_model_activations_updated_handler.assert_has_calls([ call(instance_ids=[models[0].id], is_active=False, **static_kwargs), call(instance_ids=[models[1].id], is_active=True, **static_kwargs), call(instance_ids=[models[0].id, models[1].id], is_active=True, **static_kwargs), ]) def test_deactivate(self): models = [ G(ActivatableModel, is_active=False), G(ActivatableModel, is_active=True), ] ActivatableModel.objects.deactivate() self.assertEquals(ActivatableModel.objects.filter(is_active=False).count(), 2) static_kwargs = { 'sender': ActivatableModel, 'signal': model_activations_changed, } self.mock_model_activations_changed_handler.assert_has_calls([ call(instance_ids=[models[0].id], is_active=False, **static_kwargs), call(instance_ids=[models[1].id], is_active=True, **static_kwargs), call(instance_ids=[models[1].id], is_active=False, **static_kwargs), ]) static_kwargs['signal'] = model_activations_updated self.mock_model_activations_updated_handler.assert_has_calls([ call(instance_ids=[models[0].id], is_active=False, **static_kwargs), call(instance_ids=[models[1].id], is_active=True, **static_kwargs), call(instance_ids=[models[0].id, models[1].id], is_active=False, **static_kwargs), ]) def test_deactivate_custom(self): models = [ G(ActivatableModelWNonDefaultField, active=False), G(ActivatableModelWNonDefaultField, active=True), ] ActivatableModelWNonDefaultField.objects.deactivate() self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(active=False).count(), 2) static_kwargs = { 'sender': ActivatableModelWNonDefaultField, 'signal': model_activations_changed, } self.mock_model_activations_changed_handler.assert_has_calls([ call(instance_ids=[models[0].id], is_active=False, **static_kwargs), call(instance_ids=[models[1].id], is_active=True, **static_kwargs), call(instance_ids=[models[1].id], is_active=False, **static_kwargs), ]) static_kwargs['signal'] = model_activations_updated self.mock_model_activations_updated_handler.assert_has_calls([ call(instance_ids=[models[0].id], is_active=False, **static_kwargs), call(instance_ids=[models[1].id], is_active=True, **static_kwargs), call(instance_ids=[models[0].id, models[1].id], is_active=False, **static_kwargs), ]) def test_delete_no_force(self): G(ActivatableModel, is_active=False) G(ActivatableModel, is_active=True) ActivatableModel.objects.all().delete() self.assertEquals(ActivatableModel.objects.filter(is_active=False).count(), 2) self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3) def test_delete_no_force_custom(self): G(ActivatableModelWNonDefaultField, active=False) G(ActivatableModelWNonDefaultField, active=True) ActivatableModelWNonDefaultField.objects.all().delete() self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(active=False).count(), 2) self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3) def test_delete_w_force(self): G(ActivatableModel, is_active=False) G(ActivatableModel, is_active=True) ActivatableModel.objects.all().delete(force=True) self.assertFalse(ActivatableModel.objects.exists()) self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2) def test_delete_w_force_custom(self): G(ActivatableModelWNonDefaultField, active=False) G(ActivatableModelWNonDefaultField, active=True) ActivatableModelWNonDefaultField.objects.all().delete(force=True) 
self.assertFalse(ActivatableModelWNonDefaultField.objects.exists()) self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2) class SaveTest(BaseMockActivationsSignalHanderTest): """ Tests the custom save function in the BaseActivatableModel. """ def test_create(self): m = G(ActivatableModel, is_active=False) call_args = self.mock_model_activations_changed_handler.call_args self.assertEquals(call_args[1]['is_active'], False) self.assertEquals(call_args[1]['instance_ids'], [m.id]) self.assertEquals(call_args[1]['sender'], ActivatableModel) updated_call_args = self.mock_model_activations_updated_handler.call_args self.assertEquals(updated_call_args[1]['is_active'], False) self.assertEquals(updated_call_args[1]['instance_ids'], [m.id]) self.assertEquals(updated_call_args[1]['sender'], ActivatableModel) def test_save_not_changed(self): m = G(ActivatableModel, is_active=False) m.is_active = False m.save() self.assertEquals(self.mock_model_activations_changed_handler.call_count, 1) self.assertEquals(self.mock_model_activations_updated_handler.call_count, 2) def test_save_changed(self): m = G(ActivatableModel, is_active=False) m.is_active = True m.save() # changed self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2) call_args = self.mock_model_activations_changed_handler.call_args self.assertEquals(call_args[1]['is_active'], True) self.assertEquals(call_args[1]['instance_ids'], [m.id]) self.assertEquals(call_args[1]['sender'], ActivatableModel) # updated self.assertEquals(self.mock_model_activations_updated_handler.call_count, 2) updated_call_args = self.mock_model_activations_updated_handler.call_args self.assertEquals(updated_call_args[1]['is_active'], True) self.assertEquals(updated_call_args[1]['instance_ids'], [m.id]) self.assertEquals(updated_call_args[1]['sender'], ActivatableModel) def test_save_changed_custom(self): m = G(ActivatableModelWNonDefaultField, active=False) m.active = True m.save() # changed self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2) call_args = self.mock_model_activations_changed_handler.call_args self.assertEquals(call_args[1]['is_active'], True) self.assertEquals(call_args[1]['instance_ids'], [m.id]) self.assertEquals(call_args[1]['sender'], ActivatableModelWNonDefaultField) # updated self.assertEquals(self.mock_model_activations_updated_handler.call_count, 2) updated_call_args = self.mock_model_activations_updated_handler.call_args self.assertEquals(updated_call_args[1]['is_active'], True) self.assertEquals(updated_call_args[1]['instance_ids'], [m.id]) self.assertEquals(updated_call_args[1]['sender'], ActivatableModelWNonDefaultField) class SingleDeleteTest(BaseMockActivationsSignalHanderTest): """ Tests calling delete on a single model that inherits BaseActivatableModel. 
""" def test_delete_no_force_no_active_changed(self): m = G(ActivatableModel, is_active=False) m.delete() m = ActivatableModel.objects.get(id=m.id) self.assertFalse(m.is_active) self.assertEquals(self.mock_model_activations_changed_handler.call_count, 1) self.assertEquals(self.mock_model_activations_updated_handler.call_count, 2) def test_delete_no_force_active_changed(self): m = G(ActivatableModel, is_active=True) m.delete() m = ActivatableModel.objects.get(id=m.id) self.assertFalse(m.is_active) self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2) self.assertEquals(self.mock_model_activations_updated_handler.call_count, 2) def test_delete_force(self): m = G(ActivatableModel, is_active=False) m.delete(force=True) self.assertFalse(ActivatableModel.objects.exists()) class ValidateDbTest(TestCase): """ Tests that activatable models are validated properly upon pre_syncdb signal. """ def test_get_activatable_models(self): activatable_models = get_activatable_models() self.assertEquals( set( [ ActivatableModel, ActivatableModelWRel, ActivatableModelWRelAndCascade, ActivatableModelWNonDefaultField ] ), set(activatable_models) ) def test_all_valid_models(self): """ All models should validate fine. """ validate_activatable_models() @patch('activatable_model.validation.get_activatable_models') def test_activatable_field_is_not_boolean(self, mock_get_activatable_models): """ SET_NULL is a valid option for foreign keys in activatable models. """ # Make this an object and not an actual django model. This prevents it from always # being included when syncing the db. This is true for all other test models in this file. class NonBooleanModel(BaseActivatableModel): class Meta: abstract = True is_active = models.CharField() ctype = models.ForeignKey(ContentType, null=True, on_delete=models.SET_NULL) mock_get_activatable_models.return_value = [NonBooleanModel] with self.assertRaises(ValidationError): validate_activatable_models() @patch('activatable_model.validation.get_activatable_models') def test_activatable_field_is_not_defined(self, mock_get_activatable_models): """ SET_NULL is a valid option for foreign keys in activatable models. """ # Make this an object and not an actual django model. This prevents it from always # being included when syncing the db. This is true for all other test models in this file. class NoValidFieldModel(BaseActivatableModel): class Meta: abstract = True ACTIVATABLE_FIELD_NAME = 'active' is_active = models.BooleanField() ctype = models.ForeignKey(ContentType, null=True, on_delete=models.SET_NULL) mock_get_activatable_models.return_value = [NoValidFieldModel] with self.assertRaises(ValidationError): validate_activatable_models() @patch('activatable_model.validation.get_activatable_models') def test_foreign_key_is_null(self, mock_get_activatable_models): """ SET_NULL is a valid option for foreign keys in activatable models. """ # Make this an object and not an actual django model. This prevents it from always # being included when syncing the db. This is true for all other test models in this file. 
class CascadableModel(BaseActivatableModel): class Meta: abstract = True is_active = models.BooleanField(default=False) ctype = models.ForeignKey(ContentType, null=True, on_delete=models.SET_NULL) mock_get_activatable_models.return_value = [CascadableModel] validate_activatable_models() self.assertEquals(mock_get_activatable_models.call_count, 1) @patch('activatable_model.validation.get_activatable_models') def test_foreign_key_protect(self, mock_get_activatable_models): """ PROTECT is a valid option for foreign keys in activatable models. """ # Make this an object and not an actual django model. This prevents it from always # being included when syncing the db. This is true for all other test models in this file. class CascadableModel(BaseActivatableModel): class Meta: abstract = True is_active = models.BooleanField(default=False) ctype = models.ForeignKey(ContentType, null=True, on_delete=models.PROTECT) mock_get_activatable_models.return_value = [CascadableModel] validate_activatable_models() self.assertEquals(mock_get_activatable_models.call_count, 1) @patch('activatable_model.validation.get_activatable_models') def test_foreign_key_cascade(self, mock_get_activatable_models): """ The default cascade behavior is invalid for activatable models. """ class CascadableModel(BaseActivatableModel): class Meta: abstract = True is_active = models.BooleanField(default=False) ctype = models.ForeignKey(ContentType, on_delete=models.CASCADE) mock_get_activatable_models.return_value = [CascadableModel] with self.assertRaises(ValidationError): validate_activatable_models() @patch('activatable_model.validation.get_activatable_models') def test_one_to_one_is_null(self, mock_get_activatable_models): """ SET_NULL is a valid option for foreign keys in activatable models. """ # Make this an object and not an actual django model. This prevents it from always # being included when syncing the db. This is true for all other test models in this file. class CascadableModel(BaseActivatableModel): class Meta: abstract = True is_active = models.BooleanField(default=False) ctype = models.OneToOneField(ContentType, null=True, on_delete=models.SET_NULL) mock_get_activatable_models.return_value = [CascadableModel] validate_activatable_models() self.assertEquals(mock_get_activatable_models.call_count, 1) @patch('activatable_model.validation.get_activatable_models') def test_one_to_one_protect(self, mock_get_activatable_models): """ PROTECT is a valid option for foreign keys in activatable models. """ # Make this an object and not an actual django model. This prevents it from always # being included when syncing the db. This is true for all other test models in this file. class CascadableModel(BaseActivatableModel): class Meta: abstract = True is_active = models.BooleanField(default=False) ctype = models.OneToOneField(ContentType, null=True, on_delete=models.PROTECT) mock_get_activatable_models.return_value = [CascadableModel] validate_activatable_models() self.assertEquals(mock_get_activatable_models.call_count, 1) @patch('activatable_model.validation.get_activatable_models') def test_one_to_one_cascade(self, mock_get_activatable_models): """ The default cascade behavior is invalid for activatable models. 
""" class CascadableModel(BaseActivatableModel): class Meta: abstract = True is_active = models.BooleanField(default=False) ctype = models.OneToOneField(ContentType, on_delete=models.CASCADE) mock_get_activatable_models.return_value = [CascadableModel] with self.assertRaises(ValidationError): validate_activatable_models() class ModelUpdatedSignalTest(BaseMockActivationsSignalHanderTest): """ Tests the updated signal test """ def test_no_activatable_field_updated(self): m = G(ActivatableModel, is_active=False) m_from_db = ActivatableModel.objects.get(id=m.id) m_from_db.char_field = 'foo' m_from_db.save() self.assertEquals(self.mock_model_activations_updated_handler.call_count, 1)
class Response(object):
    status = None
    reason = None
    url = None
    content_type = None
    charset = None
    text = None
    meta = None

    def __init__(self):
        pass
from bokeh.models.formatters import FuncTickFormatter


def configure_plot(plot):
    plot.toolbar.logo = None
    plot.xaxis.axis_label_text_font_style = "normal"
    plot.yaxis.axis_label_text_font_style = "normal"
    plot.xaxis.major_label_text_font_size = "1rem"
    plot.yaxis.major_label_text_font_size = "1rem"
    plot.xaxis.axis_label_text_font_size = "1rem"
    plot.yaxis.axis_label_text_font_size = "1rem"
    plot.xaxis.formatter = FuncTickFormatter(code="return format_exponential(tick);")
    plot.yaxis.formatter = FuncTickFormatter(code="return format_exponential(tick);")
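# Hedged usage sketch for configure_plot(): create a figure, draw something,
# and apply the shared styling. It assumes the embedding page also defines a
# global JavaScript helper named format_exponential(), since the
# FuncTickFormatter above calls it for every tick.
from bokeh.plotting import figure, show

plot = figure(x_axis_label="x", y_axis_label="y")
plot.line([1, 2, 3], [10, 100, 1000])
configure_plot(plot)  # helper defined above
show(plot)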
# # K-line, stock datas # Klang 提供了全局的股票数据,和获取股票数据的方法 # Kdatas 是提供了单只股票的数据和计算方法 # 类似通达信这样的公式,经过计算的数据有时候需要 计算整个周期,有时候是单个周期 # 因此需要封装成 python 类来解决 # 例如: if CLOSE > OPEN, 判断的是单个周期 # 例如: EVERY (CLOSE > OPEN,N) 这时候判断的是多个周期 import numpy as np import pandas kl = None def setstock(kl1): global kl kl = kl1 """ df2 = df[(df.date >= '2020-10-01') & (df.date <= '2021-01-01')] """ #获取股票数据 #baostock volume,TDX use vol # def getstockdata(name): if isinstance(kl.currentdf.get('df'),pandas.core.frame.DataFrame): return kl.currentdf['df'][name] return [] #做类似C/C[1]计算 #计算涨跌率 #C和C1都是列表,numpy计算的时候需要同样list size def match_size(*datas_list): size = min(len(data) for data in datas_list) if size == 0: return np.array([]),np.array([]) #[len-size:] new_list = [np.array(data[len(data)-size:]) for data in datas_list] return new_list #股票数据重新定义以方便计算 class KdataBase(object): def __init__(self,index=0,data=[]): self._data = data self.dtype = float self.index = index #返回最后一天的数据 @property def value(self): ret = self.data[-1] if self.dtype == float: return round(ret,3) def __bool__(self): return bool(self.data[-1]) @property def data(self): return self._data #C,index=0, #C[1],index=1 def __getitem__(self, index): #Klang Don't None 类型,因此处理判断位None if index is None: return None if index < 0: index = -(index + 1) n = self.__class__(index) if len(self.data) >index: nindex = len(self.data) - index n._data = self.data[:nindex] else: raise StopIteration return n # < def __lt__(self, other): if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d1 < d2 return kb else: #int float return self.value < other # > def __gt__(self, other): if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d1 > d2 return kb else: #int float return self.value > other # == def __eq__(self, other): if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d1 == d2 return kb else: #int float return self.value == other # != def __ne__(self, other): if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d1 != d2 return kb else: #int float return self.value != other # >= def __ge__(self, other): if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d1 >= d2 return kb else: #int float return self.value >= other # <= def __le__(self, other): if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d1 <= d2 return kb else: #int float return self.value <= other # + def __add__(self,other): if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d1 + d2 return kb else: #int float return self.value + other # - def __sub__(self,other): if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d1 - d2 return kb else: #int float return self.value - other # - def __rsub__(self,other): if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d2 - d1 return kb else: #int float return other - self.value # * def __mul__(self,other): if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d1 * d2 return kb else: #int float return round(self.value * other,3) # / def __truediv__(self, other): #s1 , s2 = match_size(self.data,other.data) if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d1 
/ d2 return kb else: return round(self.value / other,3) def __rtruediv__(self, other): #s1 , s2 = match_size(self.data,other.data) if isinstance(other,KdataBase): kb = KdataBase() d1,d2 = match_size(self.data,other.data) kb._data = d2 / d1 return kb else: return round(other / self.value,3) __div__ = __truediv__ __rdiv__ = __rtruediv__ def __len__(self): return len(self.data) def __repr__(self): return str(self.value) class Kdatas(KdataBase): def __init__(self,index=0): self._data = [] self.currentindex = -1 #stock code index self.dfstart = -1 #stock start date self.dfend = -1 #stock end date self.index = index #C,C[1],C[2] #返回最后一天的数据 @property def value(self): return self.dtype(self.data[-1]) #比较currentindex值的目的是切换股票的时候刷新 @property def data(self): if len(self._data) == 0 or self.currentindex != kl.currentindex\ or self.dfstart != kl.dfstart\ or self.dfend != kl.dfend: #reload self.currentindex = kl.currentindex self.dfstart = kl.dfstart self.dfend = kl.dfend d = getstockdata(self.name).astype(self.dtype) index = len(d) - self.index if index > 0: self._data = d[:index] else : self._data = [] return self._data # create open high low close volume datetime # 建立全局的 o,O,OPEN,等关键词 for name in ["open", "high", "low", "close", "volume", 'vol','amount','datetime']: dtype = float if name != "datetime" else np.str_ cls = type("{}Kdatas".format(name.capitalize()), (Kdatas, ), {"name": name, "dtype": dtype}) obj = cls() for var in [name[0], name[0].upper(), name.upper()]: globals()[var] = obj
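# Hedged usage sketch for the Kdatas wrappers above, written as a function so
# nothing runs at import time. kl1 is assumed to be a Klang data holder
# exposing currentdf/currentindex/dfstart/dfend; C, O, etc. are the globals
# created by the loop above.
def example_signal(kl1):
    setstock(kl1)
    up_bar = C > O         # element-wise compare; truthy when the latest close > open
    change = C / C[1] - 1  # close series over its 1-bar lag, collapsed to the latest value
    return bool(up_bar), change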
#----------------- Libraries -------------------# import os import sys from tqdm import tqdm import numpy as np import pandas as pd from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split from Preprocessing import Preprocessing def kfold_decompose(data, kfold_n): """ This function uses kfold to split the data. Args: data (list): The data to split kfold_n (int): number of fragments to be split Returns: list[dict]: a list of the split datasets """ X = np.array(data) kf = KFold(n_splits=kfold_n, random_state=2, shuffle=True) data_output = [] for train_index, test_index in kf.split(X): X_train, X_test = X[train_index], X[test_index] data_output.append({'train': X_train, 'test': X_test}) return data_output def create_folders(data_label_true, data_label_fake, kfold_n, data_path): """ This function fragments the data and creates the repositories to store it. Args: data_label_true (list[dict]): true data text data_label_fake (list[dict]): fake data text kfold_n (int): number of data splits with kfold """ # cwd = os.path.dirname(os.path.abspath(__file__)) # data_path = os.path.join(cwd, os.pardir, 'data') training = os.path.join(data_path, 'training') os.mkdir(training) prp = Preprocessing() for i in tqdm(range(kfold_n)): dataset_path = os.path.join(training, 'dataset_' + str(i+1)) os.mkdir(dataset_path) os.mkdir(os.path.join(dataset_path, 'test')) os.mkdir(os.path.join(dataset_path, 'train')) os.mkdir(os.path.join(dataset_path, 'train', 'vc')) pd.DataFrame(data_label_true[i]['test']).to_excel( os.path.join(dataset_path, 'test', 'True.xlsx'), index=False) pd.DataFrame(data_label_fake[i]['test']).to_excel( os.path.join(dataset_path, 'test', 'Fake.xlsx'), index=False) X_train, X_val = train_test_split( data_label_true[i]['train'], test_size=0.20, random_state=1) pd.DataFrame(prp.preprocess(X_train)).to_excel( os.path.join(dataset_path, 'train', 'True.xlsx'), index=False) pd.DataFrame(X_val).to_excel(os.path.join( dataset_path, 'train', 'vc', 'True.xlsx'), index=False) X_train, X_val = train_test_split( data_label_fake[i]['train'], test_size=0.20, random_state=1) pd.DataFrame(prp.preprocess(X_train)).to_excel( os.path.join(dataset_path, 'train', 'Fake.xlsx'), index=False) pd.DataFrame(X_val).to_excel(os.path.join( dataset_path, 'train', 'vc', 'Fake.xlsx'), index=False) def main(true_set, fake_set, kfold_n, data_path): """ This function takes the text dataset of true and fake news and splits it with kfolds and creates the repositories for it. Args: true_set (numpy): list of text of true label dataset fake_set (numpy): list of text of fake label dataset kfold_n (int, optional): kfold stplit. Defaults to 5. """ data_label_true = kfold_decompose(true_set, kfold_n) data_label_fake = kfold_decompose(fake_set, kfold_n) create_folders(data_label_true, data_label_fake, kfold_n, data_path)
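# Hedged usage sketch for the k-fold pipeline above. The Excel file names,
# the 'text' column and the 'data' directory are assumptions for
# illustration; only main() (and, through it, kfold_decompose and
# create_folders) comes from this module.
if __name__ == '__main__':
    true_texts = pd.read_excel('data/True.xlsx')['text'].tolist()  # assumed source file/column
    fake_texts = pd.read_excel('data/Fake.xlsx')['text'].tolist()  # assumed source file/column
    main(true_texts, fake_texts, kfold_n=5, data_path='data')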
""" Custom validator that validates the format needed for PBRG to get the record date from a folder name. """ import re import string import wx DAY_NUMBER = 0 MONTH_NAME = 1 YEAR_NUMBER = 2 class DateValidator (wx.Validator): """ Validates input date has the next format: DD MONTH_NAME YYYY """ def __init__(self, flag=None, pyVar=None): wx.Validator.__init__(self) self.flag = flag self.Bind(wx.EVT_CHAR, self.on_char) self.day_pattern = r"[0-31]{1}" self.month_pattern = r"[A-Z]{4,11}" self.year_pattern = r"[0-9]{4}" def Clone(self): """ Every validator must have this method. """ return DateValidator(self.flag) def Validate(self, win): """ Validates the format of the date. """ txt_ctrl = self.GetWindow() data = txt_ctrl.GetValue() # Using re mod. if self.flag == DAY_NUMBER: if not re.search(self.day_pattern, data): return False elif self.flag == MONTH_NAME: if not re.search(self.month_pattern, data): return False elif self.flag == YEAR_NUMBER: if not re.search(self.year_pattern, data): return False return True def on_char(self, event): """ Called every time a char is typed in the text ctrl. """ key = event.GetKeyCode() data_len = len(self.GetWindow().GetValue()) if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255: event.Skip() return if self.flag == DAY_NUMBER and chr(key) in string.digits and data_len <= 1: event.Skip() return if self.flag == MONTH_NAME and chr(key) in string.ascii_uppercase and data_len <= 9: event.Skip() return if self.flag== YEAR_NUMBER and chr(key) in string.digits and data_len <= 3: event.Skip() return return def TransferToWindow(self): """ Called when the value associated with the validator must be transferred to the window. """ return True def TransferFromWindow(self): """ Called when the value in the window must be transferred to the validator. """ return True
import time

from netmiko.no_enable import NoEnable
from netmiko.no_config import NoConfig
from netmiko.cisco.cisco_ios import CiscoIosBase


class KeymileSSH(NoEnable, NoConfig, CiscoIosBase):
    def __init__(self, **kwargs):
        kwargs.setdefault("default_enter", "\r\n")
        return super().__init__(**kwargs)

    def session_preparation(self):
        """Prepare the session after the connection has been established."""
        self._test_channel_read(pattern=r">")
        self.set_base_prompt()
        time.sleep(0.3 * self.global_delay_factor)
        self.clear_buffer()

    def disable_paging(self, *args, **kwargs):
        """Keymile does not use paging."""
        return ""

    def strip_prompt(self, a_string):
        """Remove the trailing empty line and the prompt from the output."""
        self._write_session_log(a_string)
        a_string = a_string[:-1]
        return super().strip_prompt(a_string=a_string)

    def set_base_prompt(self, pri_prompt_terminator=">", **kwargs):
        """Set prompt termination to '>'."""
        return super().set_base_prompt(pri_prompt_terminator=pri_prompt_terminator)
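# Hedged usage sketch: open a session to a Keymile device through netmiko's
# ConnectHandler. The device_type string "keymile", the credentials and the
# CLI command are assumptions; check the platform key registered in your
# netmiko version before relying on it.
from netmiko import ConnectHandler


def run_keymile_command(host, username, password, command):
    device = {
        "device_type": "keymile",  # assumed platform key for KeymileSSH
        "host": host,
        "username": username,
        "password": password,
    }
    with ConnectHandler(**device) as conn:
        return conn.send_command(command)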
#!/usr/bin/env python import sys import requests import base64 from requests.auth import HTTPBasicAuth import json import configparser account_data = {} pc_data = {} #verify_git_creds verifies the given got credentials have access to the repo def verify_git_creds(repo, owner, username, password): git_repo_endpoint = "https://api.github.com/repos/"+owner+"/"+repo response = requests.get(git_repo_endpoint, auth=HTTPBasicAuth(username, password)) if response.status_code == 200: return True else: return False #verify_pc_creds verifies the given pc credentials def verify_pc_creds(pc_ip, pc_port, pc_username, pc_password): pc_endpoint = "https://"+pc_ip+":"+pc_port+"/api/nutanix/v3/projects/list" payload = {} headers = {'Accept': 'application/json', 'Content-Type': 'application/json'} response = requests.post(pc_endpoint, auth=HTTPBasicAuth(pc_username, pc_password), data=json.dumps(payload), headers=headers, verify=False) if response.status_code == 200: return True else: return False #get_content gets the blueprint of previous versions def get_content_git(blueprint, project, account_data, ref="master"): owner = account_data['owner'] repo = account_data['repository'] username = account_data['username'] password = account_data['password'] get_content_endpoint = "https://api.github.com/repos/"+owner+"/"+repo+"/contents/"+project+"/blueprints/"+blueprint+".json" params = {"ref": ref} response = requests.get(get_content_endpoint, auth=HTTPBasicAuth(username, password), params=params) respData = json.loads(response.content) return respData #get_git_file_list gets the blueprint of previous versions def get_git_file_list(blueprint, project, account_data, ref="master"): owner = account_data['owner'] repo = account_data['repository'] username = account_data['username'] password = account_data['password'] get_content_endpoint = "https://api.github.com/repos/"+owner+"/"+repo+"/contents/"+project+"/blueprints/" params = {"ref": ref} response = requests.get(get_content_endpoint, auth=HTTPBasicAuth(username, password), params=params) respData = json.loads(response.content) return respData #create_blueprint creates blueprint and returns blueprint sha and commit sha def create_blueprint(blueprint, project, blueprint_json, account_data): owner = account_data['owner'] repo = account_data['repository'] username = account_data['username'] password = account_data['password'] create_blueprint_endpoint = "https://api.github.com/repos/"+owner+"/"+repo+"/contents/"+project+"/blueprints/"+blueprint+".json" payload = {"message": "creates blueprint "+ blueprint, "content": base64.b64encode(blueprint_json)} response = requests.put(create_blueprint_endpoint, auth=HTTPBasicAuth(username, password), data=json.dumps(payload)) respData = json.loads(response.content) return respData['commit']['sha'] #update_blueprint updates blueprint and returns blueprint sha and commit sha def update_blueprint(blueprint, project, blueprint_json, prev_bp_sha, account_data): owner = account_data['owner'] repo = account_data['repository'] username = account_data['username'] password = account_data['password'] update_blueprint_endpoint = "https://api.github.com/repos/"+owner+"/"+repo+"/contents/"+project+"/blueprints/"+blueprint+".json" payload = {"message": "updates blueprint "+ blueprint, "content": base64.b64encode(blueprint_json), "sha" : prev_bp_sha} response = requests.put(update_blueprint_endpoint, auth=HTTPBasicAuth(username, password), data=json.dumps(payload)) respData = json.loads(response.content) return respData['commit']['sha'] def 
get_bp_list(pc_data, length, offset): ip = pc_data["ip"] port = pc_data["port"] username = pc_data["username"] password = pc_data["password"] get_bp_url = "https://"+ip+":"+port+"/api/nutanix/v3/blueprints/list" payload = {'length': length, 'offset': offset, 'filter': "state!=DELETED"} headers = {'Accept': 'application/json', 'Content-Type': 'application/json'} response = requests.post(get_bp_url, auth=HTTPBasicAuth(username, password), data=json.dumps(payload), headers=headers, verify=False) respData = json.loads(response.content) return respData def get_bp_list_in_project(pc_data): ip = pc_data["ip"] port = pc_data["port"] username = pc_data["username"] password = pc_data["password"] projectList = pc_data["project_list"] projectBlueprintList = [] length = 20 count = 0 offset = 0 blueprintCount = get_bp_list(pc_data, length, offset)["metadata"]["total_matches"] while True: blueprintList = get_bp_list(pc_data, length, offset) for bp in blueprintList["entities"]: if bp["metadata"]["project_reference"]["name"] in projectList: projectBlueprintList.append(bp) offset += length count += length if (count >= blueprintCount): break return projectBlueprintList def get_bp_names(bp_list): blueprint_names=[] for bp in bp_list: blueprint_names.append({ "name" : bp["metadata"]["name"], "uuid" : bp["metadata"]["uuid"], "project" : bp["metadata"]["project_reference"]["name"] }) return blueprint_names def get_content_calm(uuid,pc_data): ip = pc_data["ip"] port = pc_data["port"] username = pc_data["username"] password = pc_data["password"] get_bp_url = "https://"+ip+":"+port+"/api/nutanix/v3/blueprints/{}".format(uuid) headers = {'Accept': 'application/json', 'Content-Type': 'application/json'} response = requests.get(get_bp_url, auth=HTTPBasicAuth(username, password), headers=headers, verify=False) respData = response.content return respData def git_update(blueprint_names,account_data,pc_data): for bp in blueprint_names: project = bp["project"] old_bp_sha = "" git_file_list = get_git_file_list(bp["name"], project, account_data, ref="master") if "message" in git_file_list and git_file_list["message"] == "Not Found": print "[WARNING] Project directory {} not found in the repository.".format(bp["project"]) sys.exit(1) print "[INFO] Fetching BP {} details.".format(bp["name"]) blueprint_json = get_content_calm(bp["uuid"], pc_data) for file in git_file_list: if file["name"] == bp["name"]+".json": old_bp_sha = file["sha"] break if old_bp_sha == "": print "[INFO] Creating BP {}.".format(bp["name"]) commit_sha = create_blueprint(bp["name"], project, blueprint_json, account_data) else: print "[INFO] Updating BP {}.".format(bp["name"]) commit_sha = update_blueprint(bp["name"], project, blueprint_json, old_bp_sha, account_data) def get_help(): print """config.ini file not found or missing some config parameters: [calm] pc_ip = <pc_ip> pc_port = <pc_port> username = <pc_username> password = <pc_password> project_list = <project1,project2> [git] owner = <git_owner> repository = <git_repository> username = <git_username> password = <git_password> """ if __name__ == "__main__": config = configparser.ConfigParser() config.read('config.ini') if 'calm' not in config or 'git' not in config: print "[ERROR] Failed to parse calm/git config in 'config.ini'" get_help() sys.exit(1) try: account_data["repository"] = config["git"]["repository"] account_data["owner"] = config["git"]["owner"] account_data["username"] = config["git"]["username"] account_data["password"] = config["git"]["password"] except KeyError: print "[ERROR] Missing 
git config 'repository', 'owner', 'username' & 'password'." get_help() sys.exit(1) except: print "[ERROR] Error while loading config file" try: pc_data["ip"] = config["calm"]["pc_ip"] pc_data["port"] = config["calm"]["pc_port"] pc_data["username"] = config["calm"]["username"] pc_data["password"] = config["calm"]["password"] pc_data["project_list"] = config["calm"]["project_list"] except KeyError: print "[ERROR] Missing pc config 'pc_ip', 'pc_port', 'username', 'password' & 'project_list'." get_help() sys.exit(1) except: print "[ERROR] Error while loading config file." git_status = verify_git_creds(account_data["repository"],account_data["owner"],account_data["username"],account_data["password"]) if git_status != True: print "[ERROR] Failed to authenticate git user." sys.exit(1) pc_status = verify_pc_creds(pc_data["ip"], pc_data["port"], pc_data["username"], pc_data["password"]) if pc_status != True: print "[ERROR] Failed to authenticate to PC." sys.exit(1) blueprint_list = get_bp_list_in_project(pc_data) blueprint_names = get_bp_names(blueprint_list) if len(blueprint_names) == 0: print "[INFO] No blueprints found in the project." sys.exit(0) git_update(blueprint_names,account_data,pc_data)
# Output the player's displacement over 25 seconds.
import time

from mcpi.minecraft import Minecraft

mc = Minecraft.create()

pos1 = mc.player.getTilePos()
x1 = pos1.x
y1 = pos1.y
z1 = pos1.z

# wait for 25 seconds
time.sleep(25)

# get the position again
pos2 = mc.player.getTilePos()
x2 = pos2.x
y2 = pos2.y
z2 = pos2.z

# calculate the distance moved along each axis
xDistance = x2 - x1
yDistance = y2 - y1
zDistance = z2 - z1

# put the result into one variable
post = str(xDistance) + ":" + str(yDistance) + ":" + str(zDistance)

# output the result
mc.postToChat(post)
import re
from unittest import TestCase

import puzio


class TestModule(TestCase):

    def test_timestamp(self):
        t = puzio.timestamp()
        patt = r'\d{8}T\d{6}'
        self.assertTrue(re.fullmatch(patt, t),
                        f"timestamp {t} does not match pattern {patt}")
# coding: utf-8 """ ELEMENTS API The version of the OpenAPI document: 2 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from elements_sdk.configuration import Configuration class FSProperties(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'supports_directory_quotas': 'bool', 'supports_soft_quotas': 'bool', 'supports_user_quotas': 'bool', 'supports_group_quotas': 'bool', 'supports_xattrs': 'bool', 'supports_snapshots': 'bool', 'creating_directory_quota_destroys_content': 'bool', 'removing_directory_quota_destroys_content': 'bool' } attribute_map = { 'supports_directory_quotas': 'supports_directory_quotas', 'supports_soft_quotas': 'supports_soft_quotas', 'supports_user_quotas': 'supports_user_quotas', 'supports_group_quotas': 'supports_group_quotas', 'supports_xattrs': 'supports_xattrs', 'supports_snapshots': 'supports_snapshots', 'creating_directory_quota_destroys_content': 'creating_directory_quota_destroys_content', 'removing_directory_quota_destroys_content': 'removing_directory_quota_destroys_content' } def __init__(self, supports_directory_quotas=None, supports_soft_quotas=None, supports_user_quotas=None, supports_group_quotas=None, supports_xattrs=None, supports_snapshots=None, creating_directory_quota_destroys_content=None, removing_directory_quota_destroys_content=None, local_vars_configuration=None): # noqa: E501 """FSProperties - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._supports_directory_quotas = None self._supports_soft_quotas = None self._supports_user_quotas = None self._supports_group_quotas = None self._supports_xattrs = None self._supports_snapshots = None self._creating_directory_quota_destroys_content = None self._removing_directory_quota_destroys_content = None self.discriminator = None self.supports_directory_quotas = supports_directory_quotas self.supports_soft_quotas = supports_soft_quotas self.supports_user_quotas = supports_user_quotas self.supports_group_quotas = supports_group_quotas self.supports_xattrs = supports_xattrs self.supports_snapshots = supports_snapshots self.creating_directory_quota_destroys_content = creating_directory_quota_destroys_content self.removing_directory_quota_destroys_content = removing_directory_quota_destroys_content @property def supports_directory_quotas(self): """Gets the supports_directory_quotas of this FSProperties. # noqa: E501 :return: The supports_directory_quotas of this FSProperties. # noqa: E501 :rtype: bool """ return self._supports_directory_quotas @supports_directory_quotas.setter def supports_directory_quotas(self, supports_directory_quotas): """Sets the supports_directory_quotas of this FSProperties. :param supports_directory_quotas: The supports_directory_quotas of this FSProperties. 
# noqa: E501 :type: bool """ if self.local_vars_configuration.client_side_validation and supports_directory_quotas is None: # noqa: E501 raise ValueError("Invalid value for `supports_directory_quotas`, must not be `None`") # noqa: E501 self._supports_directory_quotas = supports_directory_quotas @property def supports_soft_quotas(self): """Gets the supports_soft_quotas of this FSProperties. # noqa: E501 :return: The supports_soft_quotas of this FSProperties. # noqa: E501 :rtype: bool """ return self._supports_soft_quotas @supports_soft_quotas.setter def supports_soft_quotas(self, supports_soft_quotas): """Sets the supports_soft_quotas of this FSProperties. :param supports_soft_quotas: The supports_soft_quotas of this FSProperties. # noqa: E501 :type: bool """ if self.local_vars_configuration.client_side_validation and supports_soft_quotas is None: # noqa: E501 raise ValueError("Invalid value for `supports_soft_quotas`, must not be `None`") # noqa: E501 self._supports_soft_quotas = supports_soft_quotas @property def supports_user_quotas(self): """Gets the supports_user_quotas of this FSProperties. # noqa: E501 :return: The supports_user_quotas of this FSProperties. # noqa: E501 :rtype: bool """ return self._supports_user_quotas @supports_user_quotas.setter def supports_user_quotas(self, supports_user_quotas): """Sets the supports_user_quotas of this FSProperties. :param supports_user_quotas: The supports_user_quotas of this FSProperties. # noqa: E501 :type: bool """ if self.local_vars_configuration.client_side_validation and supports_user_quotas is None: # noqa: E501 raise ValueError("Invalid value for `supports_user_quotas`, must not be `None`") # noqa: E501 self._supports_user_quotas = supports_user_quotas @property def supports_group_quotas(self): """Gets the supports_group_quotas of this FSProperties. # noqa: E501 :return: The supports_group_quotas of this FSProperties. # noqa: E501 :rtype: bool """ return self._supports_group_quotas @supports_group_quotas.setter def supports_group_quotas(self, supports_group_quotas): """Sets the supports_group_quotas of this FSProperties. :param supports_group_quotas: The supports_group_quotas of this FSProperties. # noqa: E501 :type: bool """ if self.local_vars_configuration.client_side_validation and supports_group_quotas is None: # noqa: E501 raise ValueError("Invalid value for `supports_group_quotas`, must not be `None`") # noqa: E501 self._supports_group_quotas = supports_group_quotas @property def supports_xattrs(self): """Gets the supports_xattrs of this FSProperties. # noqa: E501 :return: The supports_xattrs of this FSProperties. # noqa: E501 :rtype: bool """ return self._supports_xattrs @supports_xattrs.setter def supports_xattrs(self, supports_xattrs): """Sets the supports_xattrs of this FSProperties. :param supports_xattrs: The supports_xattrs of this FSProperties. # noqa: E501 :type: bool """ if self.local_vars_configuration.client_side_validation and supports_xattrs is None: # noqa: E501 raise ValueError("Invalid value for `supports_xattrs`, must not be `None`") # noqa: E501 self._supports_xattrs = supports_xattrs @property def supports_snapshots(self): """Gets the supports_snapshots of this FSProperties. # noqa: E501 :return: The supports_snapshots of this FSProperties. # noqa: E501 :rtype: bool """ return self._supports_snapshots @supports_snapshots.setter def supports_snapshots(self, supports_snapshots): """Sets the supports_snapshots of this FSProperties. :param supports_snapshots: The supports_snapshots of this FSProperties. 
# noqa: E501 :type: bool """ if self.local_vars_configuration.client_side_validation and supports_snapshots is None: # noqa: E501 raise ValueError("Invalid value for `supports_snapshots`, must not be `None`") # noqa: E501 self._supports_snapshots = supports_snapshots @property def creating_directory_quota_destroys_content(self): """Gets the creating_directory_quota_destroys_content of this FSProperties. # noqa: E501 :return: The creating_directory_quota_destroys_content of this FSProperties. # noqa: E501 :rtype: bool """ return self._creating_directory_quota_destroys_content @creating_directory_quota_destroys_content.setter def creating_directory_quota_destroys_content(self, creating_directory_quota_destroys_content): """Sets the creating_directory_quota_destroys_content of this FSProperties. :param creating_directory_quota_destroys_content: The creating_directory_quota_destroys_content of this FSProperties. # noqa: E501 :type: bool """ if self.local_vars_configuration.client_side_validation and creating_directory_quota_destroys_content is None: # noqa: E501 raise ValueError("Invalid value for `creating_directory_quota_destroys_content`, must not be `None`") # noqa: E501 self._creating_directory_quota_destroys_content = creating_directory_quota_destroys_content @property def removing_directory_quota_destroys_content(self): """Gets the removing_directory_quota_destroys_content of this FSProperties. # noqa: E501 :return: The removing_directory_quota_destroys_content of this FSProperties. # noqa: E501 :rtype: bool """ return self._removing_directory_quota_destroys_content @removing_directory_quota_destroys_content.setter def removing_directory_quota_destroys_content(self, removing_directory_quota_destroys_content): """Sets the removing_directory_quota_destroys_content of this FSProperties. :param removing_directory_quota_destroys_content: The removing_directory_quota_destroys_content of this FSProperties. # noqa: E501 :type: bool """ if self.local_vars_configuration.client_side_validation and removing_directory_quota_destroys_content is None: # noqa: E501 raise ValueError("Invalid value for `removing_directory_quota_destroys_content`, must not be `None`") # noqa: E501 self._removing_directory_quota_destroys_content = removing_directory_quota_destroys_content def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, FSProperties): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, FSProperties): return True return self.to_dict() != other.to_dict()
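# Hedged usage sketch for the generated FSProperties model, used directly
# after its definition: construct it with explicit booleans (the setters
# reject None when client-side validation is on) and round-trip via
# to_dict(). The values are illustrative only.
props = FSProperties(
    supports_directory_quotas=True,
    supports_soft_quotas=False,
    supports_user_quotas=True,
    supports_group_quotas=True,
    supports_xattrs=True,
    supports_snapshots=False,
    creating_directory_quota_destroys_content=False,
    removing_directory_quota_destroys_content=False,
)
assert props.to_dict()['supports_xattrs'] is True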
'''
Created on 16-10-2012

@author: Jacek Przemieniecki
'''
from unifac.facade import Facade  # @UnresolvedImport


class UI(object):
    def __init__(self):
        self.facade = Facade()

    def parse_file(self, f):
        """Opens the file from path f and executes the commands inside."""
        with open(f) as raw_file:
            line_number = 1
            commands = raw_file.readlines()
            try:
                for line in commands:
                    self.run_command(line)
                    line_number += 1
            except Exception:  # TODO: Exception handling
                raise

    def run_command(self, line):
        """Available commands:

        ADD <smiles> <quantity>
            <smiles> - SMILES notation of compound added
            <quantity> - amount (in moles) of compound
        REMOVE <smiles> <quantity>
            <smiles> - SMILES notation of compound removed
            <quantity> - amount (in moles) of compound
        TEMPERATURE <value>
            sets the solution temperature
        PRINT <smiles>
            prints calculation results for current solution
        RESET
            resets the solution
        """
        command = line.split()[0]
        parameters = line.split()[1:3]
        if command == "ADD":
            self.facade.add_molecule_smiles(parameters[0], float(parameters[1]))
        elif command == "REMOVE":
            # Convert to float before negating; negating the raw string would raise a TypeError.
            self.facade.add_molecule_smiles(parameters[0], -float(parameters[1]))
        elif command == "PRINT":
            self.print_result(parameters[0])
        elif command == "RESET":
            self.facade.reset_solution()
        elif command == "TEMPERATURE":
            self.facade.set_temperature(float(parameters[0]))
        else:
            raise Exception()  # TODO: Exception handling CommandError("Unknown command: %s" % command)

    def print_result(self, iden):
        print("Activity coefficient for: ", iden, " ", self.facade.get_coeff(iden))


ui = UI()
while 1:
    ui.run_command(input())
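# Hedged sketch of an input file the parser above accepts; the SMILES
# strings, amounts and temperature value are illustrative (unit assumed to
# be whatever the Facade expects, e.g. Kelvin).
EXAMPLE_COMMANDS = """\
TEMPERATURE 298.15
ADD CCO 1.0
ADD O 9.0
PRINT CCO
RESET
"""
# Feed it either via ui.parse_file(path_to_such_a_file) or line by line to
# ui.run_command(), as the interactive loop at the bottom of the module does.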
'''
Write a program that reads a car's speed. If it exceeds 80 km/h, show a
message saying the driver was fined. The fine costs R$7.00 for every km/h
over the limit.
'''
# Read the car's speed.
vc = int(input('Informe a velocidade do carro: '))
print('--*-' * 20)
print('A velocidade registrada é de {}Km/h.'.format(vc))
print('--*-' * 20)

# if/else conditional structure
if vc > 80:
    print('Você foi MULTADO! Pois excedeu o limite de 80Km/h. E agora deve R${:.2f} reais.'.format((vc - 80) * 7))
else:
    print('PARABÉNS!!!!')
print('--$-' * 20)
""" OAuth/Gmail API-based email notifier :author: alanung and Matthew Farrugia-Roberts """ import base64 import pickle import os.path from email.mime.text import MIMEText from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request CLIENT_ID_PATH = 'gmail-credentials.json' ACCESS_TOKEN_PATH = 'gmail-token.pickle' SCOPES = ['https://www.googleapis.com/auth/gmail.send'] class GmailAPINotifier: """Class to send emails using the Gmail API and OAuth 2.0 Protocol. To use this class, you must obtain an OAuth 2.0 Client ID from the Google API Console (https://console.developers.google.com). See README for detailed instructions. """ def __init__(self, address): """ Initialise a GMail notifier object. Prompt the user to authenticate and provide mailing permissions if required. """ self.address = address self.creds = None # if there's an access token from previous authentication, load it if os.path.exists(ACCESS_TOKEN_PATH): with open(ACCESS_TOKEN_PATH, 'rb') as tokenfile: self.creds = pickle.load(tokenfile) # if the credentials are invalid or non-existent, prompt to authenticate if not self.creds or not self.creds.valid: if self.creds and self.creds.expired and self.creds.refresh_token: self.creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( CLIENT_ID_PATH, SCOPES) self.creds = flow.run_local_server() # save the credentials for the next run with open(ACCESS_TOKEN_PATH, 'wb') as tokenfile: pickle.dump(self.creds, tokenfile) self.service = build('gmail', 'v1', credentials=self.creds) def notify(self, subject, text): msg = MIMEText(text) msg['To'] = self.address msg['From'] = self.address msg['Subject'] = subject data = {'raw': base64.urlsafe_b64encode(msg.as_bytes()).decode()} self.service.users().messages().send(userId='me', body=data).execute() print("Email sent!")
# codes in this file are reproduced from https://github.com/GraphNAS/GraphNAS with some changes. from torch_geometric.nn import ( GATConv, GCNConv, ChebConv, SAGEConv, GatedGraphConv, ARMAConv, SGConv, ) import torch_geometric.nn import torch from torch import nn import torch.nn.functional as F class LinearConv(nn.Module): def __init__(self, in_channels, out_channels, bias=True): super(LinearConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.linear = torch.nn.Linear(in_channels, out_channels, bias) def forward(self, x, edge_index, edge_weight=None): return self.linear(x) def __repr__(self): return "{}({}, {})".format( self.__class__.__name__, self.in_channels, self.out_channels ) class ZeroConv(nn.Module): def forward(self, x, edge_index, edge_weight=None): out = torch.zeros_like(x) out.requires_grad = True return out def __repr__(self): return "ZeroConv()" class Identity(nn.Module): def forward(self, x, edge_index, edge_weight=None): return x def __repr__(self): return "Identity()" def gnn_map(gnn_name, in_dim, out_dim, concat=False, bias=True) -> nn.Module: """ :param gnn_name: :param in_dim: :param out_dim: :param concat: for gat, concat multi-head output or not :return: GNN model """ if gnn_name == "gat_8": return GATConv(in_dim, out_dim, 8, concat=concat, bias=bias) elif gnn_name == "gat_6": return GATConv(in_dim, out_dim, 6, concat=concat, bias=bias) elif gnn_name == "gat_4": return GATConv(in_dim, out_dim, 4, concat=concat, bias=bias) elif gnn_name == "gat_2": return GATConv(in_dim, out_dim, 2, concat=concat, bias=bias) elif gnn_name in ["gat_1", "gat"]: return GATConv(in_dim, out_dim, 1, concat=concat, bias=bias) elif gnn_name == "gcn": return GCNConv(in_dim, out_dim) elif gnn_name == "cheb": return ChebConv(in_dim, out_dim, K=2, bias=bias) elif gnn_name == "sage": return SAGEConv(in_dim, out_dim, bias=bias) elif gnn_name == "gated": return GatedGraphConv(in_dim, out_dim, bias=bias) elif gnn_name == "arma": return ARMAConv(in_dim, out_dim, bias=bias) elif gnn_name == "sg": return SGConv(in_dim, out_dim, bias=bias) elif gnn_name == "linear": return LinearConv(in_dim, out_dim, bias=bias) elif gnn_name == "zero": return ZeroConv() elif gnn_name == "identity": return Identity() elif hasattr(torch_geometric.nn, gnn_name): cls = getattr(torch_geometric.nn, gnn_name) assert isinstance(cls, type), "Only support modules, get %s" % (gnn_name) kwargs = { "in_channels": in_dim, "out_channels": out_dim, "concat": concat, "bias": bias, } kwargs = { key: kwargs[key] for key in cls.__init__.__code__.co_varnames if key in kwargs } return cls(**kwargs) raise KeyError("Cannot parse key %s" % (gnn_name)) import inspect import sys from torch_scatter import scatter_add import torch_scatter special_args = [ "edge_index", "edge_index_i", "edge_index_j", "size", "size_i", "size_j", ] __size_error_msg__ = ( "All tensors which should get mapped to the same source " "or target nodes must be of same size in dimension 0." ) is_python2 = sys.version_info[0] < 3 getargspec = inspect.getargspec if is_python2 else inspect.getfullargspec def scatter_(name, src, index, dim_size=None): r"""Aggregates all values from the :attr:`src` tensor at the indices specified in the :attr:`index` tensor along the first dimension. If multiple indices reference the same location, their contributions are aggregated according to :attr:`name` (either :obj:`"add"`, :obj:`"mean"` or :obj:`"max"`). Args: name (string): The aggregation to use (:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`). 
src (Tensor): The source tensor. index (LongTensor): The indices of elements to scatter. dim_size (int, optional): Automatically create output tensor with size :attr:`dim_size` in the first dimension. If set to :attr:`None`, a minimal sized output tensor is returned. (default: :obj:`None`) :rtype: :class:`Tensor` """ assert name in ["add", "mean", "max"] op = getattr(torch_scatter, "scatter_{}".format(name)) fill_value = -1e9 if name == "max" else 0 out = op(src, index, 0, None, dim_size) if isinstance(out, tuple): out = out[0] if name == "max": out[out == fill_value] = 0 return out from torch.nn import Parameter from torch_geometric.nn.inits import glorot, zeros from torch_geometric.utils import ( remove_self_loops, add_self_loops, add_remaining_self_loops, softmax, ) class MessagePassing(torch.nn.Module): def __init__(self, aggr="add", flow="source_to_target"): super(MessagePassing, self).__init__() self.aggr = aggr assert self.aggr in ["add", "mean", "max"] self.flow = flow assert self.flow in ["source_to_target", "target_to_source"] self.__message_args__ = getargspec(self.message)[0][1:] self.__special_args__ = [ (i, arg) for i, arg in enumerate(self.__message_args__) if arg in special_args ] self.__message_args__ = [ arg for arg in self.__message_args__ if arg not in special_args ] self.__update_args__ = getargspec(self.update)[0][2:] def propagate(self, edge_index, size=None, **kwargs): r"""The initial call to start propagating messages. Args: edge_index (Tensor): The indices of a general (sparse) assignment matrix with shape :obj:`[N, M]` (can be directed or undirected). size (list or tuple, optional): The size :obj:`[N, M]` of the assignment matrix. If set to :obj:`None`, the size is tried to get automatically inferrred. (default: :obj:`None`) **kwargs: Any additional data which is needed to construct messages and to update node embeddings. """ size = [None, None] if size is None else list(size) assert len(size) == 2 i, j = (0, 1) if self.flow == "target_to_source" else (1, 0) ij = {"_i": i, "_j": j} message_args = [] for arg in self.__message_args__: if arg[-2:] in ij.keys(): tmp = kwargs.get(arg[:-2], None) if tmp is None: # pragma: no cover message_args.append(tmp) else: idx = ij[arg[-2:]] if isinstance(tmp, tuple) or isinstance(tmp, list): assert len(tmp) == 2 if tmp[1 - idx] is not None: if size[1 - idx] is None: size[1 - idx] = tmp[1 - idx].size(0) if size[1 - idx] != tmp[1 - idx].size(0): raise ValueError(__size_error_msg__) tmp = tmp[idx] if size[idx] is None: size[idx] = tmp.size(0) if size[idx] != tmp.size(0): raise ValueError(__size_error_msg__) tmp = torch.index_select(tmp, 0, edge_index[idx]) message_args.append(tmp) else: message_args.append(kwargs.get(arg, None)) size[0] = size[1] if size[0] is None else size[0] size[1] = size[0] if size[1] is None else size[1] kwargs["edge_index"] = edge_index kwargs["size"] = size for (idx, arg) in self.__special_args__: if arg[-2:] in ij.keys(): message_args.insert(idx, kwargs[arg[:-2]][ij[arg[-2:]]]) else: message_args.insert(idx, kwargs[arg]) update_args = [kwargs[arg] for arg in self.__update_args__] out = self.message(*message_args) if self.aggr in ["add", "mean", "max"]: out = scatter_(self.aggr, out, edge_index[i], dim_size=size[i]) else: pass out = self.update(out, *update_args) return out def message(self, x_j): # pragma: no cover r"""Constructs messages in analogy to :math:`\phi_{\mathbf{\Theta}}` for each edge in :math:`(i,j) \in \mathcal{E}`. Can take any argument which was initially passed to :meth:`propagate`. 
In addition, features can be lifted to the source node :math:`i` and target node :math:`j` by appending :obj:`_i` or :obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`.""" return x_j def update(self, aggr_out): # pragma: no cover r"""Updates node embeddings in analogy to :math:`\gamma_{\mathbf{\Theta}}` for each node :math:`i \in \mathcal{V}`. Takes in the output of aggregation as first argument and any argument which was initially passed to :meth:`propagate`.""" return aggr_out class GeoLayer(MessagePassing): def __init__( self, in_channels, out_channels, heads=1, concat=True, negative_slope=0.2, dropout=0, bias=True, att_type="gat", agg_type="sum", pool_dim=0, ): if agg_type in ["sum", "mlp"]: super(GeoLayer, self).__init__("add") elif agg_type in ["mean", "max"]: super(GeoLayer, self).__init__(agg_type) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.concat = concat self.negative_slope = negative_slope self.dropout = dropout self.att_type = att_type self.agg_type = agg_type # GCN weight self.gcn_weight = None self.weight = Parameter(torch.Tensor(in_channels, heads * out_channels)) self.att = Parameter(torch.Tensor(1, heads, 2 * out_channels)) if bias and concat: self.bias = Parameter(torch.Tensor(heads * out_channels)) elif bias and not concat: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter("bias", None) if self.att_type in ["generalized_linear"]: self.general_att_layer = torch.nn.Linear(out_channels, 1, bias=False) if self.agg_type in ["mean", "max", "mlp"]: if pool_dim <= 0: pool_dim = 128 self.pool_dim = pool_dim if pool_dim != 0: self.pool_layer = torch.nn.ModuleList() self.pool_layer.append(torch.nn.Linear(self.out_channels, self.pool_dim)) self.pool_layer.append(torch.nn.Linear(self.pool_dim, self.out_channels)) else: pass self.reset_parameters() @staticmethod def norm(edge_index, num_nodes, edge_weight, improved=False, dtype=None): if edge_weight is None: edge_weight = torch.ones( (edge_index.size(1),), dtype=dtype, device=edge_index.device ) fill_value = 1 if not improved else 2 edge_index, edge_weight = add_remaining_self_loops( edge_index, edge_weight, fill_value, num_nodes ) row, col = edge_index deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes) deg_inv_sqrt = deg.pow(-0.5) deg_inv_sqrt[deg_inv_sqrt == float("inf")] = 0 return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col] def reset_parameters(self): glorot(self.weight) glorot(self.att) zeros(self.bias) if self.att_type in ["generalized_linear"]: glorot(self.general_att_layer.weight) if self.pool_dim != 0: for layer in self.pool_layer: glorot(layer.weight) zeros(layer.bias) def forward(self, x, edge_index): """""" edge_index, _ = remove_self_loops(edge_index) edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0)) # prepare x = torch.mm(x, self.weight).view(-1, self.heads, self.out_channels) # x [2708,2,4] weight [1433,8] return self.propagate(edge_index, x=x, num_nodes=x.size(0)) def message(self, x_i, x_j, edge_index, num_nodes): # x_i torch.Size([13264, 2, 4]) # x_j torch.Size([13264, 2, 4]) # edge_index torch.Size([2, 13264]) # num_nodes 2708 if self.att_type == "const": if self.training and self.dropout > 0: x_j = F.dropout(x_j, p=self.dropout, training=True) neighbor = x_j elif self.att_type == "gcn": if self.gcn_weight is None or self.gcn_weight.size(0) != x_j.size( 0 ): # 对于不同的图gcn_weight需要重新计算 _, norm = self.norm(edge_index, num_nodes, None) self.gcn_weight = norm neighbor = 
self.gcn_weight.view(-1, 1, 1) * x_j else: # Compute attention coefficients. alpha = self.apply_attention(edge_index, num_nodes, x_i, x_j) alpha = softmax(alpha, edge_index[0], num_nodes=num_nodes) # Sample attention coefficients stochastically. if self.training and self.dropout > 0: alpha = F.dropout(alpha, p=self.dropout, training=True) neighbor = x_j * alpha.view(-1, self.heads, 1) # pool_layer # (0): Linear(in_features=4, out_features=128, bias=True) # (1): Linear(in_features=128, out_features=4, bias=True) if self.pool_dim > 0: # neighbor torch.Size([13264, 2, 4]) for layer in self.pool_layer: neighbor = layer(neighbor) return neighbor def apply_attention(self, edge_index, num_nodes, x_i, x_j): if self.att_type == "gat": alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1) alpha = F.leaky_relu(alpha, self.negative_slope) elif self.att_type == "gat_sym": wl = self.att[:, :, : self.out_channels] # weight left wr = self.att[:, :, self.out_channels :] # weight right alpha = (x_i * wl).sum(dim=-1) + (x_j * wr).sum(dim=-1) alpha_2 = (x_j * wl).sum(dim=-1) + (x_i * wr).sum(dim=-1) alpha = F.leaky_relu(alpha, self.negative_slope) + F.leaky_relu( alpha_2, self.negative_slope ) elif self.att_type == "linear": wl = self.att[:, :, : self.out_channels] # weight left wr = self.att[:, :, self.out_channels :] # weight right al = x_j * wl ar = x_j * wr alpha = al.sum(dim=-1) + ar.sum(dim=-1) alpha = torch.tanh(alpha) elif self.att_type == "cos": wl = self.att[:, :, : self.out_channels] # weight left wr = self.att[:, :, self.out_channels :] # weight right alpha = x_i * wl * x_j * wr alpha = alpha.sum(dim=-1) elif self.att_type == "generalized_linear": wl = self.att[:, :, : self.out_channels] # weight left wr = self.att[:, :, self.out_channels :] # weight right al = x_i * wl ar = x_j * wr alpha = al + ar alpha = torch.tanh(alpha) alpha = self.general_att_layer(alpha) else: raise Exception("Wrong attention type:", self.att_type) return alpha def update(self, aggr_out):# torch.Size([2708, 2, 4]) if self.concat is True: aggr_out = aggr_out.view(-1, self.heads * self.out_channels) # torch.Size([2708, 8]) else: aggr_out = aggr_out.mean(dim=1) if self.bias is not None: aggr_out = aggr_out + self.bias return aggr_out def __repr__(self): return "{}({}, {}, heads={})".format( self.__class__.__name__, self.in_channels, self.out_channels, self.heads ) def get_param_dict(self): params = {} key = f"{self.att_type}_{self.agg_type}_{self.in_channels}_{self.out_channels}_{self.heads}" weight_key = key + "_weight" att_key = key + "_att" agg_key = key + "_agg" bais_key = key + "_bais" params[weight_key] = self.weight params[att_key] = self.att params[bais_key] = self.bias if hasattr(self, "pool_layer"): params[agg_key] = self.pool_layer.state_dict() return params def load_param(self, params): key = f"{self.att_type}_{self.agg_type}_{self.in_channels}_{self.out_channels}_{self.heads}" weight_key = key + "_weight" att_key = key + "_att" agg_key = key + "_agg" bais_key = key + "_bais" if weight_key in params: self.weight = params[weight_key] if att_key in params: self.att = params[att_key] if bais_key in params: self.bias = params[bais_key] if agg_key in params and hasattr(self, "pool_layer"): self.pool_layer.load_state_dict(params[agg_key])
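# Minimal smoke test, assuming torch and torch_geometric behave as the code
# above expects; the node features and edge_index are made up purely for
# illustration and are not part of GraphNAS.
if __name__ == "__main__":
    x = torch.randn(4, 16)                     # 4 nodes, 16 input features
    edge_index = torch.tensor([[0, 1, 2, 3],
                               [1, 2, 3, 0]])  # a simple directed cycle
    conv = gnn_map("gcn", 16, 8)               # resolve an operator by name
    layer = GeoLayer(16, 8, heads=2, att_type="gat", agg_type="sum")
    print(conv(x, edge_index).shape)           # expected: torch.Size([4, 8])
    print(layer(x, edge_index).shape)          # expected: torch.Size([4, 16]) with concat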
# -*- coding: utf-8 -*- """ Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research, and not-for-profit purposes, without fee and without a signed licensing agreement, is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643- 7201, [email protected], http://ipira.berkeley.edu/industry-info for commercial licensing opportunities. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. """ """ Contact class that encapsulates friction cone and surface window computation. Authors: Brian Hou and Jeff Mahler """ from abc import ABCMeta, abstractmethod import itertools as it import logging import numpy as np from skimage.restoration import denoise_bilateral from autolab_core import RigidTransform from dexnet.constants import NO_CONTACT_DIST from dexnet.constants import WIN_DIST_LIM import matplotlib.pyplot as plt from sklearn.decomposition import PCA # class Contact(metaclass=ABCMeta): # for python3 class Contact: """ Abstract class for contact models. """ __metaclass__ = ABCMeta class Contact3D(Contact): """ 3D contact points. Attributes ---------- graspable : :obj:`GraspableObject3D` object to use to get contact information contact_point : 3x1 :obj:`numpy.ndarray` point of contact on the object in_direction : 3x1 :obj:`numpy.ndarray` direction along which contact was made normal : normalized 3x1 :obj:`numpy.ndarray` surface normal at the contact point """ def __init__(self, graspable, contact_point, in_direction=None): self.graspable_ = graspable self.point_ = contact_point # in world coordinates # cached attributes self.in_direction_ = in_direction # inward facing grasp axis self.friction_cone_ = None self.normal_ = None # outward facing normal self.surface_info_ = None self._compute_normal() @property def graspable(self): return self.graspable_ @property def point(self): return self.point_ @property def normal(self): return self.normal_ @normal.setter def normal(self, normal): self.normal_ = normal @property def in_direction(self): return self.in_direction_ def _compute_normal(self): """Compute outward facing normal at contact, according to in_direction. Indexes into the SDF grid coordinates to lookup the normal info. 
""" # tf to grid as_grid = self.graspable.sdf.transform_pt_obj_to_grid(self.point) on_surface, _ = self.graspable.sdf.on_surface(as_grid) if not on_surface: logging.debug('Contact point not on surface') return None # compute outward facing normal from SDF normal = self.graspable.sdf.surface_normal(as_grid) # flip normal to point outward if in_direction is defined if self.in_direction_ is not None and np.dot(self.in_direction_, normal) > 0: normal = -normal # transform to world frame normal = self.graspable.sdf.transform_pt_grid_to_obj(normal, direction=True) self.normal_ = normal def tangents(self, direction=None, align_axes=True, max_samples=1000): """Returns the direction vector and tangent vectors at a contact point. The direction vector defaults to the *inward-facing* normal vector at this contact. The direction and tangent vectors for a right handed coordinate frame. Parameters ---------- direction : 3x1 :obj:`numpy.ndarray` direction to find orthogonal plane for align_axes : bool whether or not to align the tangent plane to the object reference frame max_samples : int number of samples to use in discrete optimization for alignment of reference frame Returns ------- direction : normalized 3x1 :obj:`numpy.ndarray` direction to find orthogonal plane for t1 : normalized 3x1 :obj:`numpy.ndarray` first tangent vector, x axis t2 : normalized 3x1 :obj:`numpy.ndarray` second tangent vector, y axis """ # illegal contact, cannot return tangents if self.normal_ is None: return None, None, None # default to inward pointing normal if direction is None: direction = -self.normal_ # force direction to face inward if np.dot(self.normal_, direction) > 0: direction = -direction # transform to direction = direction.reshape((3, 1)) # make 2D for SVD # get orthogonal plane U, _, _ = np.linalg.svd(direction) # U[:, 1:] spans the tanget plane at the contact x, y = U[:, 1], U[:, 2] # make sure t1 and t2 obey right hand rule z_hat = np.cross(x, y) if z_hat.dot(direction) < 0: y = -y v = x w = y # redefine tangent x axis to automatically align with the object x axis if align_axes: max_ip = 0 max_theta = 0 target = np.array([1, 0, 0]) theta = 0 d_theta = 2 * np.pi / float(max_samples) for i in range(max_samples): v = np.cos(theta) * x + np.sin(theta) * y if v.dot(target) > max_ip: max_ip = v.dot(target) max_theta = theta theta = theta + d_theta v = np.cos(max_theta) * x + np.sin(max_theta) * y w = np.cross(direction.ravel(), v) return np.squeeze(direction), v, w def reference_frame(self, align_axes=True): """Returns the local reference frame of the contact. Z axis in the in direction (or surface normal if not specified) X and Y axes in the tangent plane to the direction Parameters ---------- align_axes : bool whether or not to align to the object axes Returns ------- :obj:`RigidTransform` rigid transformation from contact frame to object frame """ t_obj_contact = self.point rz, rx, ry = self.tangents(self.in_direction_, align_axes=align_axes) R_obj_contact = np.array([rx, ry, rz]).T T_contact_obj = RigidTransform(rotation=R_obj_contact, translation=t_obj_contact, from_frame='contact', to_frame='obj') return T_contact_obj def normal_force_magnitude(self): """ Returns the component of the force that the contact would apply along the normal direction. 
Returns ------- float magnitude of force along object surface normal """ normal_force_mag = 1.0 if self.in_direction_ is not None and self.normal_ is not None: in_normal = -self.normal_ in_direction_norm = self.in_direction_ / np.linalg.norm(self.in_direction_) normal_force_mag = np.dot(in_direction_norm, in_normal) return max(normal_force_mag, 0.0) def friction_cone(self, num_cone_faces=8, friction_coef=0.5): """ Computes the friction cone and normal for a contact point. Parameters ---------- num_cone_faces : int number of cone faces to use in discretization friction_coef : float coefficient of friction at contact point Returns ------- success : bool False when cone can't be computed cone_support : :obj:`numpy.ndarray` array where each column is a vector on the boundary of the cone normal : normalized 3x1 :obj:`numpy.ndarray` outward facing surface normal """ if self.friction_cone_ is not None and self.normal_ is not None: return True, self.friction_cone_, self.normal_ # get normal and tangents in_normal, t1, t2 = self.tangents() if in_normal is None: return False, self.friction_cone_, self.normal_ friction_cone_valid = True # check whether contact would slip, which is whether or not the tangent force is always # greater than the frictional force if self.in_direction_ is not None: in_direction_norm = self.in_direction_ / np.linalg.norm(self.in_direction_) normal_force_mag = self.normal_force_magnitude() tan_force_x = np.dot(in_direction_norm, t1) tan_force_y = np.dot(in_direction_norm, t2) tan_force_mag = np.sqrt(tan_force_x ** 2 + tan_force_y ** 2) friction_force_mag = friction_coef * normal_force_mag if friction_force_mag < tan_force_mag: logging.debug('Contact would slip') return False, self.friction_cone_, self.normal_ # set up friction cone tan_len = friction_coef force = in_normal cone_support = np.zeros((3, num_cone_faces)) # find convex combinations of tangent vectors for j in range(num_cone_faces): tan_vec = t1 * np.cos(2 * np.pi * (float(j) / num_cone_faces)) + t2 * np.sin( 2 * np.pi * (float(j) / num_cone_faces)) cone_support[:, j] = force + friction_coef * tan_vec self.friction_cone_ = cone_support return True, self.friction_cone_, self.normal_ def torques(self, forces): """ Get the torques that can be applied by a set of force vectors at the contact point. Parameters ---------- forces : 3xN :obj:`numpy.ndarray` the forces applied at the contact Returns ------- success : bool whether or not computation was successful torques : 3xN :obj:`numpy.ndarray` the torques that can be applied by given forces at the contact """ as_grid = self.graspable.sdf.transform_pt_obj_to_grid(self.point) on_surface, _ = self.graspable.sdf.on_surface(as_grid) if not on_surface: logging.debug('Contact point not on surface') return False, None num_forces = forces.shape[1] torques = np.zeros([3, num_forces]) moment_arm = self.graspable.moment_arm(self.point) for i in range(num_forces): torques[:, i] = np.cross(moment_arm, forces[:, i]) return True, torques def surface_window_sdf(self, width=1e-2, num_steps=21): """Returns a window of SDF values on the tangent plane at a contact point. Used for patch computation. 
Parameters ---------- width : float width of the window in obj frame num_steps : int number of steps to use along the contact in direction Returns ------- window : NUM_STEPSxNUM_STEPS :obj:`numpy.ndarray` array of distances from tangent plane to obj along in direction, False if surface window can't be computed """ in_normal, t1, t2 = self.tangents() if in_normal is None: # normal and tangents not found return False scales = np.linspace(-width / 2.0, width / 2.0, num_steps) window = np.zeros(num_steps ** 2) for i, (c1, c2) in enumerate(it.product(scales, repeat=2)): curr_loc = self.point + c1 * t1 + c2 * t2 curr_loc_grid = self.graspable.sdf.transform_pt_obj_to_grid(curr_loc) if self.graspable.sdf.is_out_of_bounds(curr_loc_grid): window[i] = -1e-2 continue window[i] = self.graspable.sdf[curr_loc_grid] return window.reshape((num_steps, num_steps)) def _compute_surface_window_projection(self, u1=None, u2=None, width=1e-2, num_steps=21, max_projection=0.1, back_up=0, samples_per_grid=2.0, sigma_range=0.1, sigma_spatial=1, direction=None, vis=False, compute_weighted_covariance=False, disc=False, num_radial_steps=5, debug_objs=None): """Compute the projection window onto the basis defined by u1 and u2. Params: u1, u2 - orthogonal numpy 3 arrays width - float width of the window in obj frame num_steps - int number of steps max_projection - float maximum amount to search forward for a contact (meters) back_up - amount in meters to back up before projecting samples_per_grid - float number of samples per grid when finding contacts sigma - bandwidth of gaussian filter on window direction - dir to do the projection along compute_weighted_covariance - whether to return the weighted covariance matrix, along with the window Returns: window - numpy NUM_STEPSxNUM_STEPS array of distances from tangent plane to obj, False if surface window can't be computed """ direction, t1, t2 = self.tangents(direction) if direction is None: # normal and tangents not found raise ValueError('Direction could not be computed') if u1 is not None and u2 is not None: # use given basis t1, t2 = u1, u2 # number of samples used when looking for contacts no_contact = NO_CONTACT_DIST num_samples = int(samples_per_grid * (max_projection + back_up) / self.graspable.sdf.resolution) window = np.zeros(num_steps ** 2) res = width / num_steps scales = np.linspace(-width / 2.0 + res / 2.0, width / 2.0 - res / 2.0, num_steps) scales_it = it.product(scales, repeat=2) if disc: scales_it = [] for i in range(num_steps): theta = 2.0 * np.pi / i for j in range(num_radial_steps): r = (j + 1) * width / num_radial_steps p = (r * np.cos(theta), r * np.sin(theta)) scales_it.append(p) # start computing weighted covariance matrix if compute_weighted_covariance: cov = np.zeros((3, 3)) cov_weight = 0 if vis: ax = plt.gca(projection='3d') self.graspable_.sdf.scatter() for i, (c1, c2) in enumerate(scales_it): curr_loc = self.point + c1 * t1 + c2 * t2 curr_loc_grid = self.graspable.sdf.transform_pt_obj_to_grid(curr_loc) if self.graspable.sdf.is_out_of_bounds(curr_loc_grid): window[i] = no_contact continue if vis: ax.scatter(curr_loc_grid[0], curr_loc_grid[1], curr_loc_grid[2], s=130, c='y') found, projection_contact = self.graspable._find_projection( curr_loc, direction, max_projection, back_up, num_samples, vis=vis) if found: # logging.debug('%d found.' 
%(i)) sign = direction.dot(projection_contact.point - curr_loc) projection = (sign / abs(sign)) * np.linalg.norm(projection_contact.point - curr_loc) projection = min(projection, max_projection) if compute_weighted_covariance: # weight according to SHOT: R - d_i weight = width / np.sqrt(2) - np.sqrt(c1 ** 2 + c2 ** 2) diff = (projection_contact.point - self.point).reshape((3, 1)) cov += weight * np.dot(diff, diff.T) cov_weight += weight else: logging.debug('%d not found.' % (i)) projection = no_contact window[i] = projection if vis: plt.show() if not disc: window = window.reshape((num_steps, num_steps)).T # transpose to make x-axis along columns if debug_objs is not None: debug_objs.append(window) # apply bilateral filter if sigma_range > 0.0 and sigma_spatial > 0.0: window_min_val = np.min(window) window_pos = window - window_min_val window_pos_blur = denoise_bilateral(window_pos, sigma_range=sigma_range, sigma_spatial=sigma_spatial, mode='nearest') window = window_pos_blur + window_min_val if compute_weighted_covariance: if cov_weight > 0: return window, cov / cov_weight return window, cov return window def surface_window_projection_unaligned(self, width=1e-2, num_steps=21, max_projection=0.1, back_up=0.0, samples_per_grid=2.0, sigma=1.5, direction=None, vis=False): """Projects the local surface onto the tangent plane at a contact point. Deprecated. """ return self._compute_surface_window_projection(width=width, num_steps=num_steps, max_projection=max_projection, back_up=back_up, samples_per_grid=samples_per_grid, sigma=sigma, direction=direction, vis=vis) def surface_window_projection(self, width=1e-2, num_steps=21, max_projection=0.1, back_up=0.0, samples_per_grid=2.0, sigma_range=0.1, sigma_spatial=1, direction=None, compute_pca=False, vis=False, debug_objs=None): """Projects the local surface onto the tangent plane at a contact point. 
Parameters ---------- width : float width of the window in obj frame num_steps : int number of steps to use along the in direction max_projection : float maximum amount to search forward for a contact (meters) back_up : float amount to back up before finding a contact in meters samples_per_grid : float number of samples per grid when finding contacts sigma_range : float bandwidth of bilateral range filter on window sigma_spatial : float bandwidth of gaussian spatial filter of bilateral filter direction : 3x1 :obj:`numpy.ndarray` dir to do the projection along Returns ------- window : NUM_STEPSxNUM_STEPS :obj:`numpy.ndarray` array of distances from tangent plane to obj, False if surface window can't be computed """ # get initial projection direction, t1, t2 = self.tangents(direction) window, cov = self._compute_surface_window_projection(t1, t2, width=width, num_steps=num_steps, max_projection=max_projection, back_up=back_up, samples_per_grid=samples_per_grid, sigma_range=sigma_range, sigma_spatial=sigma_spatial, direction=direction, vis=False, compute_weighted_covariance=True, debug_objs=debug_objs) if not compute_pca: return window # compute principal axis pca = PCA() pca.fit(cov) R = pca.components_ principal_axis = R[0, :] if np.isclose(abs(np.dot(principal_axis, direction)), 1): # principal axis is aligned with direction of projection, use secondary axis principal_axis = R[1, :] if vis: # reshape window window = window.reshape((num_steps, num_steps)) # project principal axis onto tangent plane (t1, t2) to get u1 u1t = np.array([np.dot(principal_axis, t1), np.dot(principal_axis, t2)]) u2t = np.array([-u1t[1], u1t[0]]) if sigma > 0: window = spfilt.gaussian_filter(window, sigma) plt.figure() plt.title('Principal Axis') plt.imshow(window, extent=[0, num_steps - 1, num_steps - 1, 0], interpolation='none', cmap=plt.cm.binary) plt.colorbar() plt.clim(-WIN_DIST_LIM, WIN_DIST_LIM) # fixing color range for visual comparisons center = num_steps // 2 plt.scatter([center, center * u1t[0] + center], [center, -center * u1t[1] + center], color='blue') plt.scatter([center, center * u2t[0] + center], [center, -center * u2t[1] + center], color='green') u1 = np.dot(principal_axis, t1) * t1 + np.dot(principal_axis, t2) * t2 u2 = np.cross(direction, u1) # u2 must be orthogonal to u1 on plane u1 = u1 / np.linalg.norm(u1) u2 = u2 / np.linalg.norm(u2) window = self._compute_surface_window_projection(u1, u2, width=width, num_steps=num_steps, max_projection=max_projection, back_up=back_up, samples_per_grid=samples_per_grid, sigma=sigma, direction=direction, vis=False) # arbitrarily require that right_avg > left_avg (inspired by SHOT) left_avg = np.average(window[:, :num_steps // 2]) right_avg = np.average(window[:, num_steps // 2:]) if left_avg > right_avg: # need to flip both u1 and u2, i.e. 
rotate 180 degrees window = np.rot90(window, k=2) if vis: if sigma > 0: window = spfilt.gaussian_filter(window, sigma) plt.figure() plt.title('Tfd') plt.imshow(window, extent=[0, num_steps - 1, num_steps - 1, 0], interpolation='none', cmap=plt.cm.binary) plt.colorbar() plt.clim(-WIN_DIST_LIM, WIN_DIST_LIM) # fixing color range for visual comparisons plt.show() return window def plot_friction_cone(self, color='y', scale=1.0): success, cone, in_normal = self.friction_cone() ax = plt.gca(projection='3d') self.graspable.sdf.scatter() # object x, y, z = self.graspable.sdf.transform_pt_obj_to_grid(self.point) nx, ny, nz = self.graspable.sdf.transform_pt_obj_to_grid(in_normal, direction=True) ax.scatter([x], [y], [z], c=color, s=60) # contact ax.scatter([x - nx], [y - ny], [z - nz], c=color, s=60) # normal if success: ax.scatter(x + scale * cone[0], y + scale * cone[1], z + scale * cone[2], c=color, s=40) # cone ax.set_xlim3d(0, self.graspable.sdf.dims_[0]) ax.set_ylim3d(0, self.graspable.sdf.dims_[1]) ax.set_zlim3d(0, self.graspable.sdf.dims_[2]) return plt.Rectangle((0, 0), 1, 1, fc=color) # return a proxy for legend class SurfaceWindow: """Struct for encapsulating local surface window features. Attributes ---------- proj_win : NxN :obj:`numpy.ndarray` the window of distances to a surface (depth image created by orthographic projection) grad : NxN :obj:`numpy.ndarray` X and Y gradients of the projection window hess_x : NxN :obj:`numpy.ndarray` hessian, partial derivatives of the X gradient window hess_y : NxN :obj:`numpy.ndarray` hessian, partial derivatives of the Y gradient window gauss_curvature : NxN :obj:`numpy.ndarray` gauss curvature at each point (function of hessian determinant) """ def __init__(self, proj_win, grad, hess_x, hess_y, gauss_curvature): self.proj_win_ = proj_win self.grad_ = grad self.hess_x_ = hess_x self.hess_y_ = hess_y self.gauss_curvature_ = gauss_curvature @property def proj_win_2d(self): return self.proj_win_ @property def proj_win(self): return self.proj_win_.flatten() @property def grad_x(self): return self.grad_[0].flatten() @property def grad_y(self): return self.grad_[1].flatten() @property def grad_x_2d(self): return self.grad_[0] @property def grad_y_2d(self): return self.grad_[1] @property def curvature(self): return self.gauss_curvature_.flatten() def asarray(self, proj_win_weight=0.0, grad_x_weight=0.0, grad_y_weight=0.0, curvature_weight=0.0): proj_win = proj_win_weight * self.proj_win grad_x = grad_x_weight * self.grad_x grad_y = grad_y_weight * self.grad_y curvature = curvature_weight * self.gauss_curvature return np.append([], [proj_win, grad_x, grad_y, curvature])
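# Illustrative helper only: Contact3D requires a GraspableObject3D (with an
# SDF) from the rest of dex-net, so `graspable`, `contact_point` and
# `grasp_axis` below are assumed to come from the surrounding pipeline.
def example_friction_cone(graspable, contact_point, grasp_axis, friction_coef=0.5):
    contact = Contact3D(graspable, contact_point, in_direction=grasp_axis)
    success, cone, normal = contact.friction_cone(num_cone_faces=8,
                                                  friction_coef=friction_coef)
    if not success:
        logging.debug('Friction cone could not be computed')
        return None
    # each column of `cone` is a boundary vector of the discretized friction cone
    return cone, normal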
# Copyright (C) 2020 FUJITSU # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver LOG = logging.getLogger(__name__) class VnflcmMgmtNoop(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver): def get_type(self): return 'vnflcm_noop' def get_name(self): return 'vnflcm_noop' def get_description(self): return 'Tacker VNFMgmt VnflcmNoop Driver' def instantiate_start(self, vnf_instance, additional_params, **kwargs): LOG.debug('instantiate_start %(vnf_instance)s ' '%(additional_params)s %(kwargs)s', {'vnf_instance': vnf_instance, 'additional_params': additional_params, 'kwargs': kwargs}) pass def instantiate_end(self, vnf_instance, additional_params, **kwargs): LOG.debug('instantiate_end %(vnf_instance)s ' '%(additional_params)s %(kwargs)s', {'vnf_instance': vnf_instance, 'additional_params': additional_params, 'kwargs': kwargs}) pass def terminate_start(self, vnf_instance, additional_params, **kwargs): LOG.debug('terminate_start %(vnf_instance)s ' '%(additional_params)s %(kwargs)s', {'vnf_instance': vnf_instance, 'additional_params': additional_params, 'kwargs': kwargs}) pass def terminate_end(self, vnf_instance, additional_params, **kwargs): LOG.debug('terminate_end %(vnf_instance)s ' '%(additional_params)s %(kwargs)s', {'vnf_instance': vnf_instance, 'additional_params': additional_params, 'kwargs': kwargs}) pass def scale_start(self, vnf_instance, additional_params, **kwargs): LOG.debug('scale_start %(vnf_instance)s ' '%(additional_params)s %(kwargs)s', {'vnf_instance': vnf_instance, 'additional_params': additional_params, 'kwargs': kwargs}) pass def scale_end(self, vnf_instance, additional_params, **kwargs): LOG.debug('scale_end %(vnf_instance)s ' '%(additional_params)s %(kwargs)s', {'vnf_instance': vnf_instance, 'additional_params': additional_params, 'kwargs': kwargs}) pass def heal_start(self, vnf_instance, additional_params, **kwargs): LOG.debug('heal_start %(vnf_instance)s ' '%(additional_params)s %(kwargs)s', {'vnf_instance': vnf_instance, 'additional_params': additional_params, 'kwargs': kwargs}) pass def heal_end(self, vnf_instance, additional_params, **kwargs): LOG.debug('heal_end %(vnf_instance)s ' '%(additional_params)s %(kwargs)s', {'vnf_instance': vnf_instance, 'additional_params': additional_params, 'kwargs': kwargs}) pass
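# Hedged illustration (not part of Tacker): a site-specific management driver
# could reuse the no-op implementations above and override only the hooks it
# needs; the class name and log message are invented for this example.
class VnflcmMgmtLogOnly(VnflcmMgmtNoop):
    def get_type(self):
        return 'vnflcm_log_only'

    def get_name(self):
        return 'vnflcm_log_only'

    def instantiate_end(self, vnf_instance, additional_params, **kwargs):
        LOG.info('instantiate_end for %(vnf_instance)s with '
                 '%(additional_params)s',
                 {'vnf_instance': vnf_instance,
                  'additional_params': additional_params})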
import random import torch import numpy as np from ..environment.device import DeviceTopology from ..environment.state import CircuitStateDQN from ..algorithms.simanneal import AnnealerDQN from ..hyperparams import DEVICE class DoubleDQNAgent(torch.nn.Module): def __init__(self, device: DeviceTopology): """ Initializes the graph network as a torch module and makes the architecture and the graph. :param device: the Topology to which the agent is mapping to """ super(DoubleDQNAgent, self).__init__() self.device: DeviceTopology = device # For the action space self.current_model = torch.nn.Sequential( torch.nn.Linear(2 * self.device.max_distance, 32), torch.nn.ReLU(), torch.nn.Linear(32, 32), torch.nn.ReLU(), torch.nn.Linear(32, 32), torch.nn.ReLU(), torch.nn.Linear(32, 1), ).to(DEVICE) self.target_model = torch.nn.Sequential( torch.nn.Linear(2 * self.device.max_distance, 32), torch.nn.ReLU(), torch.nn.Linear(32, 32), torch.nn.ReLU(), torch.nn.Linear(32, 32), torch.nn.ReLU(), torch.nn.Linear(32, 1), ).to(DEVICE) self.current_optimizer = torch.optim.Adam(self.current_model.parameters()) self.annealer = AnnealerDQN(self, device) self.gamma = 0.8 self.epsilon_decay = 0.95 self.epsilon = 1.0 self.epsilon_min = 0.001 def update_target_model(self): """ Copy weights from the current model to the target model """ self.target_model.load_state_dict(self.current_model.state_dict()) def forward(self, current_state, next_state, action_chooser='model'): """ Get the value function approximations for the given state representation :param current_state: the current state :param next_state: the next state as a result of the action :param action_chooser: str, model if current model or target if target model :return: int/float, the value function approximation """ current_distance_vector = self.get_distance_metric(current_state) next_distance_vector = self.get_distance_metric(next_state) nn_input = torch.cat([current_distance_vector, next_distance_vector], dim=-1) if action_chooser == 'model': q_val = self.current_model(nn_input) elif action_chooser == 'target': q_val = self.target_model(nn_input) else: raise ValueError('Action_chooser must be either model or target') return q_val def act(self, state: CircuitStateDQN): """ Chooses an action to perform in the environment and returns it (i.e. 
does not alter environment state) :param state: the state of the environment :return: np.array of shape (len(device),), the chosen action mask after annealing """ if np.random.rand() <= self.epsilon: action, value = self.generate_random_action(state), -1 else: action, value = self.annealer.simulated_annealing(state, action_chooser='model') return action, -value def replay(self, memory, batch_size=32): """ Learns from past experiences :param memory: MemoryTree object, the experience buffer to sample from :param batch_size: number of experiences to sample from the experience buffer when training """ tree_index, minibatch, is_weights = memory.sample(batch_size) absolute_errors = [] is_weights = np.reshape(is_weights, -1) for experience, is_weight in zip(minibatch, is_weights): [state, reward, next_state, done] = experience[0] # Train the current model (model.fit in current state) q_val = self(state, next_state)[0] if done: target = reward else: _, energy = self.annealer.simulated_annealing(next_state, action_chooser='target', search_limit=10) target = reward - self.gamma * energy absolute_errors.append(abs(q_val.detach() - target)) self.current_optimizer.zero_grad() loss = torch.multiply(torch.square(torch.subtract(q_val, target)), is_weight) loss.backward() self.current_optimizer.step() memory.batch_update(tree_index, absolute_errors) # Epsilon decay function - exploration vs. exploitation if self.epsilon > self.epsilon_min: self.epsilon *= self.epsilon_decay def generate_random_action(self, state: CircuitStateDQN): """ Generates a random layer of swaps. Care is taken to ensure that all swaps can occur in parallel. That is, no two neighbouring edges undergo a swap simultaneously. """ action = np.array([0] * len(self.device.edges)) # an action representing an empty layer of swaps edges = [(n1, n2) for (n1, n2) in self.device.edges] edges = list(filter(lambda e: e[0] not in state.protected_nodes and e[1] not in state.protected_nodes, edges)) edge_index_map = {edge: index for index, edge in enumerate(edges)} while len(edges) > 0: edge, action[edge_index_map[edge]] = random.sample(edges, 1)[0], 1 edges = [e for e in edges if e[0] not in edge and e[1] not in edge] return action def get_distance_metric(self, state: CircuitStateDQN): """ Obtains a vector that summarises the different distances from qubits to their targets. More precisely, x_i represents the number of qubits that are currently a distance of i away from their targets. If there are n qubits, then the length of this vector will also be n. """ nodes_to_target_qubits = [ state._qubit_targets[state.node_to_qubit[n]] for n in range(0, len(state.node_to_qubit))] nodes_to_target_nodes = [ next(iter(np.where(np.array(state.node_to_qubit) == q)[0]), -1) for q in nodes_to_target_qubits] distance_vector = np.zeros(self.device.max_distance) for node in range(len(nodes_to_target_nodes)): target = nodes_to_target_nodes[node] if target == -1: continue d = int(self.device.distances[node, target]) distance_vector[d - 1] += 1 distance_vector = torch.from_numpy(distance_vector).to(DEVICE).float() return distance_vector
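# Training-loop sketch, assuming an environment that yields CircuitStateDQN
# states and a prioritized-replay memory with the interface used by replay()
# above; `env` and `memory` (and their step/store/len semantics) are
# placeholders, not part of this module.
def train_episode(agent, env, memory, batch_size=32):
    state = env.reset()
    done = False
    while not done:
        action, estimated_value = agent.act(state)
        next_state, reward, done = env.step(action)
        memory.store((state, reward, next_state, done))
        if len(memory) >= batch_size:
            agent.replay(memory, batch_size)
        state = next_state
    agent.update_target_model()  # sync the target network between episodes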
import pytest @pytest.fixture def analysis_step_version_3(testapp, analysis_step, software_version): item = { 'schema_version': '3', 'version': 1, 'analysis_step': analysis_step['@id'], 'software_versions': [ software_version['@id'], ], } return item @pytest.fixture def analysis_step_version(testapp, analysis_step, software_version): item = { 'analysis_step': analysis_step['@id'], 'minor_version': 0, 'software_versions': [ software_version['@id'], ], } return testapp.post_json('/analysis_step_version', item).json['@graph'][0] @pytest.fixture def analysis_step_version_bam(testapp, analysis_step_bam, software_version): item = { 'analysis_step': analysis_step_bam['@id'], 'minor_version': 0, 'software_versions': [ software_version['@id'], ], } return testapp.post_json('/analysis_step_version', item).json['@graph'][0] @pytest.fixture def analysis_step_version_chip_encode4(testapp, analysis_step_chip_encode4, software_version): item = { 'analysis_step': analysis_step_chip_encode4['@id'], 'minor_version': 0, 'software_versions': [ software_version['@id'], ], } return testapp.post_json('/analysis_step_version', item).json['@graph'][0] @pytest.fixture def analysis_step_version_atac_encode4_alignment(testapp, analysis_step_atac_encode4_alignment, software_version): item = { 'analysis_step': analysis_step_atac_encode4_alignment['@id'], 'minor_version': 0, 'software_versions': [ software_version['@id'], ], } return testapp.post_json('/analysis_step_version', item).json['@graph'][0] @pytest.fixture def analysis_step_version_atac_encode4_replicate_concordance(testapp, analysis_step_atac_encode4_replicate_concordance, software_version): item = { 'analysis_step': analysis_step_atac_encode4_replicate_concordance['@id'], 'minor_version': 0, 'software_versions': [ software_version['@id'], ], } return testapp.post_json('/analysis_step_version', item).json['@graph'][0]
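# Illustrative sketch only (it would live in a test module, not in this
# fixtures file): consuming one of the fixtures above. The asserted field
# mirrors what the fixture posts; the exact response shape depends on the app.
def test_analysis_step_version_minor_version(testapp, analysis_step_version):
    res = testapp.get(analysis_step_version['@id'])
    assert res.json['minor_version'] == 0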
from __future__ import absolute_import # Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the # License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. import os import argparse import logging import platform import json import sys import errno from . import cfncluster from . import easyconfig def create(args): cfncluster.create(args) def configure(args): easyconfig.configure(args) def command(args, extra_args): cfncluster.command(args, extra_args) def status(args): cfncluster.status(args) def list(args): cfncluster.list(args) def delete(args): cfncluster.delete(args) def instances(args): cfncluster.instances(args) def update(args): cfncluster.update(args) def version(args): cfncluster.version(args) def start(args): cfncluster.start(args) def stop(args): cfncluster.stop(args) def config_logger(): logger = logging.getLogger('cfncluster.cfncluster') logger.setLevel(logging.DEBUG) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) ch.setFormatter(logging.Formatter('%(message)s')) logger.addHandler(ch) logfile = os.path.expanduser(os.path.join('~', '.cfncluster', 'cfncluster-cli.log')) try: os.makedirs(os.path.dirname(logfile)) except OSError as e: if e.errno != errno.EEXIST: raise # can safely ignore EEXISTS for this purpose... fh = logging.FileHandler(logfile) fh.setLevel(logging.DEBUG) fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')) logger.addHandler(fh) def addarg_config(subparser): subparser.add_argument("--config", "-c", dest="config_file", help='specify a alternative config file') def addarg_region(subparser): subparser.add_argument( "--region", "-r", dest="region", help='specify a specific region to connect to', default=None) def addarg_nowait(subparser): subparser.add_argument( "--nowait", "-nw", dest="nowait", action='store_true', help='do not wait for stack events, after executing stack command') def main(): config_logger() logger = logging.getLogger('cfncluster.cfncluster') logger.debug("CfnCluster cli starting") parser = argparse.ArgumentParser(description='cfncluster is a tool to launch and manage a cluster.', epilog="For command specific flags run cfncluster [command] --help") subparsers = parser.add_subparsers() subparsers.required = True subparsers.dest = 'command' pcreate = subparsers.add_parser('create', help='creates a cluster') pcreate.add_argument("cluster_name", type=str, default=None, help='create a cfncluster with the provided name.') addarg_config(pcreate) addarg_region(pcreate) addarg_nowait(pcreate) pcreate.add_argument("--norollback", "-nr", action='store_true', dest="norollback", default=False, help='disable stack rollback on error') pcreate.add_argument("--template-url", "-u", type=str, dest="template_url", default=None, help='specify a URL for a custom cloudformation template') pcreate.add_argument("--cluster-template", "-t", type=str, dest="cluster_template", default=None, help='specify a specific cluster template to use') pcreate.add_argument("--extra-parameters", "-p", type=json.loads, dest="extra_parameters", default=None, help='add extra parameters to stack 
create') pcreate.add_argument("--tags", "-g", type=json.loads, dest="tags", default=None, help='tags to be added to the stack') pcreate.set_defaults(func=create) pupdate = subparsers.add_parser('update', help='update a running cluster') pupdate.add_argument("cluster_name", type=str, default=None, help='update a cfncluster with the provided name.') addarg_config(pupdate) addarg_region(pupdate) addarg_nowait(pupdate) pupdate.add_argument("--norollback", "-nr", action='store_true', dest="norollback", default=False, help='disable stack rollback on error') pupdate.add_argument("--template-url", "-u", type=str, dest="template_url", default=None, help='specify a URL for a custom cloudformation template') pupdate.add_argument("--cluster-template", "-t", type=str, dest="cluster_template", default=None, help='specify a specific cluster template to use') pupdate.add_argument("--extra-parameters", "-p", type=str, dest="extra_parameters", default=None, help='add extra parameters to stack update') pupdate.add_argument("--reset-desired", "-rd", action='store_true', dest="reset_desired", default=False, help='reset the current ASG desired capacity to initial config values') pupdate.set_defaults(func=update) pdelete = subparsers.add_parser('delete', help='delete a cluster') pdelete.add_argument("cluster_name", type=str, default=None, help='delete a cfncluster with the provided name.') addarg_config(pdelete) addarg_region(pdelete) addarg_nowait(pdelete) pdelete.set_defaults(func=delete) pstart = subparsers.add_parser('start', help='start the compute fleet that has been stopped') pstart.add_argument("cluster_name", type=str, default=None, help='starts the compute fleet of the provided cluster name.') addarg_config(pstart) addarg_region(pstart) pstart.set_defaults(func=start) pstop = subparsers.add_parser('stop', help='stop the compute fleet, but leave the master server running for ' 'debugging/development') pstop.add_argument("cluster_name", type=str, default=None, help='stops the compute fleet of the provided cluster name.') addarg_config(pstop) addarg_region(pstop) pstop.set_defaults(func=stop) pstatus = subparsers.add_parser('status', help='pull the current status of the cluster') pstatus.add_argument("cluster_name", type=str, default=None, help='show the status of cfncluster with the provided name.') addarg_config(pstatus) addarg_region(pstatus) addarg_nowait(pstatus) pstatus.set_defaults(func=status) plist = subparsers.add_parser('list', help='display a list of stacks associated with cfncluster') addarg_config(plist) addarg_region(plist) plist.set_defaults(func=list) pinstances = subparsers.add_parser('instances', help='display a list of all instances in a cluster') pinstances.add_argument("cluster_name", type=str, default=None, help='show the status of cfncluster with the provided name.') addarg_config(pinstances) addarg_region(pinstances) pinstances.set_defaults(func=instances) pssh = subparsers.add_parser('ssh', help='connect to the master server using SSH', description='run ssh command with username and ip address pre-filled. ' \ 'Arbitrary arguments are appended to the end of the ssh commmand. 
' \ 'This command may be customized in the aliases section of the config file.') pssh.add_argument("cluster_name", type=str, default=None, help='name of the cluster to set variables for.') pssh.add_argument("--dryrun", "-d", action='store_true', dest="dryrun", default=False, help='print command and exit.') pssh.set_defaults(func=command) pconfigure = subparsers.add_parser('configure', help='creating initial cfncluster configuration') addarg_config(pconfigure) pconfigure.set_defaults(func=configure) pversion = subparsers.add_parser('version', help='display version of cfncluster') pversion.set_defaults(func=version) args, extra_args = parser.parse_known_args() logger.debug(args) if args.func.__name__ == 'command': args.func(args, extra_args) else: if extra_args != []: parser.print_usage() print('Invalid arguments %s...' % extra_args) sys.exit(1) args.func(args)
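# Illustration only (not part of the original CLI module): driving the parser
# programmatically by faking argv; the cluster name and region below are
# placeholders, not defaults of the tool.
def _example_create_invocation():
    sys.argv = ['cfncluster', 'create', 'mycluster', '--region', 'us-east-1']
    main()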
from typing import Any, Generic, List, Optional, Union, cast from thirdweb.abi import TokenERC1155 from thirdweb.common.error import NotFoundException from thirdweb.common.nft import fetch_token_metadata from thirdweb.constants.currency import ZERO_ADDRESS from thirdweb.constants.role import Role, get_role_hash from thirdweb.core.classes.contract_wrapper import ContractWrapper from thirdweb.core.classes.base_contract import BaseContract from thirdweb.core.classes.ipfs_storage import IpfsStorage from thirdweb.types.contract import TERC1155 from thirdweb.types.nft import ( EditionMetadata, EditionMetadataOwner, NFTMetadata, QueryAllParams, ) from web3.eth import TxReceipt class ERC1155(Generic[TERC1155], BaseContract[TERC1155]): _storage: IpfsStorage def __init__( self, contract_wrapper: ContractWrapper, storage: IpfsStorage, ): super().__init__(contract_wrapper) self._storage = storage """ READ FUNCTIONS """ def get(self, token_id: int) -> EditionMetadata: """ Get metadata for a token ```python nft = contract.get(0) print(nft) ``` :param token_id: token ID to check the metadata for :return: Metadata for the token """ try: supply = self._contract_wrapper._contract_abi.total_supply.call(token_id) except: supply = 0 metadata = self._get_token_metadata(token_id) return EditionMetadata(metadata, supply) def get_all( self, query_params: QueryAllParams = QueryAllParams() ) -> List[EditionMetadata]: """ Get the metadata for all tokens on the contract ```python metadatas = contract.get_all() print(metadatas) ``` :param query_params: optional QueryAllParams to define which tokens to get metadata for :return: list of metadata for all tokens """ max_id = min(query_params.start + query_params.count, self.get_total_count()) return [self.get(token_id) for token_id in range(query_params.start, max_id)] def get_total_count(self) -> int: """ Get the total number of NFTs on the contract :return: total number of tokens on the contract """ return self._contract_wrapper._contract_abi.next_token_id_to_mint.call() def get_owned(self, address: str = "") -> List[EditionMetadataOwner]: """ Get the metadata for all the tokens owned by an address ```python address = "{{wallet_address}}" owned = contract.get_owned(address) print(owned) ``` :param address: address to get the owned tokens for :return: list of metadata for all tokens owned by the address """ owner = address if address else self._contract_wrapper.get_signer_address() max_id = self._contract_wrapper._contract_abi.next_token_id_to_mint.call() balances = self._contract_wrapper._contract_abi.balance_of_batch.call( [owner for i in range(max_id)], [id for id in range(max_id)], ) metadatas = [] for index, balance in enumerate(balances): metadata = self.get(index) metadatas.append( EditionMetadataOwner(metadata.metadata, metadata.supply, owner, balance) ) return metadatas def total_supply(self, token_id: int) -> int: """ Get the total number of tokens on the contract :return: total number of tokens on the contract """ return self._contract_wrapper._contract_abi.total_supply.call(token_id) def balance(self, token_id: int) -> int: """ Get the connected wallets balance of a specific token :param token_id: token ID to check the balance for :return: balance of the token """ return self.balance_of(self._contract_wrapper.get_signer_address(), token_id) def balance_of(self, address: str, token_id: int) -> int: """ Get a specific wallets balance of a specific token ```python address = "{{wallet_address}}" token_id = 0 balance = contract.balance_of(address, token_id) ``` 
:param address: address to check the balance for :param token_id: token ID to check the balance for :return: balance of the token """ return self._contract_wrapper._contract_abi.balance_of.call(address, token_id) def is_transfer_restricted(self) -> bool: """ Check if the contract is restricted so transfers can only be made by admins :return: True if the contract is restricted, False otherwise """ anyone_can_transfer = self._contract_wrapper._contract_abi.has_role.call( get_role_hash(Role.TRANSFER), ZERO_ADDRESS ) return not anyone_can_transfer def is_approved(self, address: str, operator: str) -> bool: """ Check if an operator address is approved to manage a target addresses assets :param address: address whose assets to check the approval of :param operator: operator address to check the approval for :return: True if the operator is approved, False otherwise """ return self._contract_wrapper._contract_abi.is_approved_for_all.call( address, operator ) """ WRITE FUNCTIONS """ def transfer( self, to: str, token_id: int, amount: int, data: Union[bytes, str] = b"0" ) -> TxReceipt: """ Transfer a specified token from the connected wallet to a specified address. ```python to = "{{wallet_address}}" token_id = 0 amount = 1 receipt = contract.transfer(to, token_id, amount) ``` :param to: wallet address to transfer the tokens to :param token_id: the specific token ID to transfer :param amount: the amount of tokens to transfer :returns: transaction receipt of the transfer """ fr = self._contract_wrapper.get_signer_address() return self._contract_wrapper.send_transaction( "safe_transfer_from", [fr, to, token_id, amount, data], ) def burn(self, token_id: int, amount: int) -> TxReceipt: """ Burn a specified amount of tokens from the connected wallet. :param amount: amount of tokens to burn :returns: transaction receipt of the burn """ account = self._contract_wrapper.get_signer_address() return self._contract_wrapper.send_transaction( "burn", [account, token_id, amount] ) def set_approval_for_all(self, operator: str, approved: bool) -> TxReceipt: """ Set the approval for an operator address to manage the connected wallets assets :param operator: operator address to set the approval for :param approved: True if the operator is approved, False otherwise """ return self._contract_wrapper.send_transaction( "set_approval_for_all", [operator, approved] ) """ INTERNAL FUNCTIONS """ def _get_token_metadata(self, token_id: int) -> NFTMetadata: token_uri = self._contract_wrapper._contract_abi.uri.call(token_id) if not token_uri: raise NotFoundException(str(token_id)) return fetch_token_metadata(token_id, token_uri, self._storage)
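# Usage sketch, assuming an ERC1155 instance has already been obtained from
# the SDK's contract-loading machinery (the wrapper/storage plumbing above);
# the recipient address and token id are placeholders.
def example_read_and_transfer(edition, recipient):
    print(edition.get_total_count())      # number of token ids minted so far
    nft = edition.get(0)                  # EditionMetadata for token id 0
    print(nft.metadata, nft.supply)
    if edition.balance(0) > 0:
        receipt = edition.transfer(recipient, token_id=0, amount=1)
        print(receipt)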
# This file contains functions that transform images.
import tensorflow as tf
import numpy as np
import scipy.ndimage


def uniform_random(x_nat, epsilon):
    """Input: batch of images; type: ndarray; size: (batch, 784)
       Output: batch of images with uniform noise; we use the clip function to ensure that
       the values stay in the interval (0, 1); type: ndarray; size: (batch, 784);
    """
    x = x_nat + np.random.uniform(-epsilon, epsilon, x_nat.shape)
    # clip to keep pixel values in the documented (0, 1) range
    return np.clip(x, 0.0, 1.0)


def grando_transform_rotate_batch(batch_of_images, rotate):
    """Input: batch of images; type: ndarray; size: (batch, 784); angle; type: int;
       Output: batch of rotated images; type: ndarray; size: (batch, 784);
    """
    t = np.array([])
    reshape_batch_of_images = batch_of_images.reshape(len(batch_of_images), 28, 28)
    for i in reshape_batch_of_images:
        t = np.append(t, scipy.ndimage.rotate(i, rotate, reshape=False))
    t = t.reshape(len(batch_of_images), 784)
    return t


def grando_transform_shift_batch(batch_of_images, shift):
    """Input: batch of images; type: ndarray; size: (batch, 784); shift; type: float;
       Output: batch of shifted images; type: ndarray; size: (batch, 784);
    """
    t = np.array([])
    reshape_batch_of_images = batch_of_images.reshape(len(batch_of_images), 28, 28)
    for i in reshape_batch_of_images:
        t = np.append(t, scipy.ndimage.shift(i, float(shift)))
    t = t.reshape(len(batch_of_images), 784)
    return t


def grando_transform_gauss_batch(batch_of_images, mean, variance):
    """Input: batch of images; type: ndarray; size: (batch, 784)
       Output: batch of images with Gaussian noise; we use the clip function to ensure that
       the values stay in the interval (0, 1); type: ndarray; size: (batch, 784);
    """
    x = batch_of_images + np.random.normal(mean, variance, batch_of_images.shape)
    # clip to keep pixel values in the documented (0, 1) range
    return np.clip(x, 0.0, 1.0)
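# Quick demonstration on random data standing in for flattened 28x28 images;
# the batch below is synthetic and purely illustrative.
if __name__ == "__main__":
    batch = np.random.uniform(0.0, 1.0, size=(8, 784))
    noisy = uniform_random(batch, epsilon=0.1)
    rotated = grando_transform_rotate_batch(batch, rotate=15)
    shifted = grando_transform_shift_batch(batch, shift=2.0)
    gaussed = grando_transform_gauss_batch(batch, mean=0.0, variance=0.05)
    print(noisy.shape, rotated.shape, shifted.shape, gaussed.shape)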
""" Class definitions for the fundamental objects in the agent-based model. @author: Noah Burrell <[email protected]> """ from scipy.stats import binom, norm, halfnorm, uniform class Student: """ A (truthfully-reporting) Student object. Attributes ---------- id : int. Unique identifier. type : str "active" or "passive". Denotes effort level for binary effort settings. lam : float. Denotes effort level for continuous effort settings. bias : float. Denotes bias when grading submissions. payment : int or float (depending on the mechanism). Stores payments received over the course of a semester. grades : dict. Stores the grades assigned over the course of a semester. grades = { assignment_number (int): { submission_number (int): score (int 0-10)} } """ def __init__(self, num, grader_type="active"): """ Creates a Student object. Parameters ---------- num : int. Identification number. grader_type : str "active" or "passive". """ self.id = num self.type = grader_type self.payment = 0 self.bias = norm.rvs(loc=0, scale=1, random_state=None) lam = 0 while lam == 0: lam = uniform.rvs(loc=0, scale=2, random_state=None) self.lam = lam self.grades = {} def report(self, signal): """ Generates a report for the Student object given a signal. Parameters ---------- signal : int 0-10. Signal observed by the Student grading a submission. Returns ------- report : int 0-10. Equal to signal, since Student objects report truthfully. """ report = signal return report class StrategicStudent(Student): """ A Student object that reports strategically. Attributes ---------- Inherits all attributes from Student class. Additional attributes: strategy : str one of the following: - "TRUTH" (with this strategy, StrategicStudents behave just like regular Students) - "NOISE" - "FIX-BIAS" - "MERGE" - "PRIOR" - "ALL10" - "HEDGE" A strategy to follow. See the paper for a description of each strategy. bias_correction : float. Denotes a bias correction term that is accessed when using the "Fix-Bias" strategy. """ def __init__(self, num, strat="TRUTH"): """ Creates a StrategicStudent object. Parameters ---------- num : int. Identification number. strat : str. A strategy to follow (from the list above). """ super().__init__(num) self.strategy = strat bias_correction_magnitude = halfnorm.rvs(loc=0, scale=1, random_state=None) bias_correction_sign = -1 if self.bias < 0: bias_correction_sign = 1 self.bias_correction = bias_correction_sign * bias_correction_magnitude def report(self, signal): """ Generates a report for the StrategicStudent object given a signal. Supersedes report() method from Student class. Parameters ---------- signal : int 0-10. Signal observed by the StrategicStudent grading a submission. Returns ------- report : int 0-10. Output of applying the StrategicStudent's given strategy to the signal. Note---if the strategy attribute does not match one of the strategies from the list above, this function just returns report = signal, just as for Student objects and StrategicStudent objects with the strategy "TRUTH". 
""" sigma = self.strategy if sigma == "NOISE": noise = norm.rvs(loc=0, scale=1, random_state=None) noisy_signal = signal + noise report = int(round(noisy_signal)) if report > 10: report = 10 elif report < 0: report = 0 elif sigma == "FIX-BIAS": corrected_signal = signal + self.bias_correction report = int(round(corrected_signal)) if report > 10: report = 10 elif report < 0: report = 0 elif sigma == "MERGE": projection = { 0: 0, 1: 3, 2: 3, 3: 3, 4: 6, 5: 6, 6: 6, 7: 7, 8: 7, 9: 7, 10: 10 } report = projection[signal] elif sigma == "PRIOR": report = 7 elif sigma == "ALL10": report = 10 elif sigma == "HEDGE": posterior = (7 + signal)/2.0 report = int(round(posterior)) else: report = signal return report class Submission: """ A Submission object. Attributes ---------- student_id : int. Unique identifier for the submission; corresponds to id number of Student who "turned in" this submission for the given assignment. assignment_number : int. Assignment identifier. Semesters consist of one or more assignments; the meta-grading mechanism is applied sequentially over the course of a semester, once for each assignment, to calculate payments to the peer grading agents. true_grade : int 0-10. The ground truth score for the submission. grades : dict. Stores the reports from each Student who graded this submission. grades = {grader id (int): score (int 0-10) } """ def __init__(self, s_id, assignment_num): """ Creates a Submission object. Parameters ---------- s_id : int submission identification number. assignment_num : int assignment identification number. """ self.student_id = s_id self.assignment_number = assignment_num self.true_grade = binom.rvs(n=10, p=0.7, random_state=None) self.grades = {}
# Copyright (C) 2012 Ion Torrent Systems, Inc. All Rights Reserved # from django.test import TestCase # from mockito import mock, when, any, verify, contains # import iondb.rundb.data.tasks # from django.contrib.auth.models import User # from iondb.rundb.tests.views.report.test_report_action import verifyMessage # import tempfile # import shutil # from iondb.rundb.models import Experiment, Results, ReportStorage # from datetime import datetime # from iondb.rundb.tests.models.test_results import ResultsTest, create_result # from iondb.rundb.data import result_archive as ion_archiveResult # class ReportActionTaskTest(TestCase): # fixtures = ['iondb/rundb/tests/views/report/fixtures/globalconfig.json', # 'iondb/rundb/tests/models/fixtures/groups.json', # 'iondb/rundb/tests/models/fixtures/users.json'] # def setUp(self): # self.ionadmin = User.objects.get(username='ionadmin') # self.report = create_result(self) # def test_export_report(self): # _id = self.report.id # username = self.ionadmin.username # comment = "abc" # # returns method results directly # result = iondb.rundb.data.tasks.export_report(username, _id, comment) # self.assertEquals(result[0], True) # verifyMessage(self, True, self.report.resultsName) # verify(proxy).export_report(any(), contains(comment)) # ''' # def test_prune_report(self): # _id = self.report.id # username = self.ionadmin.username # comment = "abc" # proxy = mock() # when(xmlrpclib).ServerProxy(any(), allow_none=any()).thenReturn(proxy) # proxyResult = True # when(proxy).prune_report(any(), any()).thenReturn(proxyResult) # # returns method results directly # result = iondb.rundb.data.tasks.prune_report(username, _id, comment) # self.assertEquals(result[0], proxyResult) # verifyMessage(self, proxyResult, self.report.resultsName) # verify(proxy).prune_report(any(), contains(comment)) # def test_archive_report(self): # _id = self.report.id # username = self.ionadmin.username # comment = "abc" # proxy = mock() # when(xmlrpclib).ServerProxy(any(), allow_none=any()).thenReturn(proxy) # proxyResult = True # when(proxy).archive_report(any(), any()).thenReturn(proxyResult) # # returns method results directly # result = iondb.rundb.data.tasks.archive_report(username, _id, comment) # self.assertEquals(result[0], proxyResult) # verifyMessage(self, proxyResult, self.report.resultsName) # verify(proxy).archive_report(any(), contains(comment)) # def test_archive_report_using_delay(self): # _id = self.report.id # username = self.ionadmin.username # comment = "abc" # proxy = mock() # when(xmlrpclib).ServerProxy(any(), allow_none=any()).thenReturn(proxy) # proxyResult = True # when(proxy).archive_report(any(), any()).thenReturn(proxyResult) # #returns ASyncResult wrapping method invocation results # result = iondb.rundb.data.tasks.archive_report.delay(username, _id, comment) # self.assertEquals(result.get()[0], proxyResult) # verifyMessage(self, proxyResult, self.report.resultsName) # verify(proxy).archive_report(any(), contains(comment)) # ''' # class ArchiveReportTaskTest(ResultsTest): # def test_sync_filesystem_and_db_report_state(self): # result = self.test_is_archived_false() # result2 = self.test_is_archived_false() # result3 = self.test_is_archived_false() # reports = Results.objects.exclude(reportStatus__in=ion_archiveResult.STATUS) # self.assertEquals(3, reports.count()) # iondb.rundb.data.tasks.sync_filesystem_and_db_report_state.delay() # self.assertEqual(0, Results.objects.filter(reportStatus__in=ion_archiveResult.STATUS).count()) # self._mark_fs_archived(result3) # 
iondb.rundb.data.tasks.sync_filesystem_and_db_report_state.delay() # self.assertEqual(1, Results.objects.filter(reportStatus__in=ion_archiveResult.STATUS).count()) # self._mark_fs_archived(result) # iondb.rundb.data.tasks.sync_filesystem_and_db_report_state.delay() # self.assertEqual(2, Results.objects.filter(reportStatus__in=ion_archiveResult.STATUS).count()) # self._mark_fs_archived(result2) # iondb.rundb.data.tasks.sync_filesystem_and_db_report_state.delay() # self.assertEqual(3, Results.objects.filter(reportStatus__in=ion_archiveResult.STATUS).count()) # reports = Results.objects.exclude(reportStatus__in=ion_archiveResult.STATUS) # self.assertEquals(0, reports.count()) # result4 = self.test_is_archived_false() # reports = Results.objects.exclude(reportStatus__in=ion_archiveResult.STATUS) # self.assertEquals(1, reports.count()) # self._mark_fs_archived(result4) # iondb.rundb.data.tasks.sync_filesystem_and_db_report_state.delay() # self.assertEqual(4, Results.objects.filter(reportStatus__in=ion_archiveResult.STATUS).count())
'''
12. Try a string operation to perform the operation below
exam_st_date = (11, 12, 2014)
Sample Output : The examination will start from : 11 / 12 / 2014

'''

exam_st_date = (11, 12, 2014)
formatted_date = str(exam_st_date).strip('()').replace(', ', ' / ')
print("The examination will start from :", formatted_date)
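# An equivalent approach (added for illustration): unpack the tuple and let
# str.format build the same sample output without any strip/replace calls.
day, month, year = exam_st_date
print("The examination will start from : {} / {} / {}".format(day, month, year))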
# Not OK def simple(l = [0]): l[0] = 1 #$ modification=l return l # Not OK def slice(l = [0]): l[0:1] = 1 #$ modification=l return l # Not OK def list_del(l = [0]): del l[0] #$ modification=l return l # Not OK def append_op(l = []): l += [1, 2, 3] #$ modification=l return l # Not OK def repeat_op(l = [0]): l *= 3 #$ modification=l return l # Not OK def append(l = []): l.append(1) #$ modification=l return l # OK def includes(l = []): x = [0] x.extend(l) x.extend([1]) return x def extends(l): l.extend([1]) #$ modification=l return l # Not OK def deferred(l = []): extends(l) return l # Not OK def nonempty(l = [5]): l.append(1) #$ modification=l return l # Not OK def dict(d = {}): d['a'] = 1 #$ modification=d return d # Not OK def dict_nonempty(d = {'a': 1}): d['a'] = 2 #$ modification=d return d # OK def dict_nonempty_nochange(d = {'a': 1}): d['a'] = 1 #$ SPURIOUS: modification=d return d def modifies(d): d['a'] = 1 #$ modification=d return d # Not OK def dict_deferred(d = {}): modifies(d) return d # Not OK def dict_method(d = {}): d.update({'a': 1}) #$ modification=d return d # Not OK def dict_method_nonempty(d = {'a': 1}): d.update({'a': 2}) #$ modification=d return d # OK def dict_method_nonempty_nochange(d = {'a': 1}): d.update({'a': 1}) #$ SPURIOUS:modification=d return d def modifies_method(d): d.update({'a': 1}) #$ modification=d return d # Not OK def dict_deferred_method(d = {}): modifies_method(d) return d # OK def dict_includes(d = {}): x = {} x.update(d) x.update({'a': 1}) return x # Not OK def dict_del(d = {'a': 1}): del d['a'] #$ modification=d return d # Not OK def dict_update_op(d = {}): x = {'a': 1} d |= x #$ modification=d return d # OK def dict_update_op_nochange(d = {}): x = {} d |= x #$ SPURIOUS: modification=d return d def sanitizer(l = []): if l: l.append(1) else: l.append(1) #$ modification=l return l def sanitizer_negated(l = [1]): if not l: l.append(1) else: l.append(1) #$ modification=l return l def sanitizer(l = []): if not l: l.append(1) #$ modification=l else: l.append(1) return l def sanitizer_negated(l = [1]): if l: l.append(1) #$ modification=l else: l.append(1) return l
# coding=UTF-8 from natcap.invest.ui import model, inputs import natcap.invest.carbon class Carbon(model.InVESTModel): def __init__(self): model.InVESTModel.__init__(self, label=u'InVEST Carbon Model', target=natcap.invest.carbon.execute, validator=natcap.invest.carbon.validate, localdoc=u'carbonstorage.html') self.cur_lulc_raster = inputs.File( args_key=u'lulc_cur_path', helptext=( u"A GDAL-supported raster representing the land-cover " u"of the current scenario."), label=u'Current Land Use/Land Cover (Raster)', validator=self.validator) self.add_input(self.cur_lulc_raster) self.carbon_pools_path = inputs.File( args_key=u'carbon_pools_path', helptext=( u"A table that maps the land-cover IDs to carbon " u"pools. The table must contain columns of 'LULC', " u"'C_above', 'C_Below', 'C_Soil', 'C_Dead' as described " u"in the User's Guide. The values in LULC must at " u"least include the LULC IDs in the land cover maps."), label=u'Carbon Pools', validator=self.validator) self.add_input(self.carbon_pools_path) self.cur_lulc_year = inputs.Text( args_key=u'lulc_cur_year', helptext=u'The calendar year of the current scenario.', interactive=False, label=u'Current Landcover Calendar Year', validator=self.validator) self.add_input(self.cur_lulc_year) self.calc_sequestration = inputs.Checkbox( helptext=( u"Check to enable sequestration analysis. This " u"requires inputs of Land Use/Land Cover maps for both " u"current and future scenarios."), args_key='calc_sequestration', label=u'Calculate Sequestration') self.add_input(self.calc_sequestration) self.fut_lulc_raster = inputs.File( args_key=u'lulc_fut_path', helptext=( u"A GDAL-supported raster representing the land-cover " u"of the future scenario. <br><br>If REDD scenario " u"analysis is enabled, this should be the reference, or " u"baseline, future scenario against which to compare " u"the REDD policy scenario."), interactive=False, label=u'Future Landcover (Raster)', validator=self.validator) self.add_input(self.fut_lulc_raster) self.fut_lulc_year = inputs.Text( args_key=u'lulc_fut_year', helptext=u'The calendar year of the future scenario.', interactive=False, label=u'Future Landcover Calendar Year', validator=self.validator) self.add_input(self.fut_lulc_year) self.redd = inputs.Checkbox( helptext=( u"Check to enable REDD scenario analysis. This " u"requires three Land Use/Land Cover maps: one for the " u"current scenario, one for the future baseline " u"scenario, and one for the future REDD policy " u"scenario."), interactive=False, args_key='do_redd', label=u'REDD Scenario Analysis') self.add_input(self.redd) self.redd_lulc_raster = inputs.File( args_key=u'lulc_redd_path', helptext=( u"A GDAL-supported raster representing the land-cover " u"of the REDD policy future scenario. 
This scenario " u"will be compared to the baseline future scenario."), interactive=False, label=u'REDD Policy (Raster)', validator=self.validator) self.add_input(self.redd_lulc_raster) self.valuation_container = inputs.Container( args_key=u'do_valuation', expandable=True, expanded=False, interactive=False, label=u'Run Valuation Model') self.add_input(self.valuation_container) self.price_per_metric_ton_of_c = inputs.Text( args_key=u'price_per_metric_ton_of_c', label=u'Price/Metric ton of carbon', validator=self.validator) self.valuation_container.add_input(self.price_per_metric_ton_of_c) self.discount_rate = inputs.Text( args_key=u'discount_rate', helptext=u'The discount rate as a floating point percent.', label=u'Market Discount in Price of Carbon (%)', validator=self.validator) self.valuation_container.add_input(self.discount_rate) self.rate_change = inputs.Text( args_key=u'rate_change', helptext=( u"The floating point percent increase of the price of " u"carbon per year."), label=u'Annual Rate of Change in Price of Carbon (%)', validator=self.validator) self.valuation_container.add_input(self.rate_change) # Set interactivity, requirement as input sufficiency changes self.calc_sequestration.sufficiency_changed.connect( self.cur_lulc_year.set_interactive) self.calc_sequestration.sufficiency_changed.connect( self.fut_lulc_raster.set_interactive) self.calc_sequestration.sufficiency_changed.connect( self.fut_lulc_year.set_interactive) self.calc_sequestration.sufficiency_changed.connect( self.redd.set_interactive) self.redd.sufficiency_changed.connect( self.redd_lulc_raster.set_interactive) self.calc_sequestration.sufficiency_changed.connect( self.valuation_container.set_interactive) def assemble_args(self): args = { self.workspace.args_key: self.workspace.value(), self.suffix.args_key: self.suffix.value(), self.cur_lulc_raster.args_key: self.cur_lulc_raster.value(), self.carbon_pools_path.args_key: self.carbon_pools_path.value(), self.valuation_container.args_key: self.valuation_container.value(), self.calc_sequestration.args_key: self.calc_sequestration.value(), self.redd.args_key: self.redd.value(), } if self.calc_sequestration.value(): args[self.redd_lulc_raster.args_key] = ( self.redd_lulc_raster.value()) args[self.fut_lulc_raster.args_key] = self.fut_lulc_raster.value() for arg in (self.cur_lulc_year, self.fut_lulc_year): args[arg.args_key] = arg.value() # Attempt to cast valuation parameters to float if self.valuation_container.value(): for arg in (self.price_per_metric_ton_of_c, self.discount_rate, self.rate_change): args[arg.args_key] = arg.value() return args
from django.db import models from django.contrib.auth.models import AbstractUser from djmoney.models.fields import MoneyField # Create your models here. class Greeting(models.Model): when = models.DateTimeField("date created", auto_now_add=True) class User(AbstractUser): email_notifications = models.BooleanField(default=True) class Meta: db_table = "auth_user" class OdomSnapshot(models.Model): """The relevant data for an update to a car's odometer reading.""" car = models.ForeignKey( "Car", null=True, related_name="snaps", on_delete=models.CASCADE ) owner = models.ForeignKey( "User", related_name="odomSnapshot", on_delete=models.CASCADE ) date = models.DateTimeField() mileage = models.FloatField() def __str__(self): return f"{self.owner}'s Odometer Snapshot for car: {self.car} on: {self.date} at: {self.mileage}" class Meta: ordering = ["-mileage"] class Todo(models.Model): """A task or action to perform on a car.""" owner = models.ForeignKey("User", related_name="todo", on_delete=models.CASCADE) car = models.ForeignKey("Car", on_delete=models.CASCADE) completionOdomSnapshot = models.ForeignKey( "OdomSnapshot", on_delete=models.SET_NULL, blank=True, null=True ) name = models.CharField(max_length=32, null=False) complete = models.BooleanField(default=False) dueMileage = models.FloatField(default=None, blank=True, null=True) dueDate = models.DateTimeField(default=None, blank=True, null=True) notes = models.TextField(default=None, blank=True, null=True) # TODO: calculate this on POST estimatedDueDate = models.BooleanField(default=False, blank=True, null=True) mileageRepeatInterval = models.FloatField(default=None, blank=True, null=True) daysRepeatInterval = models.IntegerField(default=None, blank=True, null=True) monthsRepeatInterval = models.IntegerField(default=None, blank=True, null=True) yearsRepeatInterval = models.IntegerField(default=None, blank=True, null=True) def __str__(self): return f"Todo {self.id} named: {self.name} due at {self.dueMileage} miles" class Meta: ordering = ["dueDate", "dueMileage"] class Car(models.Model): """Represents the data for a Car type.""" owner = models.ForeignKey(User, related_name="car", on_delete=models.CASCADE) name = models.CharField(max_length=32, null=False) make = models.CharField(max_length=32, blank=True, null=True) model = models.CharField(max_length=32, blank=True, null=True) year = models.IntegerField(blank=True, null=True) plate = models.CharField(max_length=10, blank=True, null=True) vin = models.CharField(max_length=10, blank=True, null=True) # image = models.ImageField(upload_to="cars", null=True) color = models.CharField(max_length=7) # TODO: autodo green to int maybe? @property def light_color(self): r = int(self.color[1:3], base=16) g = int(self.color[3:5], base=16) b = int(self.color[5:7], base=16) luma = 0.2126 * r + 0.7152 * g + 0.0722 * b return luma > 150 def __str__(self): return "Car named: {}".format(self.name) class Refueling(models.Model): """Data associated with a car's refueling event.""" owner = models.ForeignKey(User, related_name="refueling", on_delete=models.CASCADE) odomSnapshot = models.ForeignKey(OdomSnapshot, on_delete=models.CASCADE) cost = MoneyField(max_digits=6, decimal_places=2, default_currency="USD") amount = models.FloatField() def __str__(self): return f"Refueling {self.id} for cost: {self.cost} and amount: {self.amount} with snap: {self.odomSnapshot.id}"
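# Illustrative sketch (not part of the original models): the relative-luminance
# rule used by Car.light_color, written as a plain helper so the arithmetic is
# easy to check on its own. The 0.2126 / 0.7152 / 0.0722 weights and the 150
# cutoff are the same values used in the property above.
def _is_light_hex_color(color):
    r = int(color[1:3], base=16)
    g = int(color[3:5], base=16)
    b = int(color[5:7], base=16)
    luma = 0.2126 * r + 0.7152 * g + 0.0722 * b
    # e.g. "#ffffff" -> 255.0 (light), "#1a1a1a" -> 26.0 (dark)
    return luma > 150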
import sys
from sty import fg, bg, ef, rs, RgbFg

from core.game import TetrisRunner


class TerminalTetrisRunner(TetrisRunner):
    def __init__(self, width=10, height=20):
        super().__init__(width, height)

    def delete_last_lines(self, n):
        CURSOR_UP_ONE = '\033[F'  # '\x1b[1A'
        ERASE_LINE = '\033[K'  # '\x1b[2K'
        for _ in range(n):
            sys.stdout.write(CURSOR_UP_ONE)
            sys.stdout.write(ERASE_LINE)

    def erase_board(self):
        self.delete_last_lines(self.height + 3)

    def display_board(self):
        data = self.game.get_board()
        for i in range(len(data)):
            line = ''
            for cell in data[len(data) - 1 - i]:
                line = line + fg(cell[0], cell[1], cell[2]) + '@' + fg.rs
            print(line)

    def get_input(self):
        char = _Getch().__call__()
        success = False
        if char == 'w':
            success = self.game.snap_piece()
        elif char == 'a':
            success = self.game.move_piece('left')
        elif char == 's':
            success = self.game.move_piece('down')
        elif char == 'd':
            success = self.game.move_piece('right')
        elif char == 'r':
            success = self.game.rotate_piece()
        elif char == 'x':
            exit()
        return success

    def play(self):
        print("WASD to move, R to rotate, X to quit")
        super().play()


class _Getch:
    """Gets a single character from standard input. Does not echo to the screen."""
    def __init__(self):
        try:
            self.impl = _GetchWindows()
        except ImportError:
            self.impl = _GetchUnix()

    def __call__(self): return self.impl()


class _GetchUnix:
    def __init__(self):
        import tty, sys

    def __call__(self):
        import sys, tty, termios
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch


class _GetchWindows:
    def __init__(self):
        import msvcrt

    def __call__(self):
        import msvcrt
        # getch() returns bytes on Python 3; decode so the WASD comparisons
        # against str literals in get_input() also work on Windows.
        return msvcrt.getch().decode()


if __name__ == '__main__':
    runner = TerminalTetrisRunner()
    runner.play()
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import os
import shlex

from fabric import Connection

from utils import get_username_for_os


class RemoteCommandExecutionError(Exception):
    """Signal a failure in remote command execution."""

    def __init__(self, result):
        self.result = result


class RemoteCommandExecutor:
    """Execute remote commands on the cluster master node."""

    def __init__(self, cluster, username=None):
        if not username:
            username = get_username_for_os(cluster.os)
        self.__connection = Connection(
            host=cluster.master_ip,
            user=username,
            forward_agent=False,
            connect_kwargs={"key_filename": [cluster.ssh_key]},
        )
        self.__user_at_hostname = "{0}@{1}".format(username, cluster.master_ip)

    def __del__(self):
        try:
            self.__connection.close()
        except Exception as e:
            # Catch all exceptions if we fail to close the clients
            logging.warning("Exception raised when closing remote ssh client: {0}".format(e))

    def run_remote_command(
        self,
        command,
        log_error=True,
        additional_files=None,
        raise_on_error=True,
        login_shell=True,
        hide=False,
        log_output=False,
        timeout=None,
    ):
        """
        Execute remote command on the cluster master node.

        :param command: command to execute.
        :param log_error: log errors.
        :param additional_files: additional files to copy before executing script.
        :param raise_on_error: if True raises a RemoteCommandExecutionError on failures
        :param login_shell: if True prepends /bin/bash --login -c to the given command
        :param hide: do not print command output to the local stdout
        :param log_output: log the command output.
        :param timeout: interrupt connection after N seconds, default of None = no timeout
        :return: result of the execution.
        """
        if isinstance(command, list):
            command = " ".join(command)
        self._copy_additional_files(additional_files)
        logging.info("Executing remote command on {0}: {1}".format(self.__user_at_hostname, command))
        if login_shell:
            command = "/bin/bash --login -c {0}".format(shlex.quote(command))

        result = self.__connection.run(command, warn=True, pty=True, hide=hide, timeout=timeout)
        result.stdout = "\n".join(result.stdout.splitlines())
        result.stderr = "\n".join(result.stderr.splitlines())
        if log_output:
            logging.info("Command output:\n%s", result.stdout)
        if result.failed and raise_on_error:
            if log_error:
                logging.error(
                    "Command {0} failed with error:\n{1}\nand output:\n{2}".format(
                        command, result.stderr, result.stdout
                    )
                )
            raise RemoteCommandExecutionError(result)
        return result

    def run_remote_script(
        self, script_file, args=None, log_error=True, additional_files=None, hide=False, timeout=None, run_as_root=False
    ):
        """
        Execute a script remotely on the cluster master node.

        Script is copied to the master home dir before being executed.
        :param script_file: local path to the script to execute remotely.
        :param args: args to pass to the script when invoked.
        :param log_error: log errors.
        :param additional_files: list of additional files (full path) to copy before executing script.
        :param hide: do not print command output to the local stdout
        :param timeout: interrupt connection after N seconds, default of None = no timeout
        :param run_as_root: if True, execute the script as root via sudo.
        :return: result of the execution.
        """
        script_name = os.path.basename(script_file)
        self.__connection.put(script_file, script_name)
        if not args:
            args = []
        return (
            self.run_remote_command(
                ["sudo", "/bin/bash", script_name] + args,
                log_error=log_error,
                additional_files=additional_files,
                hide=hide,
                timeout=timeout,
            )
            if run_as_root
            else self.run_remote_command(
                ["/bin/bash", "--login", script_name] + args,
                log_error=log_error,
                additional_files=additional_files,
                hide=hide,
                timeout=timeout,
            )
        )

    def _copy_additional_files(self, files):
        for file in files or []:
            self.__connection.put(file, os.path.basename(file))
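# Usage sketch (added for illustration, not part of the original module). The
# "cluster" object here is a stand-in: any object exposing os, master_ip and
# ssh_key attributes works, since that is all RemoteCommandExecutor reads from
# it. The OS name, IP and key path below are made-up values; running this
# obviously requires a reachable host and a valid key.
if __name__ == "__main__":
    from collections import namedtuple

    FakeCluster = namedtuple("FakeCluster", ["os", "master_ip", "ssh_key"])
    cluster = FakeCluster(os="alinux2", master_ip="192.0.2.10", ssh_key="/path/to/key.pem")

    executor = RemoteCommandExecutor(cluster)
    result = executor.run_remote_command("hostname && uptime", log_output=True)
    print(result.stdout)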
#!python3 # -*- coding: utf-8 -*- import os, sys, time def main(): os.chdir('dist') for file in os.listdir(os.path.curdir): if 'py2.' in file: newfile = file.replace('py2.', '') os.rename(file, newfile) if __name__ == '__main__': main()
# coding=utf-8 from lib.verify import verify from plugins.BruteForce.mysql_crack import mysql_check from plugins.BruteForce.postgres_crack import postgres_check from plugins.BruteForce.ssh_crack import ssh_check class Crack(): def __init__(self): self.result = [] def pool(self, ip, ports): if verify(['3306', 'mysql'], ports, ['']): result = mysql_check(ip) if result: self.result.append(result) if verify(['22', 'SSH'], ports, ['']): result = ssh_check(ip) if result: self.result.append(result) if verify(['5432', 'PostgreSQL'], ports, ['']): result = postgres_check(ip) if result: self.result.append(result) return self.result
import os import sys import telegram.ext as tg from loguru import logger from pyrogram import Client # enable logging LOGGER = logger # if version < 3.6, stop bot. if sys.version_info[0] < 3 or sys.version_info[1] < 6: LOGGER.error("You MUST have a python version of at least 3.6! Multiple features depend on this. Bot quitting.") quit(1) ENV = bool(os.environ.get('ENV', False)) if ENV: TOKEN = os.environ.get('TOKEN', None) try: OWNER_ID = int(os.environ.get('OWNER_ID', None)) except ValueError: raise Exception("Your OWNER_ID env variable is not a valid integer.") MESSAGE_DUMP = os.environ.get('MESSAGE_DUMP', None) OWNER_USERNAME = os.environ.get("OWNER_USERNAME", None) try: SUDO_USERS = set(int(x) for x in os.environ.get("SUDO_USERS", "").split()) except ValueError: raise Exception("Your sudo users list does not contain valid integers.") try: SUPPORT_USERS = set(int(x) for x in os.environ.get("SUPPORT_USERS", "").split()) except ValueError: raise Exception("Your support users list does not contain valid integers.") try: WHITELIST_USERS = set(int(x) for x in os.environ.get("WHITELIST_USERS", "").split()) except ValueError: raise Exception("Your whitelisted users list does not contain valid integers.") try: DEV_USERS = set(int(x) for x in os.environ.get("DEV_USERS", "").split()) except ValueError: raise Exception("Your developer users list does not contain valid integers.") WEBHOOK = bool(os.environ.get('WEBHOOK', False)) URL = os.environ.get('URL', "") # Does not contain token PORT = int(os.environ.get('PORT', 5000)) CERT_PATH = os.environ.get("CERT_PATH") DB_URI = os.environ.get('DATABASE_URL') DONATION_LINK = os.environ.get('DONATION_LINK') LOAD = os.environ.get("LOAD", "").split() NO_LOAD = os.environ.get("NO_LOAD", "translation").split() DEL_CMDS = bool(os.environ.get('DEL_CMDS', False)) STRICT_GBAN = bool(os.environ.get('STRICT_GBAN', False)) WORKERS = int(os.environ.get('WORKERS', 8)) BAN_STICKER = os.environ.get('BAN_STICKER', 'CAADAgADOwADPPEcAXkko5EB3YGYAg') ALLOW_EXCL = os.environ.get('ALLOW_EXCL', False) LASTFM_API_KEY = os.environ.get('LASTFM_API_KEY', "") WALL_API = os.environ.get('WALL_API', "") MOE_API = os.environ.get('MOE_API', "") AI_API_KEY = os.environ.get('AI_API_KEY', "") MAL_CLIENT_ID = os.environ.get('MAL_CLIENT_ID', "") MAL_ACCESS_TOKEN = os.environ.get('MAL_ACCESS_TOKEN', "") MAL_REFRESH_TOKEN = os.environ.get('MAL_REFRESH_TOKEN', "") API_ID = os.environ.get('API_ID', "") API_HASH = os.environ.get('API_HASH', "") try: BL_CHATS = set(int(x) for x in os.environ.get('BL_CHATS', "").split()) except ValueError: raise Exception("Your blacklisted chats list does not contain valid integers.") else: from tg_bot.config import Development as Config TOKEN = Config.API_KEY try: OWNER_ID = int(Config.OWNER_ID) except ValueError: raise Exception("Your OWNER_ID variable is not a valid integer.") MESSAGE_DUMP = Config.MESSAGE_DUMP OWNER_USERNAME = Config.OWNER_USERNAME try: SUDO_USERS = set(int(x) for x in Config.SUDO_USERS or []) except ValueError: raise Exception("Your sudo users list does not contain valid integers.") try: SUPPORT_USERS = set(int(x) for x in Config.SUPPORT_USERS or []) except ValueError: raise Exception("Your support users list does not contain valid integers.") try: WHITELIST_USERS = set(int(x) for x in Config.WHITELIST_USERS or []) except ValueError: raise Exception("Your whitelisted users list does not contain valid integers.") try: DEV_USERS = set(int(x) for x in Config.DEV_USERS or []) except ValueError: raise Exception("Your developer users list does not 
contain valid integers.")

    WEBHOOK = Config.WEBHOOK
    URL = Config.URL
    PORT = Config.PORT
    CERT_PATH = Config.CERT_PATH

    DB_URI = Config.SQLALCHEMY_DATABASE_URI
    DONATION_LINK = Config.DONATION_LINK
    LOAD = Config.LOAD
    NO_LOAD = Config.NO_LOAD
    DEL_CMDS = Config.DEL_CMDS
    STRICT_GBAN = Config.STRICT_GBAN
    WORKERS = Config.WORKERS
    BAN_STICKER = Config.BAN_STICKER
    ALLOW_EXCL = Config.ALLOW_EXCL
    LASTFM_API_KEY = Config.LASTFM_API_KEY
    WALL_API = Config.WALL_API
    MOE_API = Config.MOE_API
    AI_API_KEY = Config.AI_API_KEY
    MAL_CLIENT_ID = Config.MAL_CLIENT_ID
    MAL_ACCESS_TOKEN = Config.MAL_ACCESS_TOKEN
    MAL_REFRESH_TOKEN = Config.MAL_REFRESH_TOKEN
    API_ID = Config.API_ID
    API_HASH = Config.API_HASH
    try:
        BL_CHATS = set(int(x) for x in Config.BL_CHATS or [])
    except ValueError:
        raise Exception("Your blacklisted chats list does not contain valid integers.")


SUDO_USERS.add(OWNER_ID)
DEV_USERS.add(OWNER_ID)

updater = tg.Updater(TOKEN, workers=WORKERS)

dispatcher = updater.dispatcher

SUDO_USERS = list(SUDO_USERS)
WHITELIST_USERS = list(WHITELIST_USERS)
SUPPORT_USERS = list(SUPPORT_USERS)

# Load at end to ensure all prev variables have been set
from tg_bot.modules.helper_funcs.handlers import CustomCommandHandler, CustomRegexHandler, CustomMessageHandler

# make sure the regex handler can take extra kwargs
tg.RegexHandler = CustomRegexHandler

# Exempt blacklisted users from MessageHandler
tg.MessageHandler = CustomMessageHandler

if ALLOW_EXCL:
    tg.CommandHandler = CustomCommandHandler

# Pyrogram Client
pg = Client(
    "Phoenix",
    api_id=API_ID,
    api_hash=API_HASH,
    bot_token=TOKEN
)
import unittest import os from checkov.dockerfile.runner import Runner from checkov.runner_filter import RunnerFilter class TestRunnerValid(unittest.TestCase): def test_runner_empty_dockerfile(self): current_dir = os.path.dirname(os.path.realpath(__file__)) valid_dir_path = current_dir + "/resources/empty_dockerfile" runner = Runner() report = runner.run(root_folder=valid_dir_path, external_checks_dir=None, runner_filter=RunnerFilter(framework='all')) self.assertEqual(report.failed_checks, []) self.assertEqual(report.parsing_errors, []) self.assertEqual(report.passed_checks, []) self.assertEqual(report.skipped_checks, []) report.print_console() def test_runner_failing_check(self): current_dir = os.path.dirname(os.path.realpath(__file__)) valid_dir_path = current_dir + "/resources/expose_port/fail" runner = Runner() report = runner.run(root_folder=valid_dir_path, external_checks_dir=None, runner_filter=RunnerFilter(framework='all',checks=['CKV_DOCKER_1'])) self.assertEqual(len(report.failed_checks), 1) self.assertEqual(report.parsing_errors, []) self.assertEqual(report.passed_checks, []) self.assertEqual(report.skipped_checks, []) report.print_console() def test_runner_failing_check_with_file_path(self): current_dir = os.path.dirname(os.path.realpath(__file__)) valid_file_path = current_dir + "/resources/expose_port/fail/Dockerfile" runner = Runner() report = runner.run( files=[valid_file_path], external_checks_dir=None, runner_filter=RunnerFilter(framework="all", checks=["CKV_DOCKER_1"]), ) self.assertEqual(len(report.failed_checks), 1) self.assertEqual(report.parsing_errors, []) self.assertEqual(report.passed_checks, []) self.assertEqual(report.skipped_checks, []) report.print_console() def test_runner_passing_check(self): current_dir = os.path.dirname(os.path.realpath(__file__)) valid_dir_path = current_dir + "/resources/expose_port/pass" runner = Runner() report = runner.run(root_folder=valid_dir_path, external_checks_dir=None, runner_filter=RunnerFilter(framework='all',checks=['CKV_DOCKER_1'])) self.assertEqual(len(report.passed_checks), 1) self.assertEqual(report.parsing_errors, []) self.assertEqual(report.failed_checks, []) self.assertEqual(report.skipped_checks, []) report.print_console() def test_runner_skip_check(self): current_dir = os.path.dirname(os.path.realpath(__file__)) valid_dir_path = current_dir + "/resources/expose_port/skip" runner = Runner() report = runner.run(root_folder=valid_dir_path, external_checks_dir=None, runner_filter=RunnerFilter(framework='all',checks=['CKV_DOCKER_1'])) self.assertEqual(len(report.skipped_checks), 1) self.assertEqual(report.parsing_errors, []) self.assertEqual(report.failed_checks, []) self.assertEqual(report.passed_checks, []) report.print_console() if __name__ == '__main__': unittest.main()
from sqlalchemy import Integer, Column, Table, Float, ForeignKey, String from db.meta import metadata Score = Table("score", metadata, Column("id", Integer(), primary_key=True), Column("user_id", Integer(), ForeignKey("user.id"), nullable=False), Column("score", Float(), primary_key=True), Column("timestamp", Float(), primary_key=True) )
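# Usage sketch (added for illustration, not part of the original module): the
# "score" Table is SQLAlchemy Core, so rows are read and written with
# select()/insert() constructs (1.4+ style shown). The connection object is an
# assumption; note that, as declared above, the primary key is composite
# (id, score, timestamp), so inserts must provide values that satisfy it.
from sqlalchemy import insert, select


def record_score(conn, row_id, user_id, score, timestamp):
    conn.execute(insert(Score).values(id=row_id, user_id=user_id, score=score, timestamp=timestamp))


def top_scores(conn, limit=10):
    stmt = select(Score.c.user_id, Score.c.score).order_by(Score.c.score.desc()).limit(limit)
    return conn.execute(stmt).fetchall()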
from ober.tokens import TokenDatabase from ober.senses import SenseDatabase import struct import numpy as np import argparse import os def get_latest_clusters_version(clusters_path="data/senses/clusters"): max_version = 0 for f in os.listdir(clusters_path): if f.endswith(".clusters"): version = 0 try: version = int(f.split(".")[0]) except: version = 0 if version > max_version: max_version = version return max_version def read_clusters(clusters_file): # sense struct (token id, sense id, num neighbors) sense_struct = struct.Struct(">i i i") # cluster member struct (neighbor id, weight) member_struct = struct.Struct(">i f") with open(clusters_file, "rb") as f: while True: data = f.read(12) if not data: break token_id, sense_id, num_members = sense_struct.unpack(data) # read in members members = [] for _ in range(num_members): members.append(member_struct.unpack(f.read(8))) # return yield (token_id, sense_id, members) def main(): parser = argparse.ArgumentParser(description="Create sense vectors out of a clusters file and word vectors") parser.add_argument("--token_db_path", help="path to the token inventory", default="data/tokens") parser.add_argument("--token_db_version", help="version of the token inventory to use", type=int) parser.add_argument("--token_vectors_version", help="version of the token vectors to use", type=int) parser.add_argument("--sense_db_path", help="path to the sense inventory", default="data/senses") parser.add_argument("--sense_db_version", help="version of the sense inventory to write to", type=int) parser.add_argument("--sense_vectors_version", help="version of the sense vectors to write to", type=int) parser.add_argument("--clusters_path", help="path to the clusters inventory", default="data/senses/clusters") parser.add_argument("--clusters_version", help="version of the clusters inventory to use", type=int) args = parser.parse_args() token_db_version = args.token_db_version if not token_db_version: token_db_version = TokenDatabase.get_latest_version(args.token_db_path) token_vectors_version = args.token_vectors_version if not token_vectors_version: token_vectors_version = TokenDatabase.get_latest_vectors_version(args.token_db_path, token_db_version) sense_db_version = args.sense_db_version if not sense_db_version: sense_db_version = SenseDatabase.get_latest_version(args.sense_db_path) + 1 sense_vectors_version = args.sense_vectors_version if not sense_vectors_version: sense_vectors_version = SenseDatabase.get_latest_vectors_version(args.sense_db_path, sense_db_version) + 1 clusters_version = args.clusters_version if not clusters_version: clusters_version = get_latest_clusters_version(args.clusters_path) print ("Loading databases ...") # load token database token_database = TokenDatabase.load(db_path=args.token_db_path, version=token_db_version, vectors_version=token_vectors_version) # create sense database sense_database = SenseDatabase(db_path=args.sense_db_path, version=sense_db_version, vectors_version=sense_vectors_version) print("Loading/creating vectors ...") # load old vectors token_vectors = token_database.get_vectors() # create new vectors sense_vectors = np.zeros((200000, sense_database.vector_size), dtype=np.float32) print ("Pooling vectors ...") # load clusters for index, cluster in enumerate(read_clusters(os.path.join(args.clusters_path, "%05d.clusters" % clusters_version))): sense_database.add_sense("%s#%d" % (token_database.decode_token(cluster[0]), cluster[1])) # make sure there is enough room in sense vectors while index >= sense_vectors.shape[0]: 
sense_vectors = np.concatenate((sense_vectors, np.zeros_like(sense_vectors)), axis=0) sense_vector = sense_vectors[index] weight_total = 0 for member in cluster[2]: sense_vector += token_vectors[member[0]] * member[1] weight_total += member[1] sense_vector /= weight_total sense_vectors[index] = sense_vector print ("Saving ...") # update vectors and save sense_database.update_vectors(sense_vectors[:len(sense_database)]) sense_database.save() if __name__ == "__main__": main()
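# Illustrative counterpart to read_clusters (added for clarity, not part of the
# original script): writes clusters in the same big-endian binary layout that
# read_clusters expects -- a (token id, sense id, member count) header followed
# by (neighbor id, weight) pairs. The file name and cluster data in the
# commented round-trip check are made up.
def write_clusters(clusters_file, clusters):
    sense_struct = struct.Struct(">i i i")
    member_struct = struct.Struct(">i f")
    with open(clusters_file, "wb") as f:
        for token_id, sense_id, members in clusters:
            f.write(sense_struct.pack(token_id, sense_id, len(members)))
            for neighbor_id, weight in members:
                f.write(member_struct.pack(neighbor_id, weight))


# Round-trip check of the format:
# write_clusters("demo.clusters", [(7, 0, [(12, 0.9), (35, 0.4)])])
# print(list(read_clusters("demo.clusters")))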
# -*- coding: utf-8 -*-
"""
Model page with tabs containing
    - App description
    - How to report issues
    - About contributors.
"""
import json

import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input
from dash.dependencies import Output
from mrsimulator import __version__ as mrsim_version

from app import __version__ as mrapp_version
from app import app

__author__ = "Deepansh J. Srivastava"
__email__ = "[email protected]"

PATH = "config/"

# Get info from JSON file.
with open(PATH + "about.json", "r") as f:
    content = json.load(f)

about_ = content["about"]

ABOUT_CONTENT = (
    "Mrsimulator web-app is a plotly-dash user interface to the mrsimulator package "
    "designed for fast and easy solid-state NMR spectrum simulation. Both projects "
    "are open-source and maintained by the community. If you would like to contribute "
    "to the project, fork our Github repositories and start contributing."
)

LINK_MRSIMULATOR_LIB = "https://github.com/DeepanshS/mrsimulator"
LINK_MRSIMULATOR_APP = "https://github.com/DeepanshS/mrsimulator-app"


def about():
    title = dbc.ModalHeader("About")

    def make_row_element(name, link=None, version="", element="td"):
        link = html.A(name, href=link, target="_blank") if link is not None else name
        contents = [
            html.Td(item) if element == "td" else html.Th(item)
            for item in [link, version]
        ]
        return html.Thead(html.Tr(contents))

    table = html.Table(
        [
            make_row_element(html.B("Projects"), None, html.B("Version")),
            make_row_element("Mrsimulator", LINK_MRSIMULATOR_LIB, mrsim_version),
            make_row_element("Mrsimulator-App", LINK_MRSIMULATOR_APP, mrapp_version),
        ]
    )

    content = [ABOUT_CONTENT, table]
    modal = dbc.Modal(
        [title, dbc.ModalBody(content)],
        size="lg",
        id="modal-about",
        role="document",
        className="modal-dialogue",
    )

    app.clientside_callback(
        """function (value) {
            if (value == null) throw window.dash_clientside.PreventUpdate;
            return true;
        }""",
        Output("modal-about", "is_open"),
        Input("modal-about-button", "n_clicks"),
        prevent_initial_call=True,
    )

    return modal


div = []


def get_contents(content):
    if not isinstance(content, (list, dict)):
        return [dcc.Markdown(content)]
    if isinstance(content, list):
        return [html.Ul([html.Li(item) for item in content])]
    return dbc.Row(
        [
            dbc.Col([html.H6(item), html.Div(get_contents(content[item]))], md=12, lg=6)
            for item in content.keys()
        ]
    )


list_ = []
for item in about_.keys():
    list_.append(
        dbc.Modal(
            [dbc.ModalHeader(item), dbc.ModalBody(get_contents(about_[item]))],
            size="lg",
            id=f"modal-{item}",
            role="document",
            className="modal-dialogue",
        )
    )

    app.clientside_callback(
        """function (value) {
            if (value == null) {
                throw window.dash_clientside.PreventUpdate;
            }
            return true;
        }""",
        Output(f"modal-{item}", "is_open"),
        Input(f"modal-{item}-button", "n_clicks"),
        prevent_initial_call=True,
    )

about_modals = html.Div([*list_, about()])
accuracy_requirement = 0.75 analytical_delay = True auto_delay_chain_sizing = False bank_select = "bank_select" bitcell = "bitcell_1port" bitcell_array = "bitcell_array" buf_dec = "pbuf" check_lvsdrc = False col_cap = "col_cap" col_cap_array = "col_cap_array" column_mux_array = "column_mux_array" config_file = "/home/peepu/VSDSRAM_sky130/myconfig_sky130_2.py" control_logic = "control_logic" coverage = 0 debug = False decoder = "hierarchical_decoder" delay_chain = "delay_chain" delay_chain_fanout_per_stage = 4 delay_chain_stages = 9 dff = "dff" dff_array = "dff_array" drc_exe = None drc_name = "" dummy_bitcell = "dummy_bitcell_1port" inline_lvsdrc = False inv_dec = "pinv" is_unit_test = False keep_temp = False load_scales = [0.25, 1, 4] local_array_size = 0 lvs_exe = None lvs_name = "" magic_exe = None nand2_dec = "pnand2" nand3_dec = "pnand3" nand4_dec = "pnand4" netlist_only = False nominal_corner_only = False num_banks = 1 num_r_ports = 0 num_rw_ports = 1 num_spare_cols = 0 num_spare_rows = 0 num_threads = 2 num_w_ports = 0 num_words = 16 openram_tech = "/home/peepu/VSDSRAM_sky130/OpenRAM/technology/sky130A/" openram_temp = "/tmp/openram_peepu_13915_temp/" output_extended_config = True output_name = "sram_2_16_sky130A" output_path = "/home/peepu/VSDSRAM_sky130/temp/" overridden = {'__name__': True, '__doc__': True, '__package__': True, '__loader__': True, '__spec__': True, '__file__': True, '__cached__': True, '__builtins__': True, 'word_size': True, 'num_words': True, 'tech_name': True, 'process_corners': True, 'supply_voltages': True, 'output_path': True, 'output_name': True} perimeter_pins = True pex_exe = None pex_name = "" precharge_array = "precharge_array" print_banner = True process_corners = ['TT'] ptx = "ptx" rbl_delay_percentage = 0.5 replica_bitcell = "replica_bitcell_1port" replica_bitline = "replica_bitline" route_supplies = "tree" row_cap = "row_cap" row_cap_array = "row_cap_array" sense_amp = "sense_amp" sense_amp_array = "sense_amp_array" slew_scales = [0.25, 1, 8] spice_exe = "" spice_name = "" supply_voltages = [1.8] tech_name = "sky130A" temperatures = [0, 25, 100] tri_gate = "tri_gate" tri_gate_array = "tri_gate_array" trim_netlist = False use_pex = False verbose_level = 0 word_size = 2 wordline_driver = "wordline_driver" words_per_row = 1 write_driver = "write_driver" write_driver_array = "write_driver_array" write_mask_and_array = "write_mask_and_array" write_size = None
class Setup: def __init__(self, downloadInfo, **kwargs): self.downloadInfo = downloadInfo if self.downloadInfo.type.isVideo(): self.unmuteVideo = kwargs.get("unmuteVideo", True) self.updateTrack = kwargs.get("updateTrack", False) self.priority = kwargs.get("priority", 0) * 2 class State: STATE_FALSE = 0 STATE_PROCESSING = 1 STATE_TRUE = 2 def __init__(self, state=STATE_FALSE): self._state = state def setFalse(self): self._state = self.STATE_FALSE def isFalse(self): return self._state == self.STATE_FALSE def setProcessing(self): self._state = self.STATE_PROCESSING def isProcessing(self): return self._state == self.STATE_PROCESSING def setTrue(self): self._state = self.STATE_TRUE def isTrue(self): return self._state == self.STATE_TRUE class Status: PREPARING = "preparing" DOWNLOADING = "downloading" WAITING = "waiting" UPDATING = "updating" ENCODING = "encoding" DONE = "done" def __init__(self): self.pauseState = State() self.terminateState = State() self._status = Status.PREPARING self._waitingTime = None self._updateFound = False self._skipWaiting = False self._skipDownload = False self._error = None def isPreparing(self): return self._status == Status.PREPARING def setDownloading(self): self._status = Status.DOWNLOADING def isDownloading(self): return self._status == Status.DOWNLOADING def setWaiting(self): self._status = Status.WAITING def isWaiting(self): return self._status == Status.WAITING def setWaitingTime(self, waitingTime): self._waitingTime = waitingTime def getWaitingTime(self): return self._waitingTime def setUpdating(self): self._status = Status.UPDATING def isUpdating(self): return self._status == Status.UPDATING def setEncoding(self): self._status = Status.ENCODING def isEncoding(self): return self._status == Status.ENCODING def setDone(self): self._status = Status.DONE def isDone(self): return self._status == Status.DONE def setUpdateFound(self): self._updateFound = True def isUpdateFound(self): return self._updateFound def setSkipWaiting(self, skipWaiting): self._skipWaiting = skipWaiting def isWaitingSkipped(self): return self._skipWaiting def setDownloadSkip(self): self._skipDownload = True def isDownloadSkipped(self): return self._skipDownload def raiseError(self, error): self._error = error self.terminateState.setProcessing() def getError(self): return self._error class Progress: def __init__(self): self.file = 0 self.totalFiles = 0 self.seconds = 0 self.totalSeconds = 0 self.size = "0.0B" self.totalSize = "0.0B" self.byteSize = 0 self.totalByteSize = 0 @staticmethod def getPercentage(part, whole): return (part / (whole or 1)) * 100 @property def fileProgress(self): return self.getPercentage(self.file, self.totalFiles) @property def timeProgress(self): return self.getPercentage(self.seconds, self.totalSeconds) @property def byteSizeProgress(self): return self.getPercentage(self.byteSize, self.totalByteSize)
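# Minimal sketch (added for illustration, not part of the original module)
# showing how Progress derives its percentage properties, including the
# divide-by-zero guard in getPercentage while a total is still 0.
if __name__ == "__main__":
    progress = Progress()
    print(progress.fileProgress)   # 0.0 -- totals default to 0, the guard kicks in

    progress.totalFiles = 4
    progress.file = 1
    progress.totalSeconds = 120
    progress.seconds = 30
    print(progress.fileProgress)   # 25.0
    print(progress.timeProgress)   # 25.0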
# ***************************************************************************** # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # ***************************************************************************** import os import torch from torch.utils.ffi import create_extension abs_path = os.path.dirname(os.path.realpath(__file__)) extra_objects = [abs_path + '/wavenet_infer.so'] extra_includes = [abs_path] ffi = create_extension( 'nv_wavenet_ext', headers=['wavenet_infer_wrapper.h'], sources=['wavenet_infer_wrapper.c'], define_macros=[('WITH_CUDA', None)], relative_to=__file__, with_cuda=True, extra_objects=extra_objects, include_dirs=extra_includes ) if __name__ == '__main__': ffi.build()
""" Author: Matt Hanson Created: 27/08/2020 11:13 AM """ import pandas as pd import os import numpy as np from supporting_functions.conversions import convert_RH_vpa from supporting_functions.woodward_2020_params import get_woodward_mean_full_params test_dir = os.path.join(os.path.dirname(__file__), 'test_data') def establish_peyman_input(return_pet=False): # use the scott farm so that it doesn't need irrigation # time period [2010 - 2013) # load weather data weather_path = os.path.join(test_dir, 'hamilton_ruakura_ews2010-2013_{}.csv') pressure = pd.read_csv(os.path.join(test_dir, 'hamilton_AWS_pressure.csv'), skiprows=8).loc[:, ['year', 'doy', 'pmsl']].set_index(['year', 'doy']) rain = pd.read_csv(weather_path.format('rain')).loc[:, ['year', 'doy', 'rain']].set_index(['year', 'doy']) temp = pd.read_csv(weather_path.format('temp')).loc[:, ['year', 'doy', 'tmax', 'tmin']].set_index(['year', 'doy']) rad = pd.read_csv(weather_path.format('rad')).loc[:, ['year', 'doy', 'radn']].set_index(['year', 'doy']) wind = pd.read_csv(weather_path.format('wind')).loc[:, ['year', 'doy', 'wind']].set_index(['year', 'doy']) pet = pd.read_csv(weather_path.format('pet')).loc[:, ['year', 'doy', 'pet']].set_index(['year', 'doy']) rh = pd.read_csv(weather_path.format('rh')).loc[:, ['year', 'doy', 'rh']] rh.loc[:, 'rh'] = pd.to_numeric(rh.rh, errors='coerce') rh = rh.groupby(['year', 'doy']).mean() dates = pd.Series(pd.date_range('2010-01-01', '2012-12-31')) matrix_weather = pd.DataFrame({'year': dates.dt.year, 'doy': dates.dt.dayofyear, 'to_delete': 1}).set_index(['year', 'doy']) matrix_weather = pd.merge(matrix_weather, temp, how='outer', left_index=True, right_index=True) matrix_weather = pd.merge(matrix_weather, rain, how='outer', left_index=True, right_index=True) matrix_weather = pd.merge(matrix_weather, rad, how='outer', left_index=True, right_index=True) matrix_weather = pd.merge(matrix_weather, rh, how='outer', left_index=True, right_index=True) matrix_weather = pd.merge(matrix_weather, wind, how='outer', left_index=True, right_index=True) matrix_weather = pd.merge(matrix_weather, pet, how='outer', left_index=True, right_index=True) matrix_weather = pd.merge(matrix_weather, pressure, how='outer', left_index=True, right_index=True) matrix_weather.loc[:, 'vpa'] = convert_RH_vpa(matrix_weather.loc[:, 'rh'], matrix_weather.loc[:, 'tmin'], matrix_weather.loc[:, 'tmax']) matrix_weather = matrix_weather.fillna(method='ffill') if return_pet: matrix_weather.drop(columns=['rh', 'to_delete', 'wind', 'vpa', 'pmsl'], inplace=True) else: matrix_weather.drop(columns=['rh', 'to_delete', 'pet', 'pmsl'], inplace=True) matrix_weather.loc[:, 'max_irr'] = 10. matrix_weather.loc[:, 'irr_trig'] = 0 matrix_weather.loc[:, 'irr_targ'] = 1 matrix_weather.loc[:, 'irr_trig_store'] = 0 matrix_weather.loc[:, 'irr_targ_store'] = 1 matrix_weather.loc[:, 'external_inflow'] = 0 matrix_weather.reset_index(inplace=True) # load harvest data from Simon woodward's paper harvest_nm = 'harvest_Scott_0.txt' days_harvest = pd.read_csv(os.path.join(test_dir, harvest_nm), delim_whitespace=True, names=['year', 'doy', 'percent_harvest'] ).astype(int) # floor matches what simon did. 
days_harvest = days_harvest.loc[(days_harvest.year >= 2010) & (days_harvest.year < 2013)] days_harvest.loc[:, 'frac_harv'] = days_harvest.loc[:, 'percent_harvest'] / 100 days_harvest.loc[:, 'harv_trig'] = 0 days_harvest.loc[:, 'harv_targ'] = 0 days_harvest.loc[:, 'weed_dm_frac'] = 0 days_harvest.loc[:, 'reseed_trig'] = -1 days_harvest.loc[:, 'reseed_basal'] = 1 days_harvest.drop(columns=['percent_harvest'], inplace=True) # load parameters from simon woodward's paper params = get_woodward_mean_full_params('scott') doy_irr = [0] return params, matrix_weather, days_harvest, doy_irr def _compair_pet(): """just to compaire the pet and peyman results, the are slightly differnt, but I think that is due to different methods of calculating PET,""" from basgra_python import run_basgra_nz verbose = False params, matrix_weather, days_harvest, doy_irr = establish_peyman_input(False) peyman_out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, dll_path='default', supply_pet=False) params, matrix_weather, days_harvest, doy_irr = establish_peyman_input(True) pet_out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, dll_path='default', supply_pet=True) from supporting_functions.plotting import plot_multiple_results plot_multiple_results({'pet': pet_out, 'peyman': peyman_out}) def establish_org_input(site='scott'): if site == 'scott': harvest_nm = 'harvest_Scott_0.txt' weather_nm = 'weather_Scott.txt' # col = 1 + 8 * (1) elif site == 'lincoln': harvest_nm = 'harvest_Lincoln_0.txt' weather_nm = 'weather_Lincoln.txt' # col = 1 + 8 * (3 - 1) else: raise ValueError('unexpected site') params = get_woodward_mean_full_params(site) matrix_weather = pd.read_csv(os.path.join(test_dir, weather_nm), delim_whitespace=True, index_col=0, header=0, names=['year', 'doy', 'tmin', 'tmax', 'rain', 'radn', 'pet']) # set start date as doy 121 2011 idx = (matrix_weather.year > 2011) | ((matrix_weather.year == 2011) & (matrix_weather.doy >= 121)) matrix_weather = matrix_weather.loc[idx].reset_index(drop=True) # set end date as doy 120, 2017 idx = (matrix_weather.year < 2017) | ((matrix_weather.year == 2017) & (matrix_weather.doy <= 120)) matrix_weather = matrix_weather.loc[idx].reset_index(drop=True) matrix_weather.loc[:, 'max_irr'] = 10. matrix_weather.loc[:, 'irr_trig'] = 0 matrix_weather.loc[:, 'irr_targ'] = 1 matrix_weather.loc[:, 'irr_trig_store'] = 0 matrix_weather.loc[:, 'irr_targ_store'] = 1 matrix_weather.loc[:, 'external_inflow'] = 0 days_harvest = pd.read_csv(os.path.join(test_dir, harvest_nm), delim_whitespace=True, names=['year', 'doy', 'percent_harvest'] ).astype(int) # floor matches what simon did. 
days_harvest.loc[:, 'frac_harv'] = days_harvest.loc[:, 'percent_harvest'] / 100 days_harvest.loc[:, 'harv_trig'] = 0 days_harvest.loc[:, 'harv_targ'] = 0 days_harvest.loc[:, 'weed_dm_frac'] = 0 days_harvest.loc[:, 'reseed_trig'] = -1 days_harvest.loc[:, 'reseed_basal'] = 1 days_harvest.drop(columns=['percent_harvest'], inplace=True) doy_irr = [0] return params, matrix_weather, days_harvest, doy_irr def clean_harvest(days_harvest, matrix_weather): stop_year = matrix_weather['year'].max() stop_day = matrix_weather.loc[matrix_weather.year == stop_year, 'doy'].max() days_harvest.loc[(days_harvest.year == stop_year) & (days_harvest.doy > stop_day), 'year'] = -1 # cull harvest after end of weather data days_harvest = days_harvest.loc[days_harvest.year > 0] # the size matching is handled internally return days_harvest def get_org_correct_values(): sample_output_path = os.path.join(test_dir, 'sample_org_output.csv') sample_data = pd.read_csv(sample_output_path, index_col=0).astype(float) # add in new features of data sample_data.loc[:, 'IRRIG'] = 0 # new data, check return sample_data def get_woodward_weather(): matrix_weather = pd.read_csv(os.path.join(test_dir, 'weather_Lincoln.txt'), delim_whitespace=True, index_col=0, header=0, names=['year', 'doy', 'tmin', 'tmax', 'rain', 'radn', 'pet']) matrix_weather = matrix_weather.loc[matrix_weather.year >= 2010] matrix_weather = matrix_weather.loc[matrix_weather.year < 2018] strs = ['{}-{:03d}'.format(e, f) for e, f in matrix_weather[['year', 'doy']].itertuples(False, None)] matrix_weather.loc[:, 'date'] = pd.to_datetime(strs, format='%Y-%j') matrix_weather.set_index('date', inplace=True) matrix_weather = matrix_weather.loc[matrix_weather.index > '2011-08-01'] matrix_weather.loc[:, 'max_irr'] = 10. matrix_weather.loc[:, 'irr_trig'] = 0 matrix_weather.loc[:, 'irr_targ'] = 1 matrix_weather.loc[:, 'irr_trig_store'] = 0 matrix_weather.loc[:, 'irr_targ_store'] = 1 matrix_weather.loc[:, 'external_inflow'] = 0 return matrix_weather def get_lincoln_broadfield(): path = os.path.join(test_dir, 'Lincoln_Broadfield_Ews.csv') line_breaks = { 'pet': [10, 2916], 'rain': [2920, 5841], 'tmaxmin': [5845, 8766], 'rad': [8770, 11688], } cols = { 'pet': ['station', 'year', 'doy', 'time', 'pet', 'per', 'type'], 'rain': ['station', 'year', 'doy', 'time', 'rain', 'sog', 'rain_def', 'rain_runoff', 'per', 'freq'], 'tmaxmin': ['station', 'year', 'doy', 'time', 'tmax', 'per1', 'tmin', 'per2', 'tgmin', 'per3', 'tmean', 'rhmean', 'per', 'freq'], 'rad': ['station', 'year', 'doy', 'time', 'radn', 'per', 'type', 'freq'], } keep_cols = np.array(['pet', 'rain', 'rain_def', 'rain_runoff', 'tmax', 'tmin', 'radn']) temp = pd.date_range('01-01-2010', '31-12-2017', freq='D') year = temp.year.values doy = temp.dayofyear.values outdata = pd.DataFrame({'year': year, 'doy': doy}, ) outdata.set_index(['year', 'doy'], inplace=True) for k, (start, stop) in line_breaks.items(): temp = pd.read_csv(path, names=cols[k], skiprows=start - 1, nrows=stop - start + 1) temp.loc[:, 'year'] = temp.year.astype(int) temp.loc[:, 'doy'] = temp.doy.astype(int) temp.set_index(['year', 'doy'], inplace=True) tkeep = keep_cols[np.in1d(keep_cols, temp.keys())] for k2 in tkeep: outdata.loc[temp.index, k2] = temp.loc[:, k2] outdata = outdata.reset_index() outdata.loc[:, 'tmax'] = pd.to_numeric(outdata.loc[:, 'tmax'], errors='coerce') outdata.fillna(method='ffill', inplace=True) strs = ['{}-{:03d}'.format(e, f) for e, f in outdata[['year', 'doy']].itertuples(False, None)] outdata.loc[:, 'date'] = pd.to_datetime(strs, 
format='%Y-%j') outdata.set_index('date', inplace=True) outdata = outdata.loc[outdata.index > '2011-08-01'] outdata.loc[:, 'max_irr'] = 10. outdata.loc[:, 'irr_trig'] = 0 outdata.loc[:, 'irr_targ'] = 1 outdata.loc[:, 'irr_trig_store'] = 0 outdata.loc[:, 'irr_targ_store'] = 1 outdata.loc[:, 'external_inflow'] = 0 return outdata def base_manual_harvest_data(): params, matrix_weather, days_harvest, doy_irr = establish_org_input() days_harvest = clean_harvest(days_harvest, matrix_weather) days_harvest.loc[:, 'frac_harv'] = 1 days_harvest.loc[:, 'harv_trig'] = 3000 days_harvest.loc[:, 'harv_targ'] = 1000 days_harvest.loc[:, 'weed_dm_frac'] = 0 days_harvest.loc[:, 'reseed_trig'] = -1 days_harvest.loc[:, 'reseed_basal'] = 1 strs = ['{}-{:03d}'.format(e, f) for e, f in days_harvest[['year', 'doy']].itertuples(False, None)] days_harvest.loc[:, 'date'] = pd.to_datetime(strs, format='%Y-%j') return days_harvest def base_auto_harvest_data(matrix_weather): strs = ['{}-{:03d}'.format(e, f) for e, f in matrix_weather[['year', 'doy']].itertuples(False, None)] days_harvest_out = pd.DataFrame({'year': matrix_weather.loc[:, 'year'], 'doy': matrix_weather.loc[:, 'doy'], 'frac_harv': np.zeros(len(matrix_weather)), # set filler values 'harv_trig': np.zeros(len(matrix_weather)) - 1, # set flag to not harvest 'harv_targ': np.zeros(len(matrix_weather)), # set filler values 'weed_dm_frac': np.zeros(len(matrix_weather)), # set filler values 'date': pd.to_datetime(strs, format='%Y-%j') }) days_harvest_out.loc[:, 'reseed_trig'] = -1 days_harvest_out.loc[:, 'reseed_basal'] = 1 return days_harvest_out def get_input_for_storage_tests(): params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln') matrix_weather = get_lincoln_broadfield() matrix_weather.loc[:, 'max_irr'] = 5 matrix_weather.loc[matrix_weather.index > '2015-08-01', 'max_irr'] = 15 matrix_weather.loc[:, 'irr_trig'] = 0.75 matrix_weather.loc[:, 'irr_targ'] = 0.9 params['IRRIGF'] = 1 # irrigation to 90% of field capacity doy_irr = list(range(305, 367)) + list(range(1, 91)) params['use_storage'] = 1 params['irrigated_area'] = 10 params['h2o_store_max_vol'] = 10000 # 100 mm storage params['h2o_store_SA'] = 0 # this is needed for evap, but not implemented currently # place holders, these need to be defined for each set params['runoff_from_rain'] = 1 params['calc_ind_store_demand'] = 0 params['stor_full_refil_doy'] = 240 params['abs_max_irr'] = 1000 # non-sensically high params['I_h2o_store_vol'] = 1 params['runoff_area'] = 0 params['runoff_frac'] = 0 params['stor_refill_min'] = 0 params['stor_refill_losses'] = 0 params['stor_leakage'] = 0 params['stor_irr_ineff'] = 0 params['stor_reserve_vol'] = 0 return params, matrix_weather, days_harvest, doy_irr
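# Small sketch (added for clarity, not part of the original module): the
# year/doy-to-datetime idiom used repeatedly above, isolated so the '%Y-%j'
# format string is easy to verify on its own.
def _year_doy_to_dates(year_doy_df):
    """year_doy_df needs integer 'year' and 'doy' columns; returns a DatetimeIndex."""
    strs = ['{}-{:03d}'.format(e, f) for e, f in year_doy_df[['year', 'doy']].itertuples(False, None)]
    return pd.to_datetime(strs, format='%Y-%j')


# e.g. _year_doy_to_dates(pd.DataFrame({'year': [2011], 'doy': [121]})) -> 2011-05-01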
# -*- coding: utf-8 -*- # Copyright 2018-2019 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A bunch of useful utilities for dealing with types.""" # Python 2/3 compatibility from __future__ import print_function, division, unicode_literals, absolute_import from streamlit.compatibility import setup_2_3_shims setup_2_3_shims(globals()) import re def is_type(obj, fqn_type_pattern): """Check type without importing expensive modules. Parameters ---------- obj : any The object to type-check. fqn_type_pattern : str or regex The fully-qualified type string or a regular expression. Regexes should start with `^` and end with `$`. Example ------- To check whether something is a Matplotlib Figure without importing matplotlib, use: >>> is_type(foo, 'matplotlib.figure.Figure') """ the_type = type(obj) module = the_type.__module__ name = the_type.__name__ actual_fqn = "%s.%s" % (module, name) if isinstance(fqn_type_pattern, string_types): # noqa: F821 pylint:disable=undefined-variable return fqn_type_pattern == actual_fqn else: return fqn_type_pattern.match(actual_fqn) is not None _SYMPY_RE = re.compile(r"^sympy.*$") def is_sympy_expession(obj): """True if input is a SymPy expression.""" if not is_type(obj, _SYMPY_RE): return False try: import sympy if isinstance(obj, sympy.Expr): return True except: return False _ALTAIR_RE = re.compile(r"^altair\.vegalite\.v\d+\.api\.\w*Chart$") def is_altair_chart(obj): """True if input looks like an Altair chart.""" return is_type(obj, _ALTAIR_RE) def is_keras_model(obj): """True if input looks like a Keras model.""" return ( is_type(obj, "keras.engine.sequential.Sequential") or is_type(obj, "keras.engine.training.Model") or is_type(obj, "tensorflow.python.keras.engine.sequential.Sequential") or is_type(obj, "tensorflow.python.keras.engine.training.Model") ) def is_plotly_chart(obj): """True if input looks like a Plotly chart.""" return ( is_type(obj, "plotly.graph_objs._figure.Figure") or _is_list_of_plotly_objs(obj) or _is_probably_plotly_dict(obj) ) def is_graphviz_chart(obj): """True if input looks like a GraphViz chart.""" return is_type(obj, "graphviz.dot.Graph") or is_type(obj, "graphviz.dot.Digraph") def _is_plotly_obj(obj): """True if input if from a type that lives in plotly.plotly_objs.""" the_type = type(obj) return the_type.__module__.startswith("plotly.graph_objs") def _is_list_of_plotly_objs(obj): if type(obj) is not list: return False if len(obj) == 0: return False return all(_is_plotly_obj(item) for item in obj) def _is_probably_plotly_dict(obj): if type(obj) not in dict_types: # noqa: F821 pylint:disable=undefined-variable return False if len(obj.keys()) == 0: return False if any(k not in ["config", "data", "frames", "layout"] for k in obj.keys()): return False if any(_is_plotly_obj(v) for v in obj.values()): return True if any(_is_list_of_plotly_objs(v) for v in obj.values()): return True return False _FUNCTION_TYPE = type(lambda: 0) def is_function(x): """Return True if x is a function.""" return type(x) == _FUNCTION_TYPE def is_namedtuple(x): 
t = type(x) b = t.__bases__ if len(b) != 1 or b[0] != tuple: return False f = getattr(t, "_fields", None) if not isinstance(f, tuple): return False return all(type(n).__name__ == "str" for n in f)
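# Usage sketch (illustrative, not part of the original module): is_type() only
# compares fully-qualified type names, so the heavy module never gets imported.
#
#     is_type(fig, "matplotlib.figure.Figure")           # exact name match
#     is_type(chart, re.compile(r"^altair\..*Chart$"))   # regex match
#
# `fig` and `chart` above are hypothetical objects supplied by the caller.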
""" Hyper parameters related utils. For now it contains hyper parameter search tools with random search but it may evolve later. :: hp_sampler = HyperparamSearch( lr=ExpSampler(1e-6, 0.1), momentum=DecaySampler(0.5, 0.999), wd=ExpSampler(0.1, 1e-6), model=ChoiceSampler(['resnet', 'vgg']) ) for run_nb in range(10): hps = hp_sampler.sample() results = train(**hps) hp_sampler.log_results(hps, results) After creating the json file summing up the hyper parameter search, it can be investigated with the viewer with :code:`python3 -m torchelie.hyper hpsearch.json`. Then locate your browser to :code:`http://localhost:8080`. """ import math import random import json import copy class Sampler: """ Uniform sampler. Args: low (float): lower bound high (float): higher bound """ def __init__(self, low: float, high: float) -> None: self.low = low self.high = high def sample(self) -> float: """ Sample from the distribution. """ return random.uniform(self.low, self.high) def inverse(self, x: float) -> float: return x class ExpSampler(Sampler): """ Exponential sampler (Uniform sampler over a log scale). Use it to sample the learning rate or the weight decay. Args: low (float): lower bound high (float): higher bound """ def __init__(self, low, high): low = self.inverse(low) high = self.inverse(high) super(ExpSampler, self).__init__(low, high) def sample(self): """ Sample a value. """ return 10**super(ExpSampler, self).sample() def inverse(self, x): return math.log10(x) class DecaySampler(ExpSampler): """ Sample a decay value. Use it for a momentum or beta1 / beta2 value or any exponential decay value. Args: low (float): lower bound high (float): higher bound """ def __init__(self, low, high): super(DecaySampler, self).__init__(low, high) def sample(self): """ Sample a value. """ return 1 - super(DecaySampler, self).sample() def inverse(self, x): return super(DecaySampler, self).inverse(1 - x) class ChoiceSampler: """ Sampler over a discrete of values. Args: choices (list): list of values """ def __init__(self, choices): self.choices = choices def sample(self): """ Sample a value """ return random.choice(self.choices) def inverse(self, x): return self.choices.index(x) class HyperparamSampler: """ Sample hyper parameters. It aggregates multiple samplers to produce a set of hyper parameters. Example: :: HyperparamSampler( lr=ExpSampler(1e-6, 0.1), momentum=DecaySampler(0.5, 0.999), wd=ExpSampler(0.1, 1e-6), model=ChoiceSampler(['resnet', 'vgg']) ) Args: hyperparams (kwargs): hyper params samplers. Names are arbitrary. """ def __init__(self, **hyperparams): self.hyperparams = hyperparams def sample(self): """ Sample hyperparameters. Returns: a dict containing sampled values for hyper parameters. """ return {k: v.sample() for k, v in self.hyperparams.items()} class HyperparamSearch: """ Perform hyper parameter search. Right now it just uses a random search. Params and results are logged to hpsearch.csv in the current directory. It would be cool to implement something like a Gaussian Process search or a RL algorithm. First, call sample() to get a set of hyper parameters. The, evaluate them on your task and get a dict of results. call log_results() with the hyper params and the results dict, and start again. Stop whenever you want. Args: hyperparameters (kwargs): named samplers (like for HyperparamSampler). """ def __init__(self, **hyperparams): self.sampler = HyperparamSampler(**hyperparams) def sample(self): """ Sample a set of hyper parameters. 
""" return self.sampler.sample() def read_hpsearch(self): try: with open('hpsearch.json', 'r') as f: return json.load(f) except: return [] def _str(self, x): try: return float(x) except: pass try: return x.item() except: pass return x def log_result(self, hps, result): """ Logs hyper parameters and results. """ res = self.read_hpsearch() full = copy.deepcopy(hps) full.update({'result_' + k: self._str(v) for k, v in result.items()}) res.append(full) with open('hpsearch.json', 'w') as f: json.dump(res, f) if __name__ == '__main__': from http.server import HTTPServer, BaseHTTPRequestHandler import argparse parser = argparse.ArgumentParser() parser.add_argument('file', default='hpsearch.json') opts = parser.parse_args() def make_html(): with open(opts.file) as f: dat = json.load(f) dat.sort(key=lambda x: x['result_lfw_loss']) dat = dat dimensions = [] for k in dat[0].keys(): v = dat[0][k] if isinstance(v, float): dimensions.append({ 'label': k, 'values': [dd[k] for dd in dat] }) print(json.dumps(dimensions)) return """ <html> <head> <meta charset="UTF-8"> <title></title> <script src="https://cdn.plot.ly/plotly-latest.min.js"></script> <script src="https://cdn.jsdelivr.net/npm/[email protected]/sorttable.min.js"></script> <style> table, th, td { border: 1px solid black; } </style> </head> <body> <div id="graphOpts"></div> <div id="graphDiv"></div> <div id="table"></div> <script> var DATA = """ + json.dumps(dat) + """ for (d in DATA) { DATA[d]['idx'] = d } let display_opts = {}; let hidden_rows = new Set(); function make_graph() { let data = DATA.filter((_, i) => !hidden_rows.has(i)); let DIM = Object.keys(data[0]) .filter(k => display_opts[k] !== "Hidden") .map(k => { if (typeof data[0][k] == 'number') { if (display_opts[k] == 'Visible' || display_opts[k] == undefined) { return { 'label': k, 'values': data.map(d => d[k]) }; } else if (display_opts[k] == 'Log') { return { 'label': k, 'values': data.map(d => Math.log10(d[k])), 'tickvals': data.map(d => Math.log10(d[k])), 'ticktext': data.map(d => d[k].toExponential(2)) }; } else if (display_opts[k] == 'Decay') { return { 'label': k, 'values': data.map(d => Math.log10(1/(1-d[k]))), 'tickvals': data.map(d => Math.log10(1/(1-d[k]))), 'ticktext': data.map(d => d[k].toExponential(2)) }; } } else { let labels = Array.from(new Set(data.map(d => d[k]))); return { 'label': k, 'values': data.map(d => labels.indexOf(d[k])), 'tickvals': labels.map((_, i) => i), 'ticktext': labels }; } }); var trace = { type: 'parcoords', line: { color: 'blue' }, dimensions: DIM }; var show = [trace] Plotly.purge('graphDiv'); Plotly.plot('graphDiv', show, {}, {showSendToCloud: true}); } make_graph(); function change_opts(t, val) { display_opts[t]=val; make_graph(); } graphOpts.innerHTML = ` <table width="100%"> <tr> ${get_titles().map(t => `<th>${t}</th>`).join('')} </tr> <tr> ${get_titles().map(t => `<td> <select onchange="change_opts('${t}', event.target.value)"> <option default>Visible</option> <option>Hidden</option> <option>Log</option> <option>Decay</option> </select> </th>` ).join('')} </tr> </table> `; function get_titles() { let titles = []; for (d of DATA) { for (k of Object.keys(d)) { titles.push(k); } } titles = Array.from(new Set(titles)); return titles; } function change_visibility(r, hid) { if (hid) { hidden_rows.add(r); } else { hidden_rows.delete(r); } make_graph(); } let make_table = (DATA) => { let titles = get_titles(); return ` <table width="100%" class="sortable"> <tr> ${titles.map(t => `<th>${t}</th>` ).join('')} <th>Hide</th> </tr> ${DATA.map((d, i) => 
`<tr>${titles.map(t => `<td> ${(typeof d[t] == 'number') ? d[t].toFixed(3) : d[t]} </td>` ).join('')} <td>H <input type="checkbox" onchange="change_visibility(${i}, event.target.checked)"/> </td> </tr>`).join('')} </table>`; }; table.innerHTML = make_table(DATA); </script> </body> </html> """ class Server(BaseHTTPRequestHandler): def do_GET(self): self.handle_http(200, 'text/html', make_html()) def handle_http(self, status, content_type, content): self.send_response(status) self.send_header('Content-type', content_type) self.end_headers() self.wfile.write(bytes(content, 'utf-8')) httpd = HTTPServer(('0.0.0.0', 8080), Server) try: print('serving on localhost:8080') httpd.serve_forever() except KeyboardInterrupt: pass httpd.server_close()
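# A minimal random-search loop with the classes above (a sketch; `train` is a
# hypothetical user function returning a dict of metrics):
#
#     search = HyperparamSearch(
#         lr=ExpSampler(1e-6, 0.1),
#         momentum=DecaySampler(0.5, 0.999),
#         model=ChoiceSampler(['resnet', 'vgg']),
#     )
#     for _ in range(10):
#         hps = search.sample()
#         metrics = train(**hps)            # e.g. {'loss': 0.42}
#         search.log_result(hps, metrics)   # appended to hpsearch.json
#
# The resulting hpsearch.json can then be browsed with
# `python3 -m torchelie.hyper hpsearch.json`.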
# Version 0 # Initial config version! V0 = { "$schema": "http://json-schema.org/schema#", "type": "object", "properties": { # GitLab private token "token": { "type": "string", }, # GitLab domain (https://git.gitlab.com) "gitlab-host": { "type": "string", }, # GitLab Namespace name "namespace": { "type": "string", }, # GitLab Namespace ID (we'd have to retrieve that) "namespace-id": { "type": "integer", }, # Verbose name of the course (might be unnecessary) "course-name": { "type": "string", }, # Current semester "semester": { "type": "string", "pattern": r"^\d{4}-(SP|FS|SS)$" }, # Roster of students "roster": { "type": "array", "items": { "type": "object", "properties": { # Their full name "name": { "type": "string" }, # Section "section": { "type": "string" }, # Their GitLab username (single sign on) "username": { "type": "string", "pattern": "^[\w\.\-]+$", }, # Their GitLab id (might be handy, but we'd have # to fetch it and save it). Should save time in # the long run instead of constantly querying "id": { "type": "integer", }, }, "required": ["name", "username", "section"], "additionalProperties": False, }, }, # Canvas API token "canvas-token": { "type": "string", }, # Canvas domain "canvas-host": { "type": "string", } }, "required": ["gitlab-host", "namespace", "token", "semester"], "additionalProperties": False, }
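# Example (illustrative): a minimal config that should validate against V0.
# Uses the third-party `jsonschema` package; all values below are made up.
#
#     import jsonschema
#     jsonschema.validate(
#         {
#             "token": "s3cr3t",
#             "gitlab-host": "https://git.gitlab.com",
#             "namespace": "cs1001",
#             "semester": "2019-FS",
#             "roster": [
#                 {"name": "Ada Lovelace", "username": "alovelace", "section": "A"},
#             ],
#         },
#         V0,
#     )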
"""Helper functions""" from itertools import product from math import isinf from typing import List import mpmath as mp import numpy as np import pandas as pd from utils.exceptions import ParameterOutOfBounds EPSILON = 1e-09 def get_q(p: float) -> float: """ :param p: Hoelder p :return: q """ if p <= 1.0: raise ParameterOutOfBounds(f"p={p} must be >1") # 1/p + 1/q = 1 # 1/q = (p - 1) / p return p / (p - 1.0) def get_p_n(p_list) -> float: """ :param p_list: first p_1, ..., p_n in generalized Hoelder inequality :return: last p_n """ inv_p = [0.0] * len(p_list) for i in range(len(p_list)): if p_list[i] <= 1.0: raise ParameterOutOfBounds(f"p={p_list[i]} must be >1") inv_p[i] = 1.0 / p_list[i] if sum(inv_p) >= 1: raise ParameterOutOfBounds(f"p_i's are too small") return 1.0 / (1.0 - sum(inv_p)) def is_equal(float1: float, float2: float, epsilon=EPSILON) -> bool: """ :param float1: real 1 :param float2: real 2 :param epsilon: accuracy of the comparison (default is the global Epsilon) :return: returns true if distance is less than epsilon """ if isinf(float1) and isinf(float2): return True return abs(float1 - float2) < epsilon def expand_grid(list_input: list) -> pd.DataFrame: """ implement R-expand.grid() function :param list_input: list of values to be expanded :return: expanded data frame """ return pd.DataFrame([row for row in product(*list_input)]) def centroid_without_one_row(simplex: np.ndarray, index: int) -> np.ndarray: # type hint does not work with int and np.ndarray[int] # column mean of simplex without a given row # (usually the one with the worst y-to_value) return np.delete(simplex, index, 0).mean(axis=0) def average_towards_best_row(simplex: np.ndarray, best_index: int, shrink_factor: float) -> np.ndarray: # type hint does not work with int and np.ndarray[int] index = 0 for row in simplex: simplex[index] = simplex[best_index] + shrink_factor * ( row - simplex[best_index]) index += 1 return simplex def get_unit_vector(length: int, index: int) -> List[float]: """ :param length: length of unit vector :param index: index of 1.0 to_value :return: unit vector with 1.0 at index and 0.0 else """ res = [0.0] * length res[index] = 1.0 return res def same_sign(a, b): return a * b > 0 def bisect(func, low, high) -> float: """Find root of continuous function where f(low) and f(high) have opposite signs""" counter = 20 while func(low) >= -EPSILON and counter > 0: low /= 2.0 counter -= 1 if func(low) >= 0: return low while func(high) <= EPSILON and counter > 0: high *= 2.0 counter -= 1 assert not same_sign(func(low), func(high)) midpoint = mp.mpf((low + high) / 2.0) for i in range(54): midpoint = mp.mpf((low + high) / 2.0) if same_sign(func(low), func(midpoint)): low = mp.mpf(midpoint) else: high = mp.mpf(midpoint) if abs(high - low) < EPSILON: break return midpoint if __name__ == '__main__': print(get_p_n(p_list=[3.0, 3.0])) SIMPLEX_START_TEST = np.array([[0.1, 2.0], [1.0, 3.0], [2.0, 2.0]]) print(SIMPLEX_START_TEST) print(centroid_without_one_row(simplex=SIMPLEX_START_TEST, index=0)) print( average_towards_best_row(SIMPLEX_START_TEST, best_index=0, shrink_factor=0.5))
class cell:
    """
    A cell for the CA problem.
    """
    def __init__(self, rule, state):
        self.right = None
        self.left = None
        self.rule = rule
        self.state = state

    def situate(self):
        """Looks around itself and calculates its situation. Returns an int
        from 0 to 7 by reading (left, self, right) as a 3-bit number, with the
        left neighbour as the most significant bit (the usual Wolfram encoding,
        matching the 8-entry rule lookup in choose())."""
        return 4 * (self.left).state + 2 * self.state + (self.right).state

    def choose(self):
        """Looks at its own situation and chooses the next state based on
        the given rule"""
        return self.rule[-1 - self.situate()]
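if __name__ == '__main__':
    # Minimal usage sketch (illustrative): one step of rule 110 on a 3-cell ring,
    # assuming the 3-bit neighbourhood encoding used in situate() above.
    rule110 = [0, 1, 1, 0, 1, 1, 1, 0]   # 110 == 0b01101110, most significant bit first
    cells = [cell(rule110, s) for s in (1, 0, 1)]
    for i, c in enumerate(cells):        # wire the cells into a circular lattice
        c.left = cells[i - 1]
        c.right = cells[(i + 1) % len(cells)]
    print([c.choose() for c in cells])   # -> [1, 1, 1]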
from pathlib2 import Path

PROJECT_DIR = Path(__file__).resolve().parents[1]
DATA_DIR = PROJECT_DIR / "data"
__title__ = 'Geo km' __description__ = 'Calculate distance and travel time between points using Google API.' __url__ = 'not available' __version__ = '2.0' __author__ = 'Shkaberda V., Zubriichuk V.' __author_email__ = 'not available' __license__ = 'MIT License' __copyright__ = 'Copyright 2021 Shkaberda Vadim, Vitaliy Zubriichuk'
from django.contrib.auth import views as auth_views from django.urls import path from .forms import CustomAuthenticationForm app_name = "accounts" login_view = auth_views.LoginView.as_view( template_name="accounts/login.html", authentication_form=CustomAuthenticationForm ) logout_view = auth_views.LogoutView.as_view( template_name="accounts/logout.html" ) urlpatterns = [ path('login/', login_view, name='login'), path('logout/', logout_view, name='logout'), # path('signup/', signup_request_view, name='signup_request') ]
from django.apps import AppConfig class ResourcecenterConfig(AppConfig): name = 'apps.hobbygroups' verbose_name = 'Hobby groups'
import os

# TF_CPP_MIN_LOG_LEVEL controls TensorFlow's C++ logging:
#   '1' - the default display level, show all messages
#   '2' - show only warnings and errors
#   '3' - show only errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Plot cos/sin as a reference in the second subplot.
x_sin = np.linspace(-10, 10, 200)
c, s = np.cos(x_sin), np.sin(x_sin)
plt.subplot(2, 2, 2)
plt.plot(x_sin, c, color="blue", linestyle="-", label="COS", alpha=0.5)
plt.plot(x_sin, s, "r*", label="SIN")

# Evaluate the sigmoid activation with TensorFlow (1.x graph/session API).
x_data = np.linspace(-10, 10, 300)
out = tf.sigmoid(x_data)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    y = sess.run(out)

plt.subplot(2, 2, 1)
plt.xlabel('Activity of Neuron')
plt.ylabel('Output of Neuron')
plt.title('Sigmoid Activation function')
plt.plot(x_data, y)
plt.show()

# Scratch example kept from the original (commented out):
# a = tf.constant(2, shape=[2, ])
# b = tf.constant(3, shape=[2, ])
# c = tf.add(a, b)
# with tf.Session() as sess:
#     print(sess.run(c))
# Extract the digit for each place and look it up in a per-place dictionary:
# roman = thousands[th] + hundreds[h] + tens[t] + ones[o].
class Solution(object):
    def intToRoman(self, num):
        """
        :type num: int
        :rtype: str
        """
        thousands = {'0': '', '1': 'M', '2': 'MM', '3': 'MMM'}
        hundreds = {'0': '', '1': 'C', '2': 'CC', '3': 'CCC', '4': 'CD', '5': 'D',
                    '6': 'DC', '7': 'DCC', '8': 'DCCC', '9': 'CM'}
        tens = {'0': '', '1': 'X', '2': 'XX', '3': 'XXX', '4': 'XL', '5': 'L',
                '6': 'LX', '7': 'LXX', '8': 'LXXX', '9': 'XC'}
        ones = {'0': '', '1': 'I', '2': 'II', '3': 'III', '4': 'IV', '5': 'V',
                '6': 'VI', '7': 'VII', '8': 'VIII', '9': 'IX'}
        # Zero-pad to four digits so every place has an entry.
        num = str(num).zfill(4)
        th, h, t, o = num
        roman = thousands[th] + hundreds[h] + tens[t] + ones[o]
        return roman
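if __name__ == '__main__':
    # Quick illustrative check of the lookup approach.
    print(Solution().intToRoman(1994))  # MCMXCIV
    print(Solution().intToRoman(58))    # LVIII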
# -*- coding: utf-8 -*- # Copyright (c) 2016 Ericsson AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from calvin.utilities.utils import enum PURPOSE = enum('INIT', 'CONNECT', 'DISCONNECT') class BaseConnection(object): """BaseConnection""" def __init__(self, node, purpose, port, peer_port_meta, callback, factory, *args, **kwargs): super(BaseConnection, self).__init__() self.node = node self.purpose = purpose self.port = port self.peer_port_meta = peer_port_meta self.callback = callback self.factory = factory self._parallel_connections = [] def async_reply(self, status): if not self.callback: return self.callback( status=status, actor_id=self.port.owner.id, port_name=self.port.name, port_id=self.port.id, peer_node_id=self.peer_port_meta.node_id, peer_actor_id=self.peer_port_meta.actor_id, peer_port_name=self.peer_port_meta.port_name, peer_port_id=self.peer_port_meta.port_id ) def parallel_connections(self, connections): self._parallel_connections = connections def parallel_set(self, key, value): for c in self._parallel_connections: setattr(c, key, value) def init(self): return None def __str__(self): return "%s(port_id=%s, peer_port_id=%s)" % (self.__class__.__name__, self.port.id, self.peer_port_meta.port_id)
from django.urls import path, include from rest_framework import routers from employee_management_backend.users.api import views router_users = routers.DefaultRouter() router_users.register("user", views.UserViewSet, "user") router_users.register("profiles", views.ProfileViewSet, "profiles") router_users.register("addresses", views.AddressViewSet, "addresses") urlpatterns = [path("", include(router_users.urls))]
def expanded_form(num):
    # Pair each digit (least significant first) with its place value, drop the
    # zero digits, then rebuild the expansion most-significant-first.
    place_values = [10 ** p for p in range(50)]
    terms = [
        str(int(digit) * place)
        for digit, place in zip(str(num)[::-1], place_values)
        if digit != '0'
    ]
    return ' + '.join(terms[::-1])
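if __name__ == '__main__':
    # Illustrative check: every non-zero digit becomes digit * place value.
    print(expanded_form(70304))  # 70000 + 300 + 4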
# Generated by Django 3.0.3 on 2020-03-12 10:17 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0017_lot_matiere_premiere2'), ] operations = [ migrations.RenameField( model_name='lot', old_name='matiere_premiere2', new_name='biocarburant', ), migrations.RemoveField( model_name='lot', name='volume2', ), ]
import unittest import sys sys.path.append('.') from dl.lexer import DLLexer from dl.parser import DLParser from dl.ast import Integer, Variable, BinOp, RelOp, ArrayIndex, \ Assign, Print, Read, Return, If, While, Block, \ FunctionCall, Arguments, FunctionDeclaration, \ Declarations, VariableDeclarations, Program class TestParser(unittest.TestCase): def test_parse_print_statement(self): lexer = DLLexer() parser = DLParser() source_string = "{ print(1) }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Print(Integer(1))))") def test_parse_addition_constants(self): lexer = DLLexer() parser = DLParser() source_string = "{ print(2 + 3) }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Print(BinOp(PLUSOP, Integer(2), Integer(3)))))") def test_parse_addition_variables(self): lexer = DLLexer() parser = DLParser() source_string = "{ print(x + y) }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Print(BinOp(PLUSOP, Variable(x), Variable(y)))))") def test_parse_assignment_statement(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = 5 }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), Integer(5))))") def test_parse_addition_assignment(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = 2 + 3 }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), BinOp(PLUSOP, Integer(2), Integer(3)))))") def test_parse_subtraction_assignment(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = 4 - 3 }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), BinOp(MINUSOP, Integer(4), Integer(3)))))") def test_parse_multiplication_assignment(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = 4 * 5 }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), BinOp(MULTIPLYOP, Integer(4), Integer(5)))))") def test_parse_division_assignment(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = 10 / 2 }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), BinOp(DIVIDEOP, Integer(10), Integer(2)))))") def test_parse_assignment_statement_to_array_constant(self): lexer = DLLexer() parser = DLParser() source_string = "{ x[1] = 5 }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(ArrayIndex(Variable(x), Integer(1)), Integer(5))))") def test_parse_assignment_statement_to_array_variable(self): lexer = DLLexer() parser = DLParser() source_string = "{ x[y] = z }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(ArrayIndex(Variable(x), Variable(y)), Variable(z))))") def test_parse_assignment_statement_from_array_constant(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = y[1] }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), ArrayIndex(Variable(y), Integer(1)))))") def 
test_parse_assignment_statement_from_array_variable(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = y[z] }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), ArrayIndex(Variable(y), Variable(z)))))") def test_parse_read_statement(self): lexer = DLLexer() parser = DLParser() source_string = "{ read(x) }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Read(Variable(x))))") def test_parse_return_statement_constant(self): lexer = DLLexer() parser = DLParser() source_string = "{ return 2 }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Return(Integer(2))))") def test_parse_return_statement_variable(self): lexer = DLLexer() parser = DLParser() source_string = "{ return x }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Return(Variable(x))))") def test_parse_return_statement_expression(self): lexer = DLLexer() parser = DLParser() source_string = "{ return x + 5 }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Return(BinOp(PLUSOP, Variable(x), Integer(5)))))") def test_parse_if_statement(self): lexer = DLLexer() parser = DLParser() source_string = "{ if(1 == 1) {print(1)} }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(If(RelOp(EQOP, Integer(1), Integer(1)), Block(Print(Integer(1))))))") def test_parse_if_else_statement(self): lexer = DLLexer() parser = DLParser() source_string = "{ if(1 == 5) {print(1)} else {print(0)} }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(If(RelOp(EQOP, Integer(1), Integer(5)), Block(Print(Integer(1))), Block(Print(Integer(0))))))") def test_parse_while_statement(self): lexer = DLLexer() parser = DLParser() source_string = "{ while(1 == 1) {print(1)} }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(While(RelOp(EQOP, Integer(1), Integer(1)), Block(Print(Integer(1))))))") def test_parse_not_equal_relop(self): lexer = DLLexer() parser = DLParser() source_string = "{ while(1 != 2) {print(2)} }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(While(RelOp(NEOP, Integer(1), Integer(2)), Block(Print(Integer(2))))))") def test_parse_less_than_relop(self): lexer = DLLexer() parser = DLParser() source_string = "{ while(2 < 3) {print(3)} }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(While(RelOp(LTOP, Integer(2), Integer(3)), Block(Print(Integer(3))))))") def test_parse_less_or_equal_relop(self): lexer = DLLexer() parser = DLParser() source_string = "{ while(3 <= 4) {print(4)} }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(While(RelOp(LEOP, Integer(3), Integer(4)), Block(Print(Integer(4))))))") def test_parse_greater_than_relop(self): lexer = DLLexer() parser = DLParser() source_string = "{ while(5 > 4) {print(5)} }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(While(RelOp(GTOP, Integer(5), Integer(4)), Block(Print(Integer(5))))))") def 
test_parse_greater_or_equal_relop(self): lexer = DLLexer() parser = DLParser() source_string = "{ while(6 >= 5) {print(6)} }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(While(RelOp(GEOP, Integer(6), Integer(5)), Block(Print(Integer(6))))))") def test_parse_block_statement(self): lexer = DLLexer() parser = DLParser() source_string = "{ {print(1)} }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Block(Print(Integer(1)))))") def test_parse_statement_sequence(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = 2; print(x) }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), Integer(2)), Print(Variable(x))))") def test_parse_program_variable_declaration(self): lexer = DLLexer() parser = DLParser() source_string = """ int x; { x = 1 } """ tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Declarations(VariableDeclarations(INT, Variable(x))), Block(Assign(Variable(x), Integer(1))))") def test_parse_program_variable_declaration_sequence(self): lexer = DLLexer() parser = DLParser() source_string = """ int x, y, z; { x = 1 } """ tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Declarations(VariableDeclarations(INT, Variable(x), Variable(y), Variable(z))), Block(Assign(Variable(x), Integer(1))))") def test_parse_program_array_declaration(self): lexer = DLLexer() parser = DLParser() source_string = """ int x[10]; { x[5] = 1 } """ tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Declarations(VariableDeclarations(INT, ArrayIndex(Variable(x), Integer(10)))), Block(Assign(ArrayIndex(Variable(x), Integer(5)), Integer(1))))") def test_parse_function_call_constant(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = foo(1) }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), FunctionCall(foo, Arguments(Integer(1))))))") def test_parse_function_call_variable(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = bar(y) }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), FunctionCall(bar, Arguments(Variable(y))))))") def test_parse_function_call_multiple_arguments(self): lexer = DLLexer() parser = DLParser() source_string = "{ x = baz(y, 1, z, 2) }" tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Block(Assign(Variable(x), FunctionCall(baz, Arguments(Variable(y), Integer(1), Variable(z), Integer(2))))))") def test_parse_function_declaration_no_arguments(self): lexer = DLLexer() parser = DLParser() source_string = """ /* Declare a function named 'foo', with no arguments. */ foo(); { return 5; } /* Call 'foo' in the main body of the program. */ { x = foo() } """ tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertEqual(str(result), "Program(Declarations(FunctionDeclaration(foo, Block(Return(Integer(5))))), Block(Assign(Variable(x), FunctionCall(foo))))") def test_parse_function_declaration_one_argument(self): lexer = DLLexer() parser = DLParser() source_string = """ /* Declare a function named 'bar', with one argument. 
*/ bar(x); { return x * 10; } /* Call 'bar' in the main body of the program. */ { y = bar(5) } """ tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertIsInstance(result, Program) self.assertEqual(str(result.declarations), "Declarations(FunctionDeclaration(bar, Arguments(Variable(x)), Block(Return(BinOp(MULTIPLYOP, Variable(x), Integer(10))))))") self.assertEqual(str(result.body), "Block(Assign(Variable(y), FunctionCall(bar, Arguments(Integer(5)))))") def test_parse_function_declaration_with_variables(self): lexer = DLLexer() parser = DLParser() source_string = """ /* Declare a function named 'baz', with a variable declaration. */ baz(); int x; { return x + 2 } /* Call 'baz' in the main body of the program. */ { y = baz(3) } """ tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertIsInstance(result, Program) self.assertEqual(str(result.body), "Block(Assign(Variable(y), FunctionCall(baz, Arguments(Integer(3)))))") decs = result.declarations self.assertIsInstance(decs, Declarations) func = decs.declarations[0] self.assertIsInstance(func, FunctionDeclaration) self.assertEqual(func.name, "baz") self.assertEqual(func.args, None) self.assertEqual(str(func.vars), "VariableDeclarations(INT, Variable(x))") self.assertEqual(str(func.body), "Block(Return(BinOp(PLUSOP, Variable(x), Integer(2))))") def test_parse_function_declaration_with_arguments_and_variables(self): lexer = DLLexer() parser = DLParser() source_string = """ /* Declare a function named 'buz', with one argument, and a variable declaration. */ buz(x); int y; { y = 2; return x * y } /* Call 'buz' in the main body of the program. */ { z = buz(3) } """ tokens = lexer.tokenize(source_string) result = parser.parse(tokens) self.assertIsInstance(result, Program) self.assertEqual(str(result.declarations), "Declarations(FunctionDeclaration(buz, Arguments(Variable(x)), VariableDeclarations(INT, Variable(y)), Block(Assign(Variable(y), Integer(2)), Return(BinOp(MULTIPLYOP, Variable(x), Variable(y))))))") self.assertEqual(str(result.body), "Block(Assign(Variable(z), FunctionCall(buz, Arguments(Integer(3)))))") def test_parse_code_example(self): lexer = DLLexer() parser = DLParser() source_file = open("tests/simple.dl",'r') source_string = source_file.read() tokens = lexer.tokenize(source_string) source_file.close() result = parser.parse(tokens) self.assertIsInstance(result, Program) decs = result.declarations self.assertIsInstance(decs, Declarations) func = decs.declarations[0] self.assertIsInstance(func, FunctionDeclaration) self.assertEqual(func.name, "factorial") self.assertEqual(str(func.args), "Arguments(Variable(n))") self.assertEqual(func.vars, None) self.assertIsInstance(func.body, Block) conditional = func.body.statements[0] self.assertIsInstance(conditional, If) self.assertEqual(str(conditional.condition), "RelOp(EQOP, Variable(n), Integer(0))") self.assertEqual(str(conditional.body_true), "Block(Return(Integer(1)))") self.assertEqual(str(conditional.body_else), "Block(Return(BinOp(MULTIPLYOP, Variable(n), FunctionCall(factorial, Arguments(BinOp(MINUSOP, Variable(n), Integer(1)))))))") main_vars = decs.declarations[1] self.assertIsInstance(main_vars, VariableDeclarations) self.assertEqual(str(main_vars), "VariableDeclarations(INT, Variable(x))") main = result.body self.assertIsInstance(main, Block) first = main.statements[0] self.assertEqual(str(first), "Assign(Variable(x), Integer(1))") second = main.statements[1] self.assertIsInstance(second, While) 
self.assertEqual(str(second.condition), "RelOp(LEOP, Variable(x), Integer(10))") self.assertEqual(str(second.body), "Block(Print(FunctionCall(factorial, Arguments(Variable(x)))), Assign(Variable(x), BinOp(PLUSOP, Variable(x), Integer(1))))") if __name__ == '__main__': unittest.main()
UNIFI_DEFAULT_HOST = "unifi" UNIFI_DEFAULT_PORT = 443 UNIFI_DEFAULT_USERNAME = "admin" UNIFI_DEFAULT_PASSWORD = "ubnt" UNIFI_DEFAULT_SITE = "default" MQTT_DEFAULT_HOST = "localhost" MQTT_DEFAULT_PORT = 1883 MQTT_DEFAULT_NAME = "unifi" MQTT_DEFAULT_USERNAME = "mqtt" MQTT_DEFAULT_PASSWORD = "mqtt"
"""shell pip install autokeras """ import os import numpy as np import tensorflow as tf from sklearn.datasets import load_files import autokeras as ak """ ## A Simple Example The first step is to prepare your data. Here we use the [IMDB dataset](https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification) as an example. """ dataset = tf.keras.utils.get_file( fname="aclImdb.tar.gz", origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz", extract=True, ) # set path to dataset IMDB_DATADIR = os.path.join(os.path.dirname(dataset), "aclImdb") classes = ["pos", "neg"] train_data = load_files( os.path.join(IMDB_DATADIR, "train"), shuffle=True, categories=classes ) test_data = load_files( os.path.join(IMDB_DATADIR, "test"), shuffle=False, categories=classes ) x_train = np.array(train_data.data) y_train = np.array(train_data.target) x_test = np.array(test_data.data) y_test = np.array(test_data.target) print(x_train.shape) # (25000,) print(y_train.shape) # (25000, 1) print(x_train[0][:50]) # this film was just brilliant casting """ The second step is to run the [TextClassifier](/text_classifier). As a quick demo, we set epochs to 2. You can also leave the epochs unspecified for an adaptive number of epochs. """ # Initialize the text classifier. clf = ak.TextClassifier( overwrite=True, max_trials=1 ) # It only tries 1 model as a quick demo. # Feed the text classifier with training data. clf.fit(x_train, y_train, epochs=2) # Predict with the best model. predicted_y = clf.predict(x_test) # Evaluate the best model with testing data. print(clf.evaluate(x_test, y_test)) """ ## Validation Data By default, AutoKeras use the last 20% of training data as validation data. As shown in the example below, you can use `validation_split` to specify the percentage. """ clf.fit( x_train, y_train, # Split the training data and use the last 15% as validation data. validation_split=0.15, ) """ You can also use your own validation set instead of splitting it from the training data with `validation_data`. """ split = 5000 x_val = x_train[split:] y_val = y_train[split:] x_train = x_train[:split] y_train = y_train[:split] clf.fit( x_train, y_train, epochs=2, # Use your own validation set. validation_data=(x_val, y_val), ) """ ## Customized Search Space For advanced users, you may customize your search space by using [AutoModel](/auto_model/#automodel-class) instead of [TextClassifier](/text_classifier). You can configure the [TextBlock](/block/#textblock-class) for some high-level configurations, e.g., `vectorizer` for the type of text vectorization method to use. You can use 'sequence', which uses [TextToInteSequence](/block/#texttointsequence-class) to convert the words to integers and use [Embedding](/block/#embedding-class) for embedding the integer sequences, or you can use 'ngram', which uses [TextToNgramVector](/block/#texttongramvector-class) to vectorize the sentences. You can also do not specify these arguments, which would leave the different choices to be tuned automatically. See the following example for detail. """ input_node = ak.TextInput() output_node = ak.TextBlock(block_type="ngram")(input_node) output_node = ak.ClassificationHead()(output_node) clf = ak.AutoModel( inputs=input_node, outputs=output_node, overwrite=True, max_trials=1 ) clf.fit(x_train, y_train, epochs=2) """ The usage of [AutoModel](/auto_model/#automodel-class) is similar to the [functional API](https://www.tensorflow.org/guide/keras/functional) of Keras. 
Basically, you are building a graph, whose edges are blocks and the nodes are
intermediate outputs of blocks.
To add an edge from `input_node` to `output_node`, use
`output_node = ak.[some_block]([block_args])(input_node)`.

You can also use even more fine-grained blocks to customize the search space
further. See the following example.
"""

input_node = ak.TextInput()
output_node = ak.TextToIntSequence()(input_node)
output_node = ak.Embedding()(output_node)
# Use separable Conv layers in Keras.
output_node = ak.ConvBlock(separable=True)(output_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
    inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
clf.fit(x_train, y_train, epochs=2)

"""
## Data Format

The AutoKeras TextClassifier is quite flexible for the data format.

For the text, the input data should be one-dimensional.
For the classification labels, AutoKeras accepts both plain labels, i.e. strings or
integers, and one-hot encoded labels, i.e. vectors of 0s and 1s.

We also support using [tf.data.Dataset](
https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable) format for
the training data.
"""

train_set = tf.data.Dataset.from_tensor_slices(((x_train,), (y_train,))).batch(32)
test_set = tf.data.Dataset.from_tensor_slices(((x_test,), (y_test,))).batch(32)

clf = ak.TextClassifier(overwrite=True, max_trials=2)
# Feed the tensorflow Dataset to the classifier.
clf.fit(train_set, epochs=2)
# Predict with the best model.
predicted_y = clf.predict(test_set)
# Evaluate the best model with testing data.
print(clf.evaluate(test_set))

"""
## Reference

[TextClassifier](/text_classifier),
[AutoModel](/auto_model/#automodel-class),
[TextBlock](/block/#textblock-class),
[TextToIntSequence](/block/#texttointsequence-class),
[Embedding](/block/#embedding-class),
[TextToNgramVector](/block/#texttongramvector-class),
[ConvBlock](/block/#convblock-class),
[TextInput](/node/#textinput-class),
[ClassificationHead](/block/#classificationhead-class).
"""
import sys from functools import reduce from operator import add from .lib import validate, Bracket def main(): print(reduce(add, map(lambda b: b.score if type(b) == Bracket else 0, map(validate, sys.stdin)))) if __name__ == '__main__': main()
import hassapi as hass import sys import yaml # # Centralizes messaging. # # Args: # # Version 2.0: # Initial Version class Notifier_Dispatch(hass.Hass): def initialize(self): self.gh_tts_google_mode = self.args.get("gh_tts_google_mode") self.gh_switch_entity = self.args.get("gh_switch") self.gh_selected_media_player = self.args.get("gh_selected_media_player") self.alexa_switch_entity = self.args.get("alexa_switch") self.tts_language = self.args.get("tts_language") self.tts_period_of_day_volume = self.args.get("tts_period_of_day_volume") self.tts_dnd = self.args.get("dnd") self.text_notifications = self.args.get("text_notifications") self.screen_notifications = self.args.get("screen_notifications") self.speech_notifications = self.args.get("speech_notifications") self.phone_notifications = self.args.get("phone_notifications") self.html_mode = self.args.get("html_mode") self.text_notify = self.args.get("text_notify") self.phone_notify = self.args.get("phone_notify") self.priority_message = self.args.get("priority_message") self.guest_mode = self.args.get("guest_mode") self.persistent_notification_info = self.args.get("persistent_notification_info") self.location_tracker = self.args.get("location_tracker") self.personal_assistant_name = self.args.get("personal_assistant_name") self.phone_called_number = self.args.get("phone_called_number") self.sensor = self.args.get("sensor") self.set_state(self.sensor, state="on") #### FROM SECRET FILE ### config = self.get_plugin_config() config_dir = config["config_dir"] self.log(f"configuration dir: {config_dir}") secretsFile = config_dir + "/packages/secrets.yaml" with open(secretsFile, "r") as ymlfile: cfg = yaml.load(ymlfile, Loader=yaml.FullLoader) # yaml.safe_load self.gh_tts = cfg.get("tts_google", "google_translate_say") self.gh_notify = cfg.get("notify_google", "google_assistant") self.phone_sip_server = cfg.get("sip_server_name", "fritz.box:5060") self.gh_tts_cloud = cfg.get("tts_google_cloud", "google_cloud") self.reverso_tts = cfg.get("reverso_tts", "reversotts_say") ### APP MANAGER ### self.notification_manager = self.get_app("Notification_Manager") self.gh_manager = self.get_app("GH_Manager") self.alexa_manager = self.get_app("Alexa_Manager") self.phone_manager = self.get_app("Phone_Manager") ### LISTEN EVENT ### self.listen_event(self.notify_hub, "hub") ##################################################################### def check_flag(self, data): return str(data).lower() in ["1", "true", "on", "yes"] def check_location(self, data, location): return str(data).lower() == "" or str(data).lower() == location def check_notify(self, data): return False if (str(data).lower() in ["false", "off", "no"] or data == "0" or data == 0) else True def convert(self, lst): return {lst[1]: lst[3]} def createTTSdict(self, data) -> list: dizionario = "" if data == "" or (not self.check_notify(data)): flag = False elif str(data).lower() in ["1","true","on","yes"]: flag = True dizionario = {} else: if "OrderedDict([(" in str(data): dizionario = self.convert(list(data.split("'"))) if dizionario.get("mode") != None: flag = self.check_flag(dizionario["mode"]) else: flag = True else: dizionario = data if isinstance(data, dict) else eval(data) # convert to dict if dizionario.get("mode") != None: flag = self.check_flag(dizionario["mode"]) else: flag = True return [flag,dizionario] def notify_hub(self, event_name, data, kwargs): self.log("#### START NOTIFIER_DISPATCH ####") location_status = self.get_state(self.location_tracker) ### FLAG priority_flag = 
self.check_flag(data["priority"]) noshow_flag = self.check_flag(data["no_show"]) location_flag = self.check_location(data["location"], location_status) notify_flag = self.check_notify(data["notify"]) ### GOOGLE #### google_flag = self.createTTSdict(data["google"])[0] if len(str(data["google"])) != 0 else False google = self.createTTSdict(data["google"])[1] if len(str(data["google"])) != 0 else False ### ALEXA #### alexa_flag = self.createTTSdict(data["alexa"])[0] if len(str(data["alexa"])) != 0 else False alexa = self.createTTSdict(data["alexa"])[1] if len(str(data["alexa"])) != 0 else False ### FROM INPUT BOOLEAN ### dnd_status = self.get_state(self.tts_dnd) guest_status = self.get_state(self.guest_mode) priority_status = (self.get_state(self.priority_message) == "on") or priority_flag ### FROM INPUT SELECT ### notify_name = self.get_state(self.text_notify) phone_notify_name = self.get_state(self.phone_notify) ### NOTIFICATION ### if priority_status: useNotification = True elif ( self.get_state(self.text_notifications) == "on" and data["message"] != "" and notify_flag and location_flag ): useNotification = True else: useNotification = False ### PERSISTENT ### if priority_status: usePersistentNotification = True elif self.get_state(self.screen_notifications) == "on" and data["message"] != "" and not noshow_flag: usePersistentNotification = True else: usePersistentNotification = False ### TTS ### if priority_status: useTTS = True elif ( self.get_state(self.speech_notifications) == "on" and dnd_status == "off" and (location_status == "home" or guest_status == "on") ): useTTS = True else: useTTS = False ### PHONE ### if priority_status: usePhone = True elif self.get_state(self.phone_notifications) == "on" and data["message"] != "" and dnd_status == "off": usePhone = True else: usePhone = False ### TTS SWITCH ### gh_switch = self.get_state(self.gh_switch_entity) alexa_switch = self.get_state(self.alexa_switch_entity) ### SERVIZIO TTS/NOTIFY DI GOOGLE ### if self.get_state(self.gh_tts_google_mode) != None: if self.get_state(self.gh_tts_google_mode).lower() == "reverso": gh_notifica = self.reverso_tts elif self.get_state(self.gh_tts_google_mode).lower() == "google cloud": gh_notifica = self.gh_tts_cloud elif self.get_state(self.gh_tts_google_mode).lower() == "google say": gh_notifica = self.gh_tts else: gh_notifica = self.gh_notify ### FROM SCRIPT_NOTIFY ### if data["called_number"] == "": data.update({"called_number": self.get_state(self.phone_called_number)}) if data["html"] == "": data.update({"html": self.get_state(self.html_mode)}) ########################### if usePersistentNotification: try: self.notification_manager.send_persistent(data, self.persistent_notification_info) except Exception as ex: self.log("An error occurred in persistent notification: {}".format(ex), level="ERROR") self.set_state(self.sensor, state="Error in Persistent Notification: {}".format(ex)) self.log(sys.exc_info()) if useNotification: try: self.notification_manager.send_notify(data, notify_name, self.get_state(self.personal_assistant_name)) except Exception as ex: self.log("An error occurred in text-telegram notification: {}".format(ex), level="ERROR") self.set_state(self.sensor, state="Error in Text Notification: {}".format(ex)) self.log(sys.exc_info()) if useTTS: if gh_switch == "on" and google_flag: if (data["google"]) != "": if "media_player" not in google: google["media_player"] = self.get_state(self.gh_selected_media_player) if "volume" not in google: google["volume"] = 
float(self.get_state(self.tts_period_of_day_volume)) / 100 if "media_content_id" not in google: google["media_content_id"] = "" if "media_content_type" not in google: google["media_content_type"] = "" if "message_tts" not in google: google["message_tts"] = data["message"] if "language" not in google: google["language"] = self.get_state(self.tts_language).lower() self.gh_manager.speak(google, self.get_state(self.gh_tts_google_mode), gh_notifica) if alexa_switch == "on" and alexa_flag: if (data["alexa"]) != "": if "message_tts" not in alexa: alexa["message_tts"] = data["message"] if "title" not in alexa: alexa["title"] = data["title"] if "volume" not in alexa: alexa["volume"] = float(self.get_state(self.tts_period_of_day_volume)) / 100 if "language" not in alexa: alexa["language"] = self.get_state(self.tts_language) self.alexa_manager.speak(alexa) if usePhone: try: language = self.get_state(self.tts_language) self.phone_manager.send_voice_call(data, phone_notify_name, self.phone_sip_server, language) except Exception as ex: self.log("An error occurred in phone notification: {}".format(ex), level="ERROR") self.set_state(self.sensor, state="Error in Phone Notification: {}".format(ex)) self.log(sys.exc_info()) ### ripristino del priority a OFF if self.get_state(self.priority_message) == "on": self.set_state(self.priority_message, state="off")
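# Example (illustrative): this app reacts to the custom "hub" event. The event
# data is expected to carry at least the keys read in notify_hub() above, e.g.
#
#     self.fire_event(
#         "hub",
#         title="Garage", message="The garage door is open",
#         priority=0, no_show=0, location="", notify=1,
#         google="on", alexa="off", called_number="", html="",
#     )
#
# (values here are made up; fire_event is the standard AppDaemon call.)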
from typing import List class Solution: def findDiagonalOrder(self, nums: List[List[int]]) -> List[int]: m = [] for i, row in enumerate(nums): for j, v in enumerate(row): if i + j >= len(m): m.append([]) m[i+j].append(v) return [v for d in m for v in reversed(d)] nums = [[1,2,3],[4,5,6],[7,8,9]] res = Solution().findDiagonalOrder(nums) print(res)
# -*- coding: utf8 -*- # Copyright (c) 2019 Niklas Rosenstein # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ Provides a processor that implements various filter capabilities. """ from nr.databind.core import Field, Struct from nr.interface import implements from pydoc_markdown.interfaces import Processor from pydoc_markdown.reflection import ModuleGraph @implements(Processor) class FilterProcessor(Struct): """ The `filter` processor removes module and class members based on certain criteria. # Example ```py - type: filter expression: not name.startswith('_') and default() ``` """ expression = Field(str, default=None) documented_only = Field(bool, default=True) exclude_private = Field(bool, default=True) exclude_special = Field(bool, default=True) include_root_objects = Field(bool, default=True) SPECIAL_MEMBERS = ('__path__', '__annotations__', '__name__', '__all__') def process(self, graph, _resolver): graph.visit(self._process_member) def _process_member(self, node): def _check(node): if self.documented_only and not node.docstring: return False if self.exclude_private and node.name.startswith('_') and not node.name.endswith('_'): return False if self.exclude_special and node.name in self.SPECIAL_MEMBERS: return False return True if self.expression: scope = {'name': node.name, 'node': node, 'default': _check} if not eval(self.expression, scope): # pylint: disable=eval-used node.visible = False if self.include_root_objects and ( not node.parent or isinstance(node.parent, ModuleGraph)): return if not _check(node): node.visible = False
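# Another illustrative `expression` (not from the original docs): keep only
# documented members whose names do not look like tests, on top of the default
# checks. `name`, `node` and `default` are the names made available to the
# expression by _process_member() above.
#
#   - type: filter
#     expression: not name.startswith('test_') and default()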
# Copyright 2022 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """get args""" import ast import argparse def get_args(phase): """Define the common options that are used in training.""" parser = argparse.ArgumentParser(description='WGAN') parser.add_argument('--device_target', default='Ascend', help='enables npu') parser.add_argument('--device_id', type=int, default=0) if phase == 'train': parser.add_argument('--dataset', default='lsun', help='cifar10 | lsun') parser.add_argument('--dataroot', default=None, help='path to dataset') parser.add_argument('--is_modelarts', type=ast.literal_eval, default=False, help='train in Modelarts or not') parser.add_argument('--data_url', default=None, help='Location of data.') parser.add_argument('--train_url', default=None, help='Location of training outputs.') parser.add_argument('--workers', type=int, help='number of data loading workers', default=8) parser.add_argument('--batchSize', type=int, default=64, help='input batch size') parser.add_argument('--imageSize', type=int, default=64, help='the height/width of the input image to network') parser.add_argument('--nc', type=int, default=3, help='input image channels') parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector') parser.add_argument('--ngf', type=int, default=64) parser.add_argument('--ndf', type=int, default=64) parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for') parser.add_argument('--lrD', type=float, default=0.00005, help='learning rate for Critic, default=0.00005') parser.add_argument('--lrG', type=float, default=0.00005, help='learning rate for Generator, default=0.00005') parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. 
default=0.5') parser.add_argument('--netG', default='', help="path to netG (to continue training)") parser.add_argument('--netD', default='', help="path to netD (to continue training)") parser.add_argument('--clamp_lower', type=float, default=-0.01) parser.add_argument('--clamp_upper', type=float, default=0.01) parser.add_argument('--Diters', type=int, default=5, help='number of D iters per each G iter') parser.add_argument('--noBN', type=ast.literal_eval, default=False, help='use batchnorm or not (for DCGAN)') parser.add_argument('--n_extra_layers', type=int, default=0, help='Number of extra layers on gen and disc') parser.add_argument('--experiment', default=None, help='Where to store samples and models') parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)') elif phase == 'export': parser.add_argument('--config', required=True, type=str, help='path to generator config .json file') parser.add_argument('--ckpt_file', type=str, required=True, help="Checkpoint file path.") parser.add_argument('--file_name', type=str, default="WGAN", help="output file name prefix.") parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], \ default='AIR', help='file format') parser.add_argument('--nimages', required=True, type=int, help="number of images to generate", default=1) elif phase == 'eval': parser.add_argument('--config', required=True, type=str, help='path to generator config .json file') parser.add_argument('--ckpt_file', required=True, type=str, help='path to generator weights .ckpt file') parser.add_argument('--output_dir', required=True, type=str, help="path to output directory") parser.add_argument('--nimages', required=True, type=int, help="number of images to generate", default=1) parser.add_argument('--input_seed', type=int, help="seed used for generation", default=None) elif phase == 'eval_onnx': parser.add_argument('--config', required=True, type=str, help='path to generator config .json file') parser.add_argument('--file_name', required=True, type=str, help='path to ONNX model') parser.add_argument('--output_dir', required=True, type=str, help="path to output directory") parser.add_argument('--nimages', type=int, help="number of images to generate", default=1) parser.add_argument('--input_seed', type=int, help="seed used for generation", default=None) elif phase == 'pre310': parser.add_argument('--config', required=True, type=str, help='path to generator config .json file') parser.add_argument('--pre_result_path', type=str, help="preprocess dir", default='./preprocess_Result/') parser.add_argument('--nimages', required=True, type=int, help="number of images to generate", default=1) elif phase == 'post310': parser.add_argument('--config', required=True, type=str, help='path to generator config .json file') parser.add_argument('--output_dir', type=str, help="path to output directory", default='./infer_output') parser.add_argument('--post_result_path', type=str, help="postprocess dir", default='./result_Files') parser.add_argument('--nimages', required=True, type=int, help="number of images to generate", default=1) args_opt = parser.parse_args() return args_opt
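# Example (illustrative) of how these parsers are typically driven from the
# command line; the script and file names below are hypothetical:
#
#     python train.py --dataset lsun --dataroot /path/to/lsun --niter 25
#     python eval.py --config generator_config.json --ckpt_file WGAN.ckpt \
#         --output_dir ./out --nimages 4
#
# after which the script calls, e.g., `args_opt = get_args('train')`.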
from yahoo.YahooSocket import to_ydict, from_ydict, argsep

import unittest
import string

d = {'1': 'penultimatefire', '2': 'some ascii'}
d2 = {1: 'penultimatefire', 2: 'some ascii'}
# Expected wire format: every key and every value is terminated by the argsep bytes.
ydict_bytes = '1\xc0\x80penultimatefire\xc0\x802\xc0\x80some ascii\xc0\x80'

class YahooTestingSuite(unittest.TestCase):

    def testYDictConstruction(self):
        template = string.Template("1${a}penultimatefire${a}2${a}some ascii${a}")
        self.assertEqual(to_ydict(d), to_ydict(d2))
        self.assertEqual(to_ydict(d), template.substitute(a=argsep))
        self.assertEqual(to_ydict(d), ydict_bytes)

    def testYDictFromNetwork(self):
        self.assertEqual(from_ydict(ydict_bytes), [d])
        self.assertEqual(from_ydict(""), {})

if __name__ == "__main__":
    unittest.main()
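# Illustrative sketch only: the expected byte-string fixture above implies the
# YMSG key/value wire layout, in which every key and every value is terminated
# by the two-byte separator exposed by the library as `argsep` ('\xc0\x80').
# The helper below is an assumption-level restatement of that layout for
# clarity; it is not the actual yahoo.YahooSocket implementation.
def _ydict_layout_sketch(pairs, sep='\xc0\x80'):
    chunks = []
    for key, value in pairs.items():
        chunks.extend([str(key), sep, str(value), sep])
    return ''.join(chunks)

# e.g. _ydict_layout_sketch(d) reproduces the fixture above when the dict
# iterates its keys in insertion order.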
from __future__ import division from models import * from utils.utils import * from utils.datasets import * from utils.parse_config import * import os import sys import time import datetime import argparse import torch from torch.utils.data import DataLoader from torch.utils.data import Subset from torchvision import datasets from torchvision import transforms from torch.autograd import Variable import torch.optim as optim parser = argparse.ArgumentParser() parser.add_argument("--epochs", type=int, default=2, help="number of epochs") # parser.add_argument("--image_folder", type=str, default="data/samples", help="path to dataset") parser.add_argument("--batch_size", type=int, default=2, help="size of each image batch") parser.add_argument("--model_config_path", type=str, default="./config/yolov3-custom-classes.cfg", help="path to model config file") # parser.add_argument("--data_config_path", type=str, default="config/coco.data", help="path to data config file") parser.add_argument("--train_path", type=str, default="./data/bdd100k_images/bdd100k/images/100k/train", help="path to trainset") parser.add_argument("--label_path", type=str, default="./data/bdd100k_labels_release/bdd100k/labels/simple_train_labels.json", help="abs path to train labels") parser.add_argument("--weights_path", type=str, default="weights/yolov3.weights", help="path to weights file") parser.add_argument("--class_path", type=str, default="./data/class.names", help="path to class label file") parser.add_argument("--conf_thres", type=float, default=0.8, help="object confidence threshold") parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression") parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation") parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension") parser.add_argument("--checkpoint_interval", type=int, default=1, help="interval between saving model weights") parser.add_argument( "--checkpoint_dir", type=str, default="checkpoints", help="directory where model checkpoints are saved" ) parser.add_argument("--use_cuda", type=bool, default=True, help="whether to use cuda if available") opt = parser.parse_args() print(opt) cuda = torch.cuda.is_available() and opt.use_cuda os.makedirs("output", exist_ok=True) os.makedirs("checkpoints", exist_ok=True) classes = load_classes(opt.class_path) # Get data and labels path train_path = opt.train_path label_path = opt.label_path classname_path = opt.class_path # Get hyper parameters hyperparams = parse_model_config(opt.model_config_path)[0] learning_rate = float(hyperparams["learning_rate"]) momentum = float(hyperparams["momentum"]) decay = float(hyperparams["decay"]) burn_in = int(hyperparams["burn_in"]) # Initiate model model = Darknet(opt.model_config_path, img_size=opt.img_size) model.load_weights(opt.weights_path) # model.apply(weights_init_normal) if cuda: model = model.cuda() model.train() # Get dataloader # Using first 6500 imgs in train folder as trainset dataloader = torch.utils.data.DataLoader( Subset(ListDataset(train_path, label_path, classname_path, img_size=opt.img_size), range(6)), batch_size=opt.batch_size, shuffle=True, num_workers=opt.n_cpu ) Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters())) optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=learning_rate, weight_decay=decay, 
momentum=momentum) scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda x: ((x+1)/burn_in)**2, last_epoch=-1) for epoch in range(opt.epochs): start_time = time.time() for batch_i, (_, imgs, targets) in enumerate(dataloader): batch_starttime = time.time() #update lr in burn_in if epoch==0 and batch_i<burn_in: scheduler.step() imgs = Variable(imgs.type(Tensor)) targets = Variable(targets.type(Tensor), requires_grad=False) optimizer.zero_grad() loss = model(imgs, targets) loss.backward() optimizer.step() model.seen += imgs.size(0) batch_endtime = time.time() batchtime = batch_endtime-batch_starttime print( "[Epoch %d/%d, Batch %d/%d] [Losses: x %f, y %f, w %f, h %f, conf %f, cls %f, total %f, recall: %.5f, precision: %.5f, Time: %.2fs, lr: %.10f]" % ( epoch, opt.epochs, batch_i, len(dataloader), model.losses["x"], model.losses["y"], model.losses["w"], model.losses["h"], model.losses["conf"], model.losses["cls"], loss.item(), model.losses["recall"], model.losses["precision"], batchtime, optimizer.param_groups[0]['lr'] ) ) if (epoch+1) % opt.checkpoint_interval == 0: model.save_weights("%s/%d.weights" % (opt.checkpoint_dir, epoch+1)) end_time = time.time() epoch_time = end_time-start_time print(f'Epoch {epoch+1} done! Time used: {epoch_time:.2f}s') print(f'Time on each img: {epoch_time/6:.2f}s')
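# Illustrative note on the warm-up schedule above: LambdaLR scales the base
# learning rate by ((step + 1) / burn_in) ** 2 and is only stepped during the
# first `burn_in` batches of epoch 0, giving a quadratic ramp from near zero up
# to the configured lr. burn_in = 1000 below is just an example value; the real
# one is read from the model .cfg file.
def _burn_in_multiplier(step, burn_in=1000):
    return ((step + 1) / burn_in) ** 2

# _burn_in_multiplier(0)   -> 1e-06 (almost zero on the first batch)
# _burn_in_multiplier(499) -> 0.25  (a quarter of the base lr halfway through)
# _burn_in_multiplier(999) -> 1.0   (full base lr once burn-in completes)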
import abc import fnmatch import os from typing import List, Optional, Union import uqbar.objects import supriya def _search(pattern: str, root_path: str): search_path, pattern = os.path.split(pattern) search_path = os.path.expanduser(search_path) if not search_path: search_path = os.path.join(root_path, "assets") elif not os.path.isabs(search_path): search_path = os.path.join(root_path, "assets", search_path) result: List[str] = [] result = os.listdir(search_path) result = fnmatch.filter(result, pattern) result = [os.path.join(search_path, _) for _ in result] if len(result) == 1: return result[0] return result class _AssetsMeta(abc.ABCMeta): root_path: str = supriya.__path__[0] # type: ignore def __getitem__(self, pattern) -> Union[str, List[str]]: return _search(pattern, self.root_path) class Assets(metaclass=_AssetsMeta): def __init__(self, root_path: str) -> None: self.root_path = root_path def __getitem__(self, pattern) -> Union[str, List[str]]: return _search(pattern, self.root_path) class SupriyaObject(metaclass=abc.ABCMeta): """ Abstract base class from which many custom classes inherit. """ ### CLASS VARIABLES ### __documentation_section__: Optional[str] = None __slots__ = () ### SPECIAL METHODS ### def __repr__(self): return uqbar.objects.get_repr(self, multiline=True) class SupriyaValueObject(SupriyaObject): ### CLASS VARIABLES ### __slots__ = () ### SPECIAL METHODS ### def __copy__(self, *args): return uqbar.objects.new(self) def __eq__(self, expr): self_values = type(self), uqbar.objects.get_vars(self) try: expr_values = type(expr), uqbar.objects.get_vars(expr) except AttributeError: expr_values = type(expr), expr return self_values == expr_values def __hash__(self): args, var_args, kwargs = uqbar.objects.get_vars(self) hash_values = [type(self)] hash_values.append(tuple(args.items())) hash_values.append(tuple(var_args)) hash_values.append(tuple(sorted(kwargs.items()))) return hash(tuple(hash_values))
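# Illustrative usage sketch (placeholder paths, not part of the original
# module): _search() above resolves glob patterns relative to an "assets"
# directory and returns a single path when exactly one file matches, otherwise
# a list of paths.
if __name__ == "__main__":
    assets_dir = os.path.join(_AssetsMeta.root_path, "assets")
    if os.path.isdir(assets_dir):
        # Class-level lookup searches supriya's bundled assets directory.
        print(Assets["*"])
    # An instance can point the same lookup at any project root that has its
    # own "assets" folder; the path below is a made-up placeholder.
    # project_assets = Assets("/path/to/my/project")
    # print(project_assets["samples/*.wav"])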
import torch
# torch.multiprocessing.set_sharing_strategy('file_system')
import pandas as pd
import numpy as np
import sys
import os
import pickle
import argparse

# local imports
import train_model
import experiment_routines as experiments
from subsetting_exp import subset_experiment # can add different
from dataset import get_data_loaders

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('group_key', type=str, help='which key to subset on')
    parser.add_argument('--num_seeds', type=int, default=5, help='number of seeds to run')
    parser.add_argument('--seed_beginning', type=int, default=0, help='seed to start with')
    parser.add_argument('--dataset_name', type=str, default='isic', help='which dataset to use (isic | cifar)')
    args = parser.parse_args()

    group_key = args.group_key
    num_seeds, seed_beginning = args.num_seeds, args.seed_beginning
    dataset_name = args.dataset_name

    fracs_group_a = [0.02, 0.05, 0.2, 0.5, 0.8, 0.95, 0.98]

    learning_rates = [0.01, 0.001, 0.0001]
    weight_decays = [.01, .001, 0.0001]
    # momentums = [0, 0.9]
    momentums = [0.9]
    # number_of_epochs = [10,20]
    number_of_epochs = [20]

    if dataset_name == 'isic':
        label_colname_ = 'benign_malignant_01'
        original_data_csv_ = 'isic/fresh/int_data/df_no_sonic.csv'
        image_fp_ = 'isic/fresh/Data/Images'
        all_group_colnames_ = ['study_name_id', 'age_approx_by_decade_id', 'age_over_45_id',
                               'age_over_50_id', 'age_over_60_id', 'anatom_site_general_id', 'sex_id']
        results_descriptor_ = 'isic_hpSel'
    elif dataset_name == 'cifar':
        if (group_key == "air"):
            label_colname_ = "animal"
        else:
            label_colname_ = "air"
        original_data_csv_ = 'cifar4/df_cifar4_labels.csv'
        image_fp_ = 'cifar4/images'
        all_group_colnames_ = ["air", "animal"]
        results_descriptor_ = 'cifar4_subsetting_hpSel_debug'
    else:
        print('TODO: need to input the data and image files')

    for lr in learning_rates:
        for wd in weight_decays:
            for momentum in momentums:
                for num_epochs in number_of_epochs:
                    sgd_params = {'lr': lr, 'weight_decay': wd, 'momentum': momentum}
                    subset_experiment(num_seeds, seed_beginning, group_key,
                                      label_colname=label_colname_,
                                      original_data_csv=original_data_csv_,
                                      image_fp=image_fp_,
                                      all_group_colnames=all_group_colnames_,
                                      fracs_group_a=fracs_group_a,
                                      eval_key='val',
                                      results_descriptor=results_descriptor_,
                                      num_epochs=num_epochs,
                                      sgd_params=sgd_params,
                                      )
#!/usr/bin/env python # -*- coding: UTF-8 -*- import pymongo from pymongo import MongoClient from nltk.stem.snowball import SnowballStemmer from collections import defaultdict, OrderedDict import math import codecs import SR_2_Twitter_users as SR2 STEMMER = SnowballStemmer("english", ignore_stopwords=True ) f_in = "graph_data_100K.tab" f_out = "graph_data_with_SR_100K.tab" def read_in_graph_data(f_in): usr_data = defaultdict(int) with codecs.open(f_in,'r', encoding='utf8') as input_file: for line in input_file: line = line.split() usr1 = line[0] usr2 = line[1] weight = line[2] usr_data[(usr1, usr2)] = weight return usr_data def calculate_edges_SR(): usr_data = read_in_graph_data(f_in) usr_data_with_SR = defaultdict(int) for e in usr_data.iterkeys(): usr1 = e[0] usr2 = e[1] weight = usr_data[(usr1, usr2)] usr_data_with_SR[(usr1, usr2)] = (weight, SR2.SR_2_users(usr1, usr2)) print usr1, usr2, weight, usr_data_with_SR[(usr1, usr2)] return usr_data_with_SR def save_weighted_edges_SR(f_out): usr_data_with_SR = calculate_edges_SR() with codecs.open(f_out,'w', encoding='utf8') as output_file: for key in usr_data_with_SR: usr1 = key[0] usr2 = key[1] output_file.write(str(usr1) + '\t' + str(usr2) + '\t' + str(usr_data_with_SR[key][0]) + '\t' + str(usr_data_with_SR[key][1]) + '\n') save_weighted_edges_SR(f_out)
from django.shortcuts import render
from django.views.generic.detail import DetailView

from .models import Page


class PageView(DetailView):
    model = Page


def index(request):
    return render(request, 'pages/index.html', dict(pages=Page.objects.all()))
import statistics import time import random import multiprocessing import pytest import sys sys.path.append('..') import doctable def example_func(x, y=2): return x**y def example_sleep_func(x): time.sleep(x/100000) def test_workerpool(n=100): pool = doctable.WorkerPool(1, verbose=False) assert(not pool.any_alive()) pool.start() assert(pool.any_alive()) print(pool) print(f'average efficiency: {pool.av_efficiency()}') pool.join() assert(not pool.any_alive()) with pytest.raises(doctable.NoWorkersAvailable): pool.av_efficiency() with doctable.WorkerPool(3, verbose=False) as pool: assert(pool.any_alive()) print(f'av efficiency: {pool.av_efficiency()}') # test most basic map function elements = list(range(100)) assert(pool.map(example_func, elements) == [example_func(e) for e in elements]) print(f'av efficiency: {pool.av_efficiency()}') elements = list(range(100)) with doctable.Timer(): with doctable.WorkerPool(10, verbose=False) as pool: pool.map(example_sleep_func, elements) print(f'av efficiency: {pool.av_efficiency()}') if __name__ == '__main__': test_workerpool()
# test imports
import sys
#import sys as system, os, re
#import re as regex
#from os import path
#from os import pipe as bar

print sys
#print system
#print regex
#print path
#print bar
from datetime import datetime import pytest from sqlalchemy.exc import IntegrityError from finance.exceptions import AssetNotFoundException, AssetValueUnavailableException from finance.models import ( Account, Asset, AssetValue, Granularity, Portfolio, Record, RecordType, Transaction, TransactionState, balance_adjustment, deposit, get_asset_by_fund_code, ) from finance.utils import parse_date, parse_datetime def test_create_model(): Account.create(institution="Chase", number="1234") # IntegrityError is raised due to the unique constraint with pytest.raises(IntegrityError): Account.create(institution="Chase", number="1234") assert not Account.create(institution="Chase", number="1234", ignore_if_exists=True) def test_stock_asset(stock_asset_ncsoft): assert stock_asset_ncsoft.bps assert stock_asset_ncsoft.eps def test_get_asset_by_fund_code(asset_sp500): asset = get_asset_by_fund_code("KR5223941018") assert asset.name == "KB Star S&P500" def test_get_asset_by_fund_code_non_existing(asset_sp500): with pytest.raises(AssetNotFoundException): get_asset_by_fund_code("non-exisiting") def test_get_asset_by_symbol(stock_asset_ncsoft): asset = Asset.get_by_symbol("036570.KS") assert asset.description == "NCsoft Corporation" def test_get_asset_by_symbol_non_existing(asset_sp500): with pytest.raises(AssetNotFoundException): Asset.get_by_symbol("non-exisiting") def test_get_asset_by_isin(stock_asset_nvda): asset = Asset.get_by_isin("US67066G1040") assert asset.code == "NVDA" def test_get_asset_by_isin_non_existing(stock_asset_nvda): with pytest.raises(AssetNotFoundException): Asset.get_by_isin("non-exisiting") def test_balance(account_checking, asset_krw, asset_usd): assert account_checking.balance() == {} deposit(account_checking, asset_krw, 1000, parse_date("2016-05-01")) assert account_checking.balance(parse_date("2016-05-19")) == {asset_krw: 1000} deposit(account_checking, asset_krw, -500, parse_date("2016-05-02")) assert account_checking.balance(parse_date("2016-05-19")) == {asset_krw: 500} deposit(account_checking, asset_usd, 25, parse_date("2016-05-03")) assert account_checking.balance(parse_date("2016-05-19")) == { asset_krw: 500, asset_usd: 25, } balance_adjustment(account_checking, asset_usd, 40, parse_date("2016-05-04")) assert account_checking.balance(parse_date("2016-05-19")) == { asset_krw: 500, asset_usd: 40, } # FIXME: I don't understand why `autouse` does not work for `session` fixture def test_portfolio(session, account_hf, asset_hf1, account_checking, asset_krw): portfolio = Portfolio() portfolio.base_asset = asset_krw portfolio.add_accounts(account_hf, account_checking) deposit(account_checking, asset_krw, 500000, parse_date("2015-12-04")) with Transaction.create() as t: deposit(account_checking, asset_krw, -500000, parse_date("2015-12-04"), t) deposit(account_hf, asset_hf1, 1, parse_date("2015-12-04"), t) # The net asset value shall not be available at this point with pytest.raises(AssetValueUnavailableException): net_worth = portfolio.net_worth( evaluated_at=parse_date("2015-12-04"), granularity=Granularity.day ) # Initial asset value AssetValue.create( evaluated_at=parse_date("2015-12-04"), asset=asset_hf1, base_asset=asset_krw, granularity=Granularity.day, close=500000, ) net_worth = portfolio.net_worth( evaluated_at=parse_date("2015-12-04"), granularity=Granularity.day ) assert 500000 == net_worth # 1st payment interest, tax, returned = 3923, 740, 30930 deposit(account_checking, asset_krw, returned, parse_date("2016-01-08")) # Remaining principle value after the 1st payment 
AssetValue.create( evaluated_at=parse_date("2016-01-08"), asset=asset_hf1, base_asset=asset_krw, granularity=Granularity.day, close=472253, ) net_worth = portfolio.net_worth( evaluated_at=parse_date("2016-01-08"), granularity=Granularity.day ) assert 500000 + (interest - tax) == net_worth # 2nd payment deposit(account_checking, asset_krw, 25016, parse_date("2016-02-05")) # Remaining principle value after the 2nd payment AssetValue.create( evaluated_at=parse_date("2016-02-05"), asset=asset_hf1, base_asset=asset_krw, granularity=Granularity.day, close=450195, ) session.delete(portfolio) session.commit() def test_portfolio_balance( session, account_checking, account_savings, account_sp500, asset_krw, asset_sp500 ): """Ensures a portfolio, which is essentially a collection of accounts, calculates its balance correctly. """ portfolio = Portfolio() portfolio.base_asset = asset_krw portfolio.add_accounts(account_checking, account_savings, account_sp500) assert portfolio.balance(parse_date("2016-05-20")) == {} deposit(account_checking, asset_krw, 1500, parse_date("2016-05-01")) deposit(account_savings, asset_krw, 3000, parse_date("2016-05-01")) deposit(account_sp500, asset_sp500, 120, parse_date("2016-05-01")) assert portfolio.balance(parse_date("2016-05-20")) == { asset_krw: 4500, asset_sp500: 120, } deposit(account_savings, asset_krw, 4000, parse_date("2016-05-02")) deposit(account_savings, asset_krw, 5000, parse_date("2016-05-03")) assert portfolio.balance(parse_date("2016-05-20")) == { asset_krw: 13500, asset_sp500: 120, } balance_adjustment(account_savings, asset_krw, 10000, parse_date("2016-05-04")) assert portfolio.balance(parse_date("2016-05-20")) == { asset_krw: 11500, asset_sp500: 120, } session.delete(portfolio) session.commit() def test_transaction(): with Transaction.create() as t: assert t.state == TransactionState.initiated assert t.state == TransactionState.closed t = Transaction.create() assert t.state == TransactionState.initiated t.close(closed_at=datetime.utcnow()) assert t.state == TransactionState.closed def test_records(account_checking, asset_krw): with Transaction.create() as t: record = deposit(account_checking, asset_krw, 1000, parse_date("2016-03-14"), t) # Make sure the record type has been set implictly assert RecordType.deposit == record.type with Transaction.create() as t: record = deposit( account_checking, asset_krw, -2000, parse_date("2016-03-14"), t ) # Make sure the record type has been set implictly assert RecordType.withdraw == record.type with Transaction.create() as t: record = balance_adjustment( account_checking, asset_krw, 3000, parse_date("2016-03-14"), t ) # Make sure the record type has been set explicitly assert RecordType.balance_adjustment == record.type def test_record_created_at(account_checking, asset_krw): record = deposit(account_checking, asset_krw, 1000) # `created_at` must be set as the time at which the record created assert record.created_at def test_net_worth_without_asset_value( session, request, account_sp500, asset_krw, asset_sp500 ): asset_values = AssetValue.query.filter_by(asset=asset_sp500) for asset_value in asset_values: session.delete(asset_value) session.commit() record = deposit(account_sp500, asset_sp500, 1000, parse_date("2016-05-27")) with pytest.raises(AssetValueUnavailableException): account_sp500.net_worth(parse_date("2016-05-28"), base_asset=asset_krw) def teardown(): session.delete(record) session.commit() request.addfinalizer(teardown) def test_account_net_worth_1(account_checking, asset_krw): assert 0 == 
account_checking.net_worth( evaluated_at=parse_date("2016-01-01"), base_asset=asset_krw ) assert 0 == account_checking.net_worth( evaluated_at=parse_date("2016-01-02"), base_asset=asset_krw ) assert 0 == account_checking.net_worth( evaluated_at=parse_date("2016-01-03"), base_asset=asset_krw ) assert 0 == account_checking.net_worth( evaluated_at=parse_date("2016-01-04"), base_asset=asset_krw ) with Transaction.create() as t: deposit(account_checking, asset_krw, 1000, parse_date("2016-01-01"), t) assert 1000 == account_checking.net_worth( evaluated_at=parse_date("2016-01-01"), base_asset=asset_krw ) assert 1000 == account_checking.net_worth( evaluated_at=parse_date("2016-01-02"), base_asset=asset_krw ) assert 1000 == account_checking.net_worth( evaluated_at=parse_date("2016-01-03"), base_asset=asset_krw ) assert 1000 == account_checking.net_worth( evaluated_at=parse_date("2016-01-04"), base_asset=asset_krw ) with Transaction.create() as t: deposit(account_checking, asset_krw, 2000, parse_date("2016-01-02"), t) assert 1000 == account_checking.net_worth( evaluated_at=parse_date("2016-01-01"), base_asset=asset_krw ) assert 3000 == account_checking.net_worth( evaluated_at=parse_date("2016-01-02"), base_asset=asset_krw ) assert 3000 == account_checking.net_worth( evaluated_at=parse_date("2016-01-03"), base_asset=asset_krw ) assert 3000 == account_checking.net_worth( evaluated_at=parse_date("2016-01-04"), base_asset=asset_krw ) with Transaction.create() as t: deposit(account_checking, asset_krw, -1500, parse_date("2016-01-03"), t) assert 1000 == account_checking.net_worth( evaluated_at=parse_date("2016-01-01"), base_asset=asset_krw ) assert 3000 == account_checking.net_worth( evaluated_at=parse_date("2016-01-02"), base_asset=asset_krw ) assert 1500 == account_checking.net_worth( evaluated_at=parse_date("2016-01-03"), base_asset=asset_krw ) assert 1500 == account_checking.net_worth( evaluated_at=parse_date("2016-01-04"), base_asset=asset_krw ) def test_account_net_worth_2(account_checking, account_sp500, asset_krw, asset_sp500): AssetValue.create( evaluated_at=parse_date("2016-02-25"), asset=asset_sp500, base_asset=asset_krw, granularity=Granularity.day, close=921.77, ) AssetValue.create( evaluated_at=parse_date("2016-02-24"), asset=asset_sp500, base_asset=asset_krw, granularity=Granularity.day, close=932.00, ) AssetValue.create( evaluated_at=parse_date("2016-02-23"), asset=asset_sp500, base_asset=asset_krw, granularity=Granularity.day, close=921.06, ) AssetValue.create( evaluated_at=parse_date("2016-02-22"), asset=asset_sp500, base_asset=asset_krw, granularity=Granularity.day, close=921.76, ) with Transaction.create() as t: deposit(account_sp500, asset_sp500, 1000, parse_date("2016-02-25"), t) deposit( account_checking, asset_krw, -1000 * 921.77, parse_date("2016-02-25"), t ) assert 921770 == account_sp500.net_worth( evaluated_at=parse_date("2016-02-25"), base_asset=asset_krw ) assert 921770 == account_sp500.net_worth( evaluated_at=parse_date("2016-03-01"), approximation=True, base_asset=asset_krw ) def test_account_net_worth_3(account_checking, asset_usd): """Ensures Account.net_worth() works with implicit `created_at`, which is the current datetime. 
""" deposit(account_checking, asset_usd, 1000) net_worth = account_checking.net_worth(base_asset=asset_usd) assert net_worth == 1000 def test_account_net_worth_4(account_checking, asset_usd): """Ensures Account.net_worth() works with explicit `created_at`.""" deposit(account_checking, asset_usd, 1000, parse_datetime("2018-08-30 23:00:00")) net_worth = account_checking.net_worth( base_asset=asset_usd, evaluated_at=parse_date("2018-08-30") ) assert net_worth == 1000 def test_granularity_enum(): assert Granularity.sec assert Granularity.min assert Granularity.five_min assert Granularity.hour assert Granularity.day assert Granularity.week assert Granularity.month assert Granularity.year with pytest.raises(AttributeError): Granularity.nano_sec def test_valid_granularity(): values = ( Granularity.sec, Granularity.min, Granularity.five_min, Granularity.hour, Granularity.day, Granularity.week, Granularity.month, Granularity.year, ) for value in values: assert Granularity.is_valid(value) def test_invalid_granularity(): assert not Granularity.is_valid(None) assert not Granularity.is_valid("invalid") def test_transaction_state_enum(): assert TransactionState.initiated assert TransactionState.closed assert TransactionState.pending assert TransactionState.invalid with pytest.raises(AttributeError): TransactionState.error def test_record_type_enum(): assert RecordType.deposit assert RecordType.withdraw assert RecordType.balance_adjustment with pytest.raises(AttributeError): RecordType.steal
import pandas as pd import dgl import os import torch import numpy as np import scipy.sparse as sp import time from functools import partial from .. import randomwalk import stanfordnlp import re import tqdm import string class MovieLens(object): def __init__(self, directory): ''' directory: path to movielens directory which should have the three files: users.dat movies.dat ratings.dat ''' self.directory = directory users = [] movies = [] ratings = [] # read users with open(os.path.join(directory, 'users.dat')) as f: for l in f: id_, gender, age, occupation, zip_ = l.strip().split('::') users.append({ 'id': int(id_), 'gender': gender, 'age': age, 'occupation': occupation, 'zip': zip_, }) self.users = pd.DataFrame(users).set_index('id').astype('category') # read movies with open(os.path.join(directory, 'movies.dat'), encoding='latin1') as f: for l in f: id_, title, genres = l.strip().split('::') genres_set = set(genres.split('|')) # extract year assert re.match(r'.*\([0-9]{4}\)$', title) year = title[-5:-1] title = title[:-6].strip() data = {'id': int(id_), 'title': title, 'year': year} for g in genres_set: data[g] = True movies.append(data) self.movies = ( pd.DataFrame(movies) .set_index('id') .fillna(False) .astype({'year': 'category'})) self.genres = self.movies.columns[self.movies.dtypes == bool] # read ratings with open(os.path.join(directory, 'ratings.dat')) as f: for l in f: user_id, movie_id, rating, timestamp = [int(_) for _ in l.split('::')] ratings.append({ 'user_id': user_id, 'movie_id': movie_id, 'rating': rating, 'timestamp': timestamp, }) ratings = pd.DataFrame(ratings) movie_count = ratings['movie_id'].value_counts() movie_count.name = 'movie_count' ratings = ratings.join(movie_count, on='movie_id') self.ratings = ratings # drop users and movies which do not exist in ratings self.users = self.users[self.users.index.isin(self.ratings['user_id'])] self.movies = self.movies[self.movies.index.isin(self.ratings['movie_id'])] self.data_split() self.build_graph() self.find_neighbors(0.2, 2000, 1000) def split_user(self, df, filter_counts=False): df_new = df.copy() df_new['prob'] = 0 if filter_counts: df_new_sub = (df_new['movie_count'] >= 10).nonzero()[0] else: df_new_sub = df_new['train'].nonzero()[0] prob = np.linspace(0, 1, df_new_sub.shape[0], endpoint=False) np.random.shuffle(prob) df_new['prob'].iloc[df_new_sub] = prob return df_new def data_split(self): self.ratings = self.ratings.groupby('user_id', group_keys=False).apply( partial(self.split_user, filter_counts=True)) self.ratings['train'] = self.ratings['prob'] <= 0.8 self.ratings['valid'] = (self.ratings['prob'] > 0.8) & (self.ratings['prob'] <= 0.9) self.ratings['test'] = self.ratings['prob'] > 0.9 self.ratings.drop(['prob'], axis=1, inplace=True) def build_graph(self): user_ids = list(self.users.index) movie_ids = list(self.movies.index) user_ids_invmap = {id_: i for i, id_ in enumerate(user_ids)} movie_ids_invmap = {id_: i for i, id_ in enumerate(movie_ids)} self.user_ids = user_ids self.movie_ids = movie_ids self.user_ids_invmap = user_ids_invmap self.movie_ids_invmap = movie_ids_invmap g = dgl.DGLGraph() g.add_nodes(len(user_ids) + len(movie_ids)) # user features for user_column in self.users.columns: udata = torch.zeros(g.number_of_nodes(), dtype=torch.int64) # 0 for padding udata[:len(user_ids)] = \ torch.LongTensor(self.users[user_column].cat.codes.values.astype('int64') + 1) g.ndata[user_column] = udata # movie genre movie_genres = torch.from_numpy(self.movies[self.genres].values.astype('float32')) 
g.ndata['genre'] = torch.zeros(g.number_of_nodes(), len(self.genres)) g.ndata['genre'][len(user_ids):len(user_ids) + len(movie_ids)] = movie_genres # movie year g.ndata['year'] = torch.zeros(g.number_of_nodes(), dtype=torch.int64) # 0 for padding g.ndata['year'][len(user_ids):len(user_ids) + len(movie_ids)] = \ torch.LongTensor(self.movies['year'].cat.codes.values.astype('int64') + 1) # movie title nlp = stanfordnlp.Pipeline(use_gpu=False, processors='tokenize,lemma') vocab = set() title_words = [] for t in tqdm.tqdm(self.movies['title'].values): doc = nlp(t) words = set() for s in doc.sentences: words.update(w.lemma.lower() for w in s.words if not re.fullmatch(r'['+string.punctuation+']+', w.lemma)) vocab.update(words) title_words.append(words) vocab = list(vocab) vocab_invmap = {w: i for i, w in enumerate(vocab)} # bag-of-words g.ndata['title'] = torch.zeros(g.number_of_nodes(), len(vocab)) for i, tw in enumerate(tqdm.tqdm(title_words)): g.ndata['title'][i, [vocab_invmap[w] for w in tw]] = 1 self.vocab = vocab self.vocab_invmap = vocab_invmap rating_user_vertices = [user_ids_invmap[id_] for id_ in self.ratings['user_id'].values] rating_movie_vertices = [movie_ids_invmap[id_] + len(user_ids) for id_ in self.ratings['movie_id'].values] self.rating_user_vertices = rating_user_vertices self.rating_movie_vertices = rating_movie_vertices g.add_edges( rating_user_vertices, rating_movie_vertices, data={'inv': torch.zeros(self.ratings.shape[0], dtype=torch.uint8)}) g.add_edges( rating_movie_vertices, rating_user_vertices, data={'inv': torch.ones(self.ratings.shape[0], dtype=torch.uint8)}) self.g = g def find_neighbors(self, restart_prob, max_nodes, top_T): # TODO: replace with more efficient PPR estimation neighbor_probs, neighbors = randomwalk.random_walk_distribution_topt( self.g, self.g.nodes(), restart_prob, max_nodes, top_T) self.user_neighbors = [] for i in range(len(self.user_ids)): user_neighbor = neighbors[i] self.user_neighbors.append(user_neighbor.tolist()) self.movie_neighbors = [] for i in range(len(self.user_ids), len(self.user_ids) + len(self.movie_ids)): movie_neighbor = neighbors[i] self.movie_neighbors.append(movie_neighbor.tolist()) def generate_mask(self): while True: ratings = self.ratings.groupby('user_id', group_keys=False).apply(self.split_user) prior_prob = ratings['prob'].values for i in range(5): train_mask = (prior_prob >= 0.2 * i) & (prior_prob < 0.2 * (i + 1)) prior_mask = ~train_mask train_mask &= ratings['train'].values prior_mask &= ratings['train'].values yield prior_mask, train_mask def refresh_mask(self): if not hasattr(self, 'masks'): self.masks = self.generate_mask() prior_mask, train_mask = next(self.masks) valid_tensor = torch.from_numpy(self.ratings['valid'].values.astype('uint8')) test_tensor = torch.from_numpy(self.ratings['test'].values.astype('uint8')) train_tensor = torch.from_numpy(train_mask.astype('uint8')) prior_tensor = torch.from_numpy(prior_mask.astype('uint8')) edge_data = { 'prior': prior_tensor, 'valid': valid_tensor, 'test': test_tensor, 'train': train_tensor, } self.g.edges[self.rating_user_vertices, self.rating_movie_vertices].data.update(edge_data) self.g.edges[self.rating_movie_vertices, self.rating_user_vertices].data.update(edge_data)
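# Illustrative note (separate from the class above): data_split() assigns each
# user's ratings a shuffled uniform pseudo-probability and buckets them as
# train (prob <= 0.8), valid (0.8, 0.9] and test (> 0.9), i.e. roughly an
# 80/10/10 split per user. The ten-point example below is an assumption used
# only to make those thresholds concrete.
if __name__ == '__main__':
    probs = np.linspace(0, 1, 10, endpoint=False)  # 0.0, 0.1, ..., 0.9
    n_train = (probs <= 0.8).sum()
    n_valid = ((probs > 0.8) & (probs <= 0.9)).sum()
    n_test = (probs > 0.9).sum()
    print(n_train, n_valid, n_test)  # -> 9 1 0; approaches 80/10/10 with many ratings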
""" Simple function to get the quote for a desired market order from an orderbook. This is useful in a variety of ways and places. """ from gryphon.lib.money import Money from gryphon.lib.exchange.consts import Consts def price_quote_from_orderbook(order_book, mode, volume): if mode == Consts.BID: orders = order_book.get('asks', []) elif mode == Consts.ASK: orders = order_book.get('bids', []) else: raise ValueError('mode must be one of ask/bid') if not isinstance(volume, Money): raise ValueError('Volume must be a Money() object') if volume.currency != orders[0].volume.currency: raise ValueError('Volume currency does not match orderbook currency! %s != %s' % ( volume.currency, orders[0].volume.currency, )) if not orders: raise Exception('no orders on one side of the book.') price = 0 volume_remaining = volume for order in orders: volume_from_this_order = order.volume if order.volume > volume_remaining: volume_from_this_order = volume_remaining volume_remaining -= volume_from_this_order price += (volume_from_this_order.amount * order.price) if volume_remaining <= 0: break last_order = order # Because the loop broke. if volume_remaining > 0: raise Exception('not enough liquidity for a %s %s' % (volume, mode)) response = { 'total_price': price, 'price_for_order': last_order.price, } return response
# Copyright (c) 2022, RTE (https://www.rte-france.com) # See AUTHORS.txt # SPDX-License-Identifier: Apache-2.0 (see LICENSE.txt) # This file is part of ReLife, an open source Python library for asset # management based on reliability theory and lifetime data analysis. import pytest import numpy as np from relife.datasets import load_power_transformer from relife.distribution import ( Exponential, MinimumDistribution, Weibull, Gompertz, Gamma, LogLogistic, ) # fixtures @pytest.fixture(scope="module") def data(): return load_power_transformer() @pytest.fixture( scope="module", params=[ Exponential(0.00795203), Weibull(3.46597395, 0.01227849), Gompertz(0.00865741, 0.06062632), Gamma(5.3571091, 0.06622822), LogLogistic(3.92614064, 0.0133325), ], ) def model(request): return request.param # test functions def test_sf(model): assert model.sf(model.median()) == pytest.approx(0.5, rel=1e-3) def test_rvs(model): size = 10 assert model.rvs(size=size).shape == (size,) def test_mean(model): assert super(type(model), model).mean() == pytest.approx(model.mean(), rel=1e-3) def test_mrl(model): t = np.arange(10) assert model.mrl(t).shape == (t.size,) def test_fit(model, data): params = model.params.copy() model.fit(*data.astuple()) assert model.params == pytest.approx(params, rel=1e-3) def test_minimum_distribution(model, data): params = model.params.copy() n = np.ones((data.size, 1)) model = MinimumDistribution(model).fit(*data.astuple(), args=(n,)) assert model.params == pytest.approx(params, rel=1e-3)
from django.urls import reverse from rest_framework import status from lego.apps.quotes.models import Quote from lego.apps.users.models import AbakusGroup, User from lego.utils.test_utils import BaseAPITestCase def _get_list_url(): return reverse('api:v1:quote-list') def _get_list_approved_url(): return _get_list_url() + '?approved=True' def _get_list_unapproved_url(): return _get_list_url() + '?approved=False' def _get_detail_url(pk): return reverse('api:v1:quote-detail', kwargs={'pk': pk}) def _get_approve_url(pk): return reverse('api:v1:quote-approve', kwargs={'pk': pk}) def _get_unapprove_url(pk): return reverse('api:v1:quote-unapprove', kwargs={'pk': pk}) class QuoteViewSetTestCase(BaseAPITestCase): fixtures = ['test_users.yaml', 'test_abakus_groups.yaml', 'test_quotes.yaml'] def setUp(self): self.authenticated_user = User.objects.get(username='test1') self.group = AbakusGroup.objects_with_text.get(name='QuoteAdminTest') self.group.add_user(self.authenticated_user) self.unauthenticated_user = User.objects.get(username='test2') self.quote_data = { 'text': 'TestText', 'source': 'TestSource', } def test_create_authenticated(self): """Users with permissions should be able to create quotes""" self.client.force_authenticate(self.authenticated_user) response = self.client.post(_get_list_url(), self.quote_data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_create_unauthenticated(self): """Users with no permissions should not be able to create quotes""" self.client.force_authenticate(self.unauthenticated_user) response = self.client.post(_get_list_url(), self.quote_data) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_list_authenticated(self): """Users with permissions should be able to list quotes""" self.client.force_authenticate(self.authenticated_user) response = self.client.get(_get_list_approved_url()) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(response.data) def test_list_unauthenticated(self): """Users with no permissions should not be able to list quotes""" self.client.force_authenticate(user=self.unauthenticated_user) response = self.client.get(_get_list_approved_url()) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_detail_authenticated(self): """Users with permissions should be able to see detailed quotes""" self.client.force_authenticate(self.authenticated_user) response = self.client.get(_get_detail_url(1)) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(response.data) def test_detail_unauthenticated(self): """Users with no permissions should not be able see detailed quotes""" self.client.force_authenticate(user=self.unauthenticated_user) response = self.client.get(_get_detail_url(1)) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_approve_authenticated(self): """Users with permissions should be able to approve quotes""" self.client.force_authenticate(self.authenticated_user) response = self.client.put(_get_approve_url(3)) self.assertEquals(response.status_code, status.HTTP_200_OK) quote = Quote.objects.get(id=3) self.assertTrue(quote.approved) def test_approve_unauthenticated(self): """Users with no permissions should not be able to approve quotes""" self.client.force_authenticate(self.unauthenticated_user) response = self.client.put(_get_approve_url(3)) self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN) def test_list_unapproved_authenticated(self): """Users with permissions should be able to see 
unapproved quotes""" self.client.force_authenticate(self.authenticated_user) response = self.client.get(_get_list_unapproved_url()) self.assertEqual(response.status_code, status.HTTP_200_OK) first_quote = response.data['results'][0] self.assertFalse(first_quote['approved']) def test_list_unapproved_unauthenticated(self): """Users with no permissions should not be able to see unapproved quotes""" self.client.force_authenticate(self.unauthenticated_user) response = self.client.get(_get_list_unapproved_url()) self.assertEquals(status.HTTP_403_FORBIDDEN, response.status_code) def test_list_approved_unauthorized(self): """Users with regular permissions should be able to see approved quotes""" self.group.permissions.remove('/sudo/admin/quotes/edit/') self.group.save() self.client.force_authenticate(self.authenticated_user) response = self.client.get(_get_list_approved_url()) self.assertEquals(status.HTTP_200_OK, response.status_code) self.assertTrue(len(response.data['results']) > 0) def test_list_unapproved_unauthorized(self): """Users with regular permissions should not be able to see unapproved quotes""" self.group.permissions.remove('/sudo/admin/quotes/edit/') self.group.save() self.client.force_authenticate(self.authenticated_user) response = self.client.get(_get_list_unapproved_url()) self.assertEquals(status.HTTP_200_OK, response.status_code) self.assertEquals(len(response.data['results']), 0)
from evalml.pipelines.components.transformers.samplers.base_sampler import (
    BaseOverSampler,
)
from evalml.utils.woodwork_utils import infer_feature_types


class SMOTESampler(BaseOverSampler):
    """SMOTE Oversampler component. Works on numerical datasets only. This component is only run during training and not during predict.

    Arguments:
        sampling_ratio (float): This is the goal ratio of the minority to majority class, with range (0, 1]. A value of 0.25 means we want a 1:4 ratio
            of the minority to majority class after oversampling. We will create a sampling dictionary using this ratio, with the keys corresponding to the class
            and the values corresponding to the number of samples. Defaults to 0.25.
        k_neighbors_default (int): The number of nearest neighbors used to construct synthetic samples. This is the default value used, but the actual
            k_neighbors value might be smaller if there are fewer samples. Defaults to 5.
        n_jobs (int): The number of CPU cores to use. Defaults to -1.
        random_seed (int): The seed to use for random sampling. Defaults to 0.
    """

    name = "SMOTE Oversampler"
    hyperparameter_ranges = {}
    """{}"""

    def __init__(
        self,
        sampling_ratio=0.25,
        k_neighbors_default=5,
        n_jobs=-1,
        random_seed=0,
        **kwargs
    ):
        super().__init__(
            "SMOTE",
            sampling_ratio=sampling_ratio,
            k_neighbors_default=k_neighbors_default,
            n_jobs=n_jobs,
            random_seed=random_seed,
            **kwargs
        )


class SMOTENCSampler(BaseOverSampler):
    """SMOTENC Oversampler component. Uses SMOTENC to generate synthetic samples. Works on a mix of numerical and categorical columns.
    Input data must be Woodwork type, and this component is only run during training and not during predict.

    Arguments:
        sampling_ratio (float): This is the goal ratio of the minority to majority class, with range (0, 1]. A value of 0.25 means we want a 1:4 ratio
            of the minority to majority class after oversampling. We will create a sampling dictionary using this ratio, with the keys corresponding to the class
            and the values corresponding to the number of samples. Defaults to 0.25.
        k_neighbors_default (int): The number of nearest neighbors used to construct synthetic samples. This is the default value used, but the actual
            k_neighbors value might be smaller if there are fewer samples. Defaults to 5.
        n_jobs (int): The number of CPU cores to use. Defaults to -1.
        random_seed (int): The seed to use for random sampling. Defaults to 0.
    """

    name = "SMOTENC Oversampler"
    hyperparameter_ranges = {}
    """{}"""

    def __init__(
        self,
        sampling_ratio=0.25,
        k_neighbors_default=5,
        n_jobs=-1,
        random_seed=0,
        **kwargs
    ):
        self.categorical_features = None
        super().__init__(
            "SMOTENC",
            sampling_ratio=sampling_ratio,
            k_neighbors_default=k_neighbors_default,
            n_jobs=n_jobs,
            random_seed=random_seed,
            **kwargs
        )

    def _get_categorical(self, X):
        X = infer_feature_types(X)
        self.categorical_features = [
            i
            for i, val in enumerate(X.ww.types["Logical Type"].items())
            if str(val[1]) in {"Boolean", "Categorical"}
        ]
        self._parameters["categorical_features"] = self.categorical_features

    def fit(self, X, y):
        # get categorical features first
        self._get_categorical(X)
        super().fit(X, y)


class SMOTENSampler(BaseOverSampler):
    """SMOTEN Oversampler component. Uses SMOTEN to generate synthetic samples. Works for purely categorical datasets.
    This component is only run during training and not during predict.

    Arguments:
        sampling_ratio (float): This is the goal ratio of the minority to majority class, with range (0, 1]. A value of 0.25 means we want a 1:4 ratio
            of the minority to majority class after oversampling. We will create a sampling dictionary using this ratio, with the keys corresponding to the class
            and the values corresponding to the number of samples. Defaults to 0.25.
        k_neighbors_default (int): The number of nearest neighbors used to construct synthetic samples. This is the default value used, but the actual
            k_neighbors value might be smaller if there are fewer samples. Defaults to 5.
        n_jobs (int): The number of CPU cores to use. Defaults to -1.
        random_seed (int): The seed to use for random sampling. Defaults to 0.
    """

    name = "SMOTEN Oversampler"
    hyperparameter_ranges = {}
    """{}"""

    def __init__(
        self,
        sampling_ratio=0.25,
        k_neighbors_default=5,
        n_jobs=-1,
        random_seed=0,
        **kwargs
    ):
        super().__init__(
            "SMOTEN",
            sampling_ratio=sampling_ratio,
            k_neighbors_default=k_neighbors_default,
            n_jobs=n_jobs,
            random_seed=random_seed,
            **kwargs
        )
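# Illustrative arithmetic only (not how BaseOverSampler builds its sampling
# dictionary internally): the docstrings above define sampling_ratio as the
# target minority:majority ratio after oversampling.
def _target_minority_count_sketch(majority_count, sampling_ratio=0.25):
    # With sampling_ratio=0.25 and 1000 majority rows, the minority class is
    # oversampled up to 0.25 * 1000 = 250 rows, i.e. a 1:4 ratio.
    return int(majority_count * sampling_ratio)

# _target_minority_count_sketch(1000) -> 250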
import os

VERSION = "1.0"
MODEL_NAME = os.path.basename(os.path.dirname(__file__))
DOCKERHUB_REPO = f"danieldeutsch/{MODEL_NAME}"
DEFAULT_IMAGE = f"{DOCKERHUB_REPO}:{VERSION}"
AUTOMATICALLY_PUBLISH = True

from repro.models.liu2019.models import BertSumExt, BertSumExtAbs, TransformerAbs
from repro.models.liu2019.setup import Liu2019SetupSubcommand
# coding: utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa: F401 from typing import List, Dict # noqa: F401 from fuji_server.models.base_model_ import Model #from fuji_server.models.object import Object # noqa: F401,E501 from fuji_server import util from fuji_server.models.any_of_fair_results_results_items import AnyOfFAIRResultsResultsItems class FAIRResults(Model): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, test_id: str=None, request: Dict=None, timestamp: datetime=None, expiry_timestamp: datetime=None, metric_specification: str=None, metric_version: str=None, software_version: str=None, total_metrics: int=None, results: List[AnyOfFAIRResultsResultsItems]=None): # noqa: E501 """FAIRResults - a model defined in Swagger :param test_id: The test_id of this FAIRResults. # noqa: E501 :type test_id: str :param request: The request of this FAIRResults. # noqa: E501 :type request: Dict :param timestamp: The timestamp of this FAIRResults. # noqa: E501 :type timestamp: datetime :param expiry_timestamp: The expiry_timestamp of this FAIRResults. # noqa: E501 :type expiry_timestamp: datetime :param metric_specification: The metric_specification of this FAIRResults. # noqa: E501 :type metric_specification: str :param metric_version: The metric_version of this FAIRResults. # noqa: E501 :type metric_version: str :param software_version: The software_version of this FAIRResults. # noqa: E501 :type software_version: str :param total_metrics: The total_metrics of this FAIRResults. # noqa: E501 :type total_metrics: int :param results: The results of this FAIRResults. # noqa: E501 :type results: List[AnyOfFAIRResultsResultsItems] """ self.swagger_types = { 'test_id': str, 'request': Dict, 'timestamp': datetime, 'expiry_timestamp': datetime, 'metric_specification': str, 'metric_version': str, 'software_version': str, 'total_metrics': int, 'results': List[AnyOfFAIRResultsResultsItems] } self.attribute_map = { 'test_id': 'test_id', 'request': 'request', 'timestamp': 'timestamp', 'expiry_timestamp': 'expiry_timestamp', 'metric_specification': 'metric_specification', 'metric_version': 'metric_version', 'software_version': 'software_version', 'total_metrics': 'total_metrics', 'results': 'results' } self._test_id = test_id self._request = request self._timestamp = timestamp self._expiry_timestamp = expiry_timestamp self._metric_specification = metric_specification self._metric_version = metric_version self._software_version = software_version self._total_metrics = total_metrics self._results = results @classmethod def from_dict(cls, dikt) -> 'FAIRResults': """Returns the dict as a model :param dikt: A dict. :type: dict :return: The FAIRResults of this FAIRResults. # noqa: E501 :rtype: FAIRResults """ return util.deserialize_model(dikt, cls) @property def test_id(self) -> str: """Gets the test_id of this FAIRResults. :return: The test_id of this FAIRResults. :rtype: str """ return self._test_id @test_id.setter def test_id(self, test_id: str): """Sets the test_id of this FAIRResults. :param test_id: The test_id of this FAIRResults. :type test_id: str """ self._test_id = test_id @property def request(self) -> Dict: """Gets the request of this FAIRResults. :return: The request of this FAIRResults. :rtype: Dict """ return self._request @request.setter def request(self, request: Dict): """Sets the request of this FAIRResults. :param request: The request of this FAIRResults. 
:type request: Dict """ self._request = request @property def timestamp(self) -> datetime: """Gets the timestamp of this FAIRResults. :return: The timestamp of this FAIRResults. :rtype: datetime """ return self._timestamp @timestamp.setter def timestamp(self, timestamp: datetime): """Sets the timestamp of this FAIRResults. :param timestamp: The timestamp of this FAIRResults. :type timestamp: datetime """ self._timestamp = timestamp @property def expiry_timestamp(self) -> datetime: """Gets the expiry_timestamp of this FAIRResults. :return: The expiry_timestamp of this FAIRResults. :rtype: datetime """ return self._expiry_timestamp @expiry_timestamp.setter def expiry_timestamp(self, expiry_timestamp: datetime): """Sets the expiry_timestamp of this FAIRResults. :param expiry_timestamp: The expiry_timestamp of this FAIRResults. :type expiry_timestamp: datetime """ self._expiry_timestamp = expiry_timestamp @property def metric_specification(self) -> str: """Gets the metric_specification of this FAIRResults. :return: The metric_specification of this FAIRResults. :rtype: str """ return self._metric_specification @metric_specification.setter def metric_specification(self, metric_specification: str): """Sets the metric_specification of this FAIRResults. :param metric_specification: The metric_specification of this FAIRResults. :type metric_specification: str """ self._metric_specification = metric_specification @property def metric_version(self) -> str: """Gets the metric_version of this FAIRResults. :return: The metric_version of this FAIRResults. :rtype: str """ return self._metric_version @metric_version.setter def metric_version(self, metric_version: str): """Sets the metric_version of this FAIRResults. :param metric_version: The metric_version of this FAIRResults. :type metric_version: str """ self._metric_version = metric_version @property def software_version(self) -> str: """Gets the software_version of this FAIRResults. :return: The software_version of this FAIRResults. :rtype: str """ return self._software_version @software_version.setter def software_version(self, software_version: str): """Sets the software_version of this FAIRResults. :param software_version: The software_version of this FAIRResults. :type software_version: str """ self._software_version = software_version @property def total_metrics(self) -> int: """Gets the total_metrics of this FAIRResults. :return: The total_metrics of this FAIRResults. :rtype: int """ return self._total_metrics @total_metrics.setter def total_metrics(self, total_metrics: int): """Sets the total_metrics of this FAIRResults. :param total_metrics: The total_metrics of this FAIRResults. :type total_metrics: int """ self._total_metrics = total_metrics @property def results(self) -> List[AnyOfFAIRResultsResultsItems]: """Gets the results of this FAIRResults. :return: The results of this FAIRResults. :rtype: List[AnyOfFAIRResultsResultsItems] """ return self._results @results.setter def results(self, results: List[AnyOfFAIRResultsResultsItems]): """Sets the results of this FAIRResults. :param results: The results of this FAIRResults. :type results: List[AnyOfFAIRResultsResultsItems] """ self._results = results
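# Illustrative usage sketch (not part of the generated model): the plain
# constructor above can build a result container directly; the field values
# below are placeholders, and real fuji_server responses are normally
# deserialized through FAIRResults.from_dict / util.deserialize_model instead.
if __name__ == "__main__":
    example = FAIRResults(
        test_id="example-test-id",
        metric_version="0.5",
        total_metrics=0,
        results=[],
    )
    print(example.test_id, example.total_metrics)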
# Item Merge
# Create a program that will compare two shopping lists from “Week A” and “Week B”. It will return any unique items contained on the list.

ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

def flatMap(arr, func=lambda x: x):
    return [func(j) for i in arr for j in i]

def main():
    getList = lambda x: input(f'{x} Shopping List: ').lower().split(', ')

    shoppingListsCount = int(input('Number of shopping lists: '))
    shoppingLists = flatMap([getList(f'Week {ALPHABET[i%26]}') for i in range(shoppingListsCount)])

    unique = [i for i in shoppingLists if shoppingLists.count(i) == 1]

    count = [(i, shoppingLists.count(i)) for i in shoppingLists]
    count = list(dict.fromkeys(count))
    count.sort(key=lambda x: x[1], reverse=True)

    print()
    print(f'Unique Items: {", ".join(unique)}')
    print(f'3 Most popular items: {", ".join([i[0] for i in count[:3]])}')

if __name__ == '__main__':
    main()
# # This file is part of pretix (Community Edition). # # Copyright (C) 2014-2020 Raphael Michel and contributors # Copyright (C) 2020-2021 rami.io GmbH and contributors # # This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General # Public License as published by the Free Software Foundation in version 3 of the License. # # ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are # applicable granting you additional permissions and placing additional restrictions on your usage of this software. # Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive # this file, see <https://pretix.eu/about/en/license>. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License along with this program. If not, see # <https://www.gnu.org/licenses/>. # # This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of # the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>. # # This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A # full history of changes and contributors is available at <https://github.com/pretix/pretix>. # # This file contains Apache-licensed contributions copyrighted by: Daniel, Enrique Saez, Jahongir, Mason Mohkami, # Sohalt, Tobias Kunze, [email protected], luto, oocf # # Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under the License. 
from django.conf.urls import include, re_path from django.views.generic.base import RedirectView from pretix.control.views import ( auth, checkin, dashboards, event, geo, global_settings, item, main, oauth, orderimport, orders, organizer, pdf, search, shredder, subevents, typeahead, user, users, vouchers, waitinglist, ) urlpatterns = [ re_path(r'^logout$', auth.logout, name='auth.logout'), re_path(r'^login$', auth.login, name='auth.login'), re_path(r'^login/2fa$', auth.Login2FAView.as_view(), name='auth.login.2fa'), re_path(r'^register$', auth.register, name='auth.register'), re_path(r'^invite/(?P<token>[a-zA-Z0-9]+)$', auth.invite, name='auth.invite'), re_path(r'^forgot$', auth.Forgot.as_view(), name='auth.forgot'), re_path(r'^forgot/recover$', auth.Recover.as_view(), name='auth.forgot.recover'), re_path(r'^$', dashboards.user_index, name='index'), re_path(r'^widgets.json$', dashboards.user_index_widgets_lazy, name='index.widgets'), re_path(r'^global/settings/$', global_settings.GlobalSettingsView.as_view(), name='global.settings'), re_path(r'^global/update/$', global_settings.UpdateCheckView.as_view(), name='global.update'), re_path(r'^global/license/$', global_settings.LicenseCheckView.as_view(), name='global.license'), re_path(r'^global/message/$', global_settings.MessageView.as_view(), name='global.message'), re_path(r'^logdetail/$', global_settings.LogDetailView.as_view(), name='global.logdetail'), re_path(r'^logdetail/payment/$', global_settings.PaymentDetailView.as_view(), name='global.paymentdetail'), re_path(r'^logdetail/refund/$', global_settings.RefundDetailView.as_view(), name='global.refunddetail'), re_path(r'^geocode/$', geo.GeoCodeView.as_view(), name='global.geocode'), re_path(r'^reauth/$', user.ReauthView.as_view(), name='user.reauth'), re_path(r'^sudo/$', user.StartStaffSession.as_view(), name='user.sudo'), re_path(r'^sudo/stop/$', user.StopStaffSession.as_view(), name='user.sudo.stop'), re_path(r'^sudo/(?P<id>\d+)/$', user.EditStaffSession.as_view(), name='user.sudo.edit'), re_path(r'^sudo/sessions/$', user.StaffSessionList.as_view(), name='user.sudo.list'), re_path(r'^users/$', users.UserListView.as_view(), name='users'), re_path(r'^users/select2$', typeahead.users_select2, name='users.select2'), re_path(r'^users/add$', users.UserCreateView.as_view(), name='users.add'), re_path(r'^users/impersonate/stop', users.UserImpersonateStopView.as_view(), name='users.impersonate.stop'), re_path(r'^users/(?P<id>\d+)/$', users.UserEditView.as_view(), name='users.edit'), re_path(r'^users/(?P<id>\d+)/reset$', users.UserResetView.as_view(), name='users.reset'), re_path(r'^users/(?P<id>\d+)/impersonate', users.UserImpersonateView.as_view(), name='users.impersonate'), re_path(r'^users/(?P<id>\d+)/anonymize', users.UserAnonymizeView.as_view(), name='users.anonymize'), re_path(r'^pdf/editor/webfonts.css', pdf.FontsCSSView.as_view(), name='pdf.css'), re_path(r'^settings/?$', user.UserSettings.as_view(), name='user.settings'), re_path(r'^settings/history/$', user.UserHistoryView.as_view(), name='user.settings.history'), re_path(r'^settings/notifications/$', user.UserNotificationsEditView.as_view(), name='user.settings.notifications'), re_path(r'^settings/notifications/off/(?P<id>\d+)/(?P<token>[^/]+)/$', user.UserNotificationsDisableView.as_view(), name='user.settings.notifications.off'), re_path(r'^settings/oauth/authorized/$', oauth.AuthorizationListView.as_view(), name='user.settings.oauth.list'), re_path(r'^settings/oauth/authorized/(?P<pk>\d+)/revoke$', 
oauth.AuthorizationRevokeView.as_view(), name='user.settings.oauth.revoke'), re_path(r'^settings/oauth/apps/$', oauth.OAuthApplicationListView.as_view(), name='user.settings.oauth.apps'), re_path(r'^settings/oauth/apps/add$', oauth.OAuthApplicationRegistrationView.as_view(), name='user.settings.oauth.apps.register'), re_path(r'^settings/oauth/apps/(?P<pk>\d+)/$', oauth.OAuthApplicationUpdateView.as_view(), name='user.settings.oauth.app'), re_path(r'^settings/oauth/apps/(?P<pk>\d+)/disable$', oauth.OAuthApplicationDeleteView.as_view(), name='user.settings.oauth.app.disable'), re_path(r'^settings/oauth/apps/(?P<pk>\d+)/roll$', oauth.OAuthApplicationRollView.as_view(), name='user.settings.oauth.app.roll'), re_path(r'^settings/2fa/$', user.User2FAMainView.as_view(), name='user.settings.2fa'), re_path(r'^settings/2fa/add$', user.User2FADeviceAddView.as_view(), name='user.settings.2fa.add'), re_path(r'^settings/2fa/enable', user.User2FAEnableView.as_view(), name='user.settings.2fa.enable'), re_path(r'^settings/2fa/disable', user.User2FADisableView.as_view(), name='user.settings.2fa.disable'), re_path(r'^settings/2fa/regenemergency', user.User2FARegenerateEmergencyView.as_view(), name='user.settings.2fa.regenemergency'), re_path(r'^settings/2fa/totp/(?P<device>[0-9]+)/confirm', user.User2FADeviceConfirmTOTPView.as_view(), name='user.settings.2fa.confirm.totp'), re_path(r'^settings/2fa/webauthn/(?P<device>[0-9]+)/confirm', user.User2FADeviceConfirmWebAuthnView.as_view(), name='user.settings.2fa.confirm.webauthn'), re_path(r'^settings/2fa/(?P<devicetype>[^/]+)/(?P<device>[0-9]+)/delete', user.User2FADeviceDeleteView.as_view(), name='user.settings.2fa.delete'), re_path(r'^organizers/$', organizer.OrganizerList.as_view(), name='organizers'), re_path(r'^organizers/add$', organizer.OrganizerCreate.as_view(), name='organizers.add'), re_path(r'^organizers/select2$', typeahead.organizer_select2, name='organizers.select2'), re_path(r'^organizer/(?P<organizer>[^/]+)/$', organizer.OrganizerDetail.as_view(), name='organizer'), re_path(r'^organizer/(?P<organizer>[^/]+)/edit$', organizer.OrganizerUpdate.as_view(), name='organizer.edit'), re_path(r'^organizer/(?P<organizer>[^/]+)/settings/email$', organizer.OrganizerMailSettings.as_view(), name='organizer.settings.mail'), re_path(r'^organizer/(?P<organizer>[^/]+)/settings/email/setup$', organizer.MailSettingsSetup.as_view(), name='organizer.settings.mail.setup'), re_path(r'^organizer/(?P<organizer>[^/]+)/settings/email/preview$', organizer.MailSettingsPreview.as_view(), name='organizer.settings.mail.preview'), re_path(r'^organizer/(?P<organizer>[^/]+)/delete$', organizer.OrganizerDelete.as_view(), name='organizer.delete'), re_path(r'^organizer/(?P<organizer>[^/]+)/settings/display$', organizer.OrganizerDisplaySettings.as_view(), name='organizer.display'), re_path(r'^organizer/(?P<organizer>[^/]+)/properties$', organizer.EventMetaPropertyListView.as_view(), name='organizer.properties'), re_path(r'^organizer/(?P<organizer>[^/]+)/property/add$', organizer.EventMetaPropertyCreateView.as_view(), name='organizer.property.add'), re_path(r'^organizer/(?P<organizer>[^/]+)/property/(?P<property>[^/]+)/edit$', organizer.EventMetaPropertyUpdateView.as_view(), name='organizer.property.edit'), re_path(r'^organizer/(?P<organizer>[^/]+)/property/(?P<property>[^/]+)/delete$', organizer.EventMetaPropertyDeleteView.as_view(), name='organizer.property.delete'), re_path(r'^organizer/(?P<organizer>[^/]+)/membershiptypes$', organizer.MembershipTypeListView.as_view(), 
name='organizer.membershiptypes'), re_path(r'^organizer/(?P<organizer>[^/]+)/membershiptype/add$', organizer.MembershipTypeCreateView.as_view(), name='organizer.membershiptype.add'), re_path(r'^organizer/(?P<organizer>[^/]+)/membershiptype/(?P<type>[^/]+)/edit$', organizer.MembershipTypeUpdateView.as_view(), name='organizer.membershiptype.edit'), re_path(r'^organizer/(?P<organizer>[^/]+)/membershiptype/(?P<type>[^/]+)/delete$', organizer.MembershipTypeDeleteView.as_view(), name='organizer.membershiptype.delete'), re_path(r'^organizer/(?P<organizer>[^/]+)/customers$', organizer.CustomerListView.as_view(), name='organizer.customers'), re_path(r'^organizer/(?P<organizer>[^/]+)/customers/select2$', typeahead.customer_select2, name='organizer.customers.select2'), re_path(r'^organizer/(?P<organizer>[^/]+)/customer/add$', organizer.CustomerCreateView.as_view(), name='organizer.customer.create'), re_path(r'^organizer/(?P<organizer>[^/]+)/customer/(?P<customer>[^/]+)/$', organizer.CustomerDetailView.as_view(), name='organizer.customer'), re_path(r'^organizer/(?P<organizer>[^/]+)/customer/(?P<customer>[^/]+)/edit$', organizer.CustomerUpdateView.as_view(), name='organizer.customer.edit'), re_path(r'^organizer/(?P<organizer>[^/]+)/customer/(?P<customer>[^/]+)/membership/add$', organizer.MembershipCreateView.as_view(), name='organizer.customer.membership.add'), re_path(r'^organizer/(?P<organizer>[^/]+)/customer/(?P<customer>[^/]+)/membership/(?P<id>[^/]+)/edit$', organizer.MembershipUpdateView.as_view(), name='organizer.customer.membership.edit'), re_path(r'^organizer/(?P<organizer>[^/]+)/customer/(?P<customer>[^/]+)/membership/(?P<id>[^/]+)/delete$', organizer.MembershipDeleteView.as_view(), name='organizer.customer.membership.delete'), re_path(r'^organizer/(?P<organizer>[^/]+)/customer/(?P<customer>[^/]+)/anonymize$', organizer.CustomerAnonymizeView.as_view(), name='organizer.customer.anonymize'), re_path(r'^organizer/(?P<organizer>[^/]+)/giftcards$', organizer.GiftCardListView.as_view(), name='organizer.giftcards'), re_path(r'^organizer/(?P<organizer>[^/]+)/giftcard/add$', organizer.GiftCardCreateView.as_view(), name='organizer.giftcard.add'), re_path(r'^organizer/(?P<organizer>[^/]+)/giftcard/(?P<giftcard>[^/]+)/$', organizer.GiftCardDetailView.as_view(), name='organizer.giftcard'), re_path(r'^organizer/(?P<organizer>[^/]+)/giftcard/(?P<giftcard>[^/]+)/edit$', organizer.GiftCardUpdateView.as_view(), name='organizer.giftcard.edit'), re_path(r'^organizer/(?P<organizer>[^/]+)/webhooks$', organizer.WebHookListView.as_view(), name='organizer.webhooks'), re_path(r'^organizer/(?P<organizer>[^/]+)/webhook/add$', organizer.WebHookCreateView.as_view(), name='organizer.webhook.add'), re_path(r'^organizer/(?P<organizer>[^/]+)/webhook/(?P<webhook>[^/]+)/edit$', organizer.WebHookUpdateView.as_view(), name='organizer.webhook.edit'), re_path(r'^organizer/(?P<organizer>[^/]+)/webhook/(?P<webhook>[^/]+)/logs$', organizer.WebHookLogsView.as_view(), name='organizer.webhook.logs'), re_path(r'^organizer/(?P<organizer>[^/]+)/devices$', organizer.DeviceListView.as_view(), name='organizer.devices'), re_path(r'^organizer/(?P<organizer>[^/]+)/device/add$', organizer.DeviceCreateView.as_view(), name='organizer.device.add'), re_path(r'^organizer/(?P<organizer>[^/]+)/device/(?P<device>[^/]+)/edit$', organizer.DeviceUpdateView.as_view(), name='organizer.device.edit'), re_path(r'^organizer/(?P<organizer>[^/]+)/device/(?P<device>[^/]+)/connect$', organizer.DeviceConnectView.as_view(), name='organizer.device.connect'), 
re_path(r'^organizer/(?P<organizer>[^/]+)/device/(?P<device>[^/]+)/revoke$', organizer.DeviceRevokeView.as_view(), name='organizer.device.revoke'), re_path(r'^organizer/(?P<organizer>[^/]+)/device/(?P<device>[^/]+)/logs$', organizer.DeviceLogView.as_view(), name='organizer.device.logs'), re_path(r'^organizer/(?P<organizer>[^/]+)/gates$', organizer.GateListView.as_view(), name='organizer.gates'), re_path(r'^organizer/(?P<organizer>[^/]+)/gate/add$', organizer.GateCreateView.as_view(), name='organizer.gate.add'), re_path(r'^organizer/(?P<organizer>[^/]+)/gate/(?P<gate>[^/]+)/edit$', organizer.GateUpdateView.as_view(), name='organizer.gate.edit'), re_path(r'^organizer/(?P<organizer>[^/]+)/gate/(?P<gate>[^/]+)/delete$', organizer.GateDeleteView.as_view(), name='organizer.gate.delete'), re_path(r'^organizer/(?P<organizer>[^/]+)/teams$', organizer.TeamListView.as_view(), name='organizer.teams'), re_path(r'^organizer/(?P<organizer>[^/]+)/team/add$', organizer.TeamCreateView.as_view(), name='organizer.team.add'), re_path(r'^organizer/(?P<organizer>[^/]+)/team/(?P<team>[^/]+)/$', organizer.TeamMemberView.as_view(), name='organizer.team'), re_path(r'^organizer/(?P<organizer>[^/]+)/team/(?P<team>[^/]+)/edit$', organizer.TeamUpdateView.as_view(), name='organizer.team.edit'), re_path(r'^organizer/(?P<organizer>[^/]+)/team/(?P<team>[^/]+)/delete$', organizer.TeamDeleteView.as_view(), name='organizer.team.delete'), re_path(r'^organizer/(?P<organizer>[^/]+)/slugrng', main.SlugRNG.as_view(), name='events.add.slugrng'), re_path(r'^organizer/(?P<organizer>[^/]+)/logs', organizer.LogView.as_view(), name='organizer.log'), re_path(r'^organizer/(?P<organizer>[^/]+)/export/$', organizer.ExportView.as_view(), name='organizer.export'), re_path(r'^organizer/(?P<organizer>[^/]+)/export/do$', organizer.ExportDoView.as_view(), name='organizer.export.do'), re_path(r'^nav/typeahead/$', typeahead.nav_context_list, name='nav.typeahead'), re_path(r'^events/$', main.EventList.as_view(), name='events'), re_path(r'^events/add$', main.EventWizard.as_view(), name='events.add'), re_path(r'^events/typeahead/$', typeahead.event_list, name='events.typeahead'), re_path(r'^events/typeahead/meta/$', typeahead.meta_values, name='events.meta.typeahead'), re_path(r'^search/orders/$', search.OrderSearch.as_view(), name='search.orders'), re_path(r'^search/payments/$', search.PaymentSearch.as_view(), name='search.payments'), re_path(r'^event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/', include([ re_path(r'^$', dashboards.event_index, name='event.index'), re_path(r'^widgets.json$', dashboards.event_index_widgets_lazy, name='event.index.widgets'), re_path(r'^logs/embed$', dashboards.event_index_log_lazy, name='event.index.logs'), re_path(r'^live/$', event.EventLive.as_view(), name='event.live'), re_path(r'^logs/$', event.EventLog.as_view(), name='event.log'), re_path(r'^delete/$', event.EventDelete.as_view(), name='event.delete'), re_path(r'^comment/$', event.EventComment.as_view(), name='event.comment'), re_path(r'^quickstart/$', event.QuickSetupView.as_view(), name='event.quick'), re_path(r'^settings/$', event.EventUpdate.as_view(), name='event.settings'), re_path(r'^settings/plugins$', event.EventPlugins.as_view(), name='event.settings.plugins'), re_path(r'^settings/payment/(?P<provider>[^/]+)$', event.PaymentProviderSettings.as_view(), name='event.settings.payment.provider'), re_path(r'^settings/payment$', event.PaymentSettings.as_view(), name='event.settings.payment'), re_path(r'^settings/tickets$', event.TicketSettings.as_view(), 
name='event.settings.tickets'), re_path(r'^settings/tickets/preview/(?P<output>[^/]+)$', event.TicketSettingsPreview.as_view(), name='event.settings.tickets.preview'), re_path(r'^settings/email$', event.MailSettings.as_view(), name='event.settings.mail'), re_path(r'^settings/email/setup$', event.MailSettingsSetup.as_view(), name='event.settings.mail.setup'), re_path(r'^settings/email/preview$', event.MailSettingsPreview.as_view(), name='event.settings.mail.preview'), re_path(r'^settings/email/layoutpreview$', event.MailSettingsRendererPreview.as_view(), name='event.settings.mail.preview.layout'), re_path(r'^settings/cancel', event.CancelSettings.as_view(), name='event.settings.cancel'), re_path(r'^settings/invoice$', event.InvoiceSettings.as_view(), name='event.settings.invoice'), re_path(r'^settings/invoice/preview$', event.InvoicePreview.as_view(), name='event.settings.invoice.preview'), re_path(r'^settings/display', event.DisplaySettings.as_view(), name='event.settings.display'), re_path(r'^settings/tax/$', event.TaxList.as_view(), name='event.settings.tax'), re_path(r'^settings/tax/(?P<rule>\d+)/$', event.TaxUpdate.as_view(), name='event.settings.tax.edit'), re_path(r'^settings/tax/add$', event.TaxCreate.as_view(), name='event.settings.tax.add'), re_path(r'^settings/tax/(?P<rule>\d+)/delete$', event.TaxDelete.as_view(), name='event.settings.tax.delete'), re_path(r'^settings/widget$', event.WidgetSettings.as_view(), name='event.settings.widget'), re_path(r'^pdf/editor/webfonts.css', pdf.FontsCSSView.as_view(), name='pdf.css'), re_path(r'^pdf/editor/(?P<filename>[^/]+).pdf$', pdf.PdfView.as_view(), name='pdf.background'), re_path(r'^subevents/$', subevents.SubEventList.as_view(), name='event.subevents'), re_path(r'^subevents/select2$', typeahead.subevent_select2, name='event.subevents.select2'), re_path(r'^subevents/(?P<subevent>\d+)/$', subevents.SubEventUpdate.as_view(), name='event.subevent'), re_path(r'^subevents/(?P<subevent>\d+)/delete$', subevents.SubEventDelete.as_view(), name='event.subevent.delete'), re_path(r'^subevents/add$', subevents.SubEventCreate.as_view(), name='event.subevents.add'), re_path(r'^subevents/bulk_add$', subevents.SubEventBulkCreate.as_view(), name='event.subevents.bulk'), re_path(r'^subevents/bulk_action$', subevents.SubEventBulkAction.as_view(), name='event.subevents.bulkaction'), re_path(r'^subevents/bulk_edit$', subevents.SubEventBulkEdit.as_view(), name='event.subevents.bulkedit'), re_path(r'^items/$', item.ItemList.as_view(), name='event.items'), re_path(r'^items/add$', item.ItemCreate.as_view(), name='event.items.add'), re_path(r'^items/(?P<item>\d+)/$', item.ItemUpdateGeneral.as_view(), name='event.item'), re_path(r'^items/(?P<item>\d+)/up$', item.item_move_up, name='event.items.up'), re_path(r'^items/(?P<item>\d+)/down$', item.item_move_down, name='event.items.down'), re_path(r'^items/reorder$', item.reorder_items, name='event.items.reorder'), re_path(r'^items/(?P<item>\d+)/delete$', item.ItemDelete.as_view(), name='event.items.delete'), re_path(r'^items/typeahead/meta/$', typeahead.item_meta_values, name='event.items.meta.typeahead'), re_path(r'^items/select2$', typeahead.items_select2, name='event.items.select2'), re_path(r'^items/select2/variation$', typeahead.variations_select2, name='event.items.variations.select2'), re_path(r'^categories/$', item.CategoryList.as_view(), name='event.items.categories'), re_path(r'^categories/select2$', typeahead.category_select2, name='event.items.categories.select2'), 
re_path(r'^categories/(?P<category>\d+)/delete$', item.CategoryDelete.as_view(), name='event.items.categories.delete'), re_path(r'^categories/(?P<category>\d+)/up$', item.category_move_up, name='event.items.categories.up'), re_path(r'^categories/(?P<category>\d+)/down$', item.category_move_down, name='event.items.categories.down'), re_path(r'^categories/reorder$', item.reorder_categories, name='event.items.categories.reorder'), re_path(r'^categories/(?P<category>\d+)/$', item.CategoryUpdate.as_view(), name='event.items.categories.edit'), re_path(r'^categories/add$', item.CategoryCreate.as_view(), name='event.items.categories.add'), re_path(r'^questions/$', item.QuestionList.as_view(), name='event.items.questions'), re_path(r'^questions/reorder$', item.reorder_questions, name='event.items.questions.reorder'), re_path(r'^questions/(?P<question>\d+)/delete$', item.QuestionDelete.as_view(), name='event.items.questions.delete'), re_path(r'^questions/(?P<question>\d+)/$', item.QuestionView.as_view(), name='event.items.questions.show'), re_path(r'^questions/(?P<question>\d+)/change$', item.QuestionUpdate.as_view(), name='event.items.questions.edit'), re_path(r'^questions/add$', item.QuestionCreate.as_view(), name='event.items.questions.add'), re_path(r'^quotas/$', item.QuotaList.as_view(), name='event.items.quotas'), re_path(r'^quotas/(?P<quota>\d+)/$', item.QuotaView.as_view(), name='event.items.quotas.show'), re_path(r'^quotas/select$', typeahead.quotas_select2, name='event.items.quotas.select2'), re_path(r'^quotas/(?P<quota>\d+)/change$', item.QuotaUpdate.as_view(), name='event.items.quotas.edit'), re_path(r'^quotas/(?P<quota>\d+)/delete$', item.QuotaDelete.as_view(), name='event.items.quotas.delete'), re_path(r'^quotas/add$', item.QuotaCreate.as_view(), name='event.items.quotas.add'), re_path(r'^vouchers/$', vouchers.VoucherList.as_view(), name='event.vouchers'), re_path(r'^vouchers/tags/$', vouchers.VoucherTags.as_view(), name='event.vouchers.tags'), re_path(r'^vouchers/rng$', vouchers.VoucherRNG.as_view(), name='event.vouchers.rng'), re_path(r'^vouchers/item_select$', typeahead.itemvarquota_select2, name='event.vouchers.itemselect2'), re_path(r'^vouchers/(?P<voucher>\d+)/$', vouchers.VoucherUpdate.as_view(), name='event.voucher'), re_path(r'^vouchers/(?P<voucher>\d+)/delete$', vouchers.VoucherDelete.as_view(), name='event.voucher.delete'), re_path(r'^vouchers/(?P<voucher>\d+)/deletecarts$', vouchers.VoucherDeleteCarts.as_view(), name='event.voucher.deletecarts'), re_path(r'^vouchers/add$', vouchers.VoucherCreate.as_view(), name='event.vouchers.add'), re_path(r'^vouchers/go$', vouchers.VoucherGo.as_view(), name='event.vouchers.go'), re_path(r'^vouchers/bulk_add$', vouchers.VoucherBulkCreate.as_view(), name='event.vouchers.bulk'), re_path(r'^vouchers/bulk_action$', vouchers.VoucherBulkAction.as_view(), name='event.vouchers.bulkaction'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/transition$', orders.OrderTransition.as_view(), name='event.order.transition'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/resend$', orders.OrderResendLink.as_view(), name='event.order.resendlink'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/(?P<position>\d+)/resend$', orders.OrderResendLink.as_view(), name='event.order.resendlink'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/invoice$', orders.OrderInvoiceCreate.as_view(), name='event.order.geninvoice'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/invoices/(?P<id>\d+)/regenerate$', orders.OrderInvoiceRegenerate.as_view(), name='event.order.regeninvoice'), 
re_path(r'^orders/(?P<code>[0-9A-Z]+)/invoices/(?P<id>\d+)/reissue$', orders.OrderInvoiceReissue.as_view(), name='event.order.reissueinvoice'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/download/(?P<position>\d+)/(?P<output>[^/]+)/$', orders.OrderDownload.as_view(), name='event.order.download.ticket'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/answer/(?P<answer>[^/]+)/$', orders.AnswerDownload.as_view(), name='event.order.download.answer'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/checkvatid', orders.OrderCheckVATID.as_view(), name='event.order.checkvatid'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/extend$', orders.OrderExtend.as_view(), name='event.order.extend'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/reactivate$', orders.OrderReactivate.as_view(), name='event.order.reactivate'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/contact$', orders.OrderContactChange.as_view(), name='event.order.contact'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/locale', orders.OrderLocaleChange.as_view(), name='event.order.locale'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/comment$', orders.OrderComment.as_view(), name='event.order.comment'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/change$', orders.OrderChange.as_view(), name='event.order.change'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/approve', orders.OrderApprove.as_view(), name='event.order.approve'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/deny$', orders.OrderDeny.as_view(), name='event.order.deny'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/delete$', orders.OrderDelete.as_view(), name='event.order.delete'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/info', orders.OrderModifyInformation.as_view(), name='event.order.info'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/sendmail$', orders.OrderSendMail.as_view(), name='event.order.sendmail'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/(?P<position>[0-9A-Z]+)/sendmail$', orders.OrderPositionSendMail.as_view(), name='event.order.position.sendmail'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/mail_history$', orders.OrderEmailHistory.as_view(), name='event.order.mail_history'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/payments/(?P<payment>\d+)/cancel$', orders.OrderPaymentCancel.as_view(), name='event.order.payments.cancel'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/payments/(?P<payment>\d+)/confirm$', orders.OrderPaymentConfirm.as_view(), name='event.order.payments.confirm'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/refund$', orders.OrderRefundView.as_view(), name='event.order.refunds.start'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/refunds/(?P<refund>\d+)/cancel$', orders.OrderRefundCancel.as_view(), name='event.order.refunds.cancel'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/refunds/(?P<refund>\d+)/process$', orders.OrderRefundProcess.as_view(), name='event.order.refunds.process'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/refunds/(?P<refund>\d+)/done$', orders.OrderRefundDone.as_view(), name='event.order.refunds.done'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/cancellationrequests/(?P<req>\d+)/delete$', orders.OrderCancellationRequestDelete.as_view(), name='event.order.cancellationrequests.delete'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/transactions/$', orders.OrderTransactions.as_view(), name='event.order.transactions'), re_path(r'^orders/(?P<code>[0-9A-Z]+)/$', orders.OrderDetail.as_view(), name='event.order'), re_path(r'^invoice/(?P<invoice>[^/]+)$', orders.InvoiceDownload.as_view(), name='event.invoice.download'), re_path(r'^orders/overview/$', orders.OverView.as_view(), name='event.orders.overview'), re_path(r'^orders/import/$', orderimport.ImportView.as_view(), 
name='event.orders.import'), re_path(r'^orders/import/(?P<file>[^/]+)/$', orderimport.ProcessView.as_view(), name='event.orders.import.process'), re_path(r'^orders/export/$', orders.ExportView.as_view(), name='event.orders.export'), re_path(r'^orders/export/do$', orders.ExportDoView.as_view(), name='event.orders.export.do'), re_path(r'^orders/refunds/$', orders.RefundList.as_view(), name='event.orders.refunds'), re_path(r'^orders/go$', orders.OrderGo.as_view(), name='event.orders.go'), re_path(r'^orders/$', orders.OrderList.as_view(), name='event.orders'), re_path(r'^orders/search$', orders.OrderSearch.as_view(), name='event.orders.search'), re_path(r'^dangerzone/$', event.DangerZone.as_view(), name='event.dangerzone'), re_path(r'^cancel/$', orders.EventCancel.as_view(), name='event.cancel'), re_path(r'^shredder/$', shredder.StartShredView.as_view(), name='event.shredder.start'), re_path(r'^shredder/export$', shredder.ShredExportView.as_view(), name='event.shredder.export'), re_path(r'^shredder/download/(?P<file>[^/]+)/$', shredder.ShredDownloadView.as_view(), name='event.shredder.download'), re_path(r'^shredder/shred', shredder.ShredDoView.as_view(), name='event.shredder.shred'), re_path(r'^waitinglist/$', waitinglist.WaitingListView.as_view(), name='event.orders.waitinglist'), re_path(r'^waitinglist/action$', waitinglist.WaitingListActionView.as_view(), name='event.orders.waitinglist.action'), re_path(r'^waitinglist/auto_assign$', waitinglist.AutoAssign.as_view(), name='event.orders.waitinglist.auto'), re_path(r'^waitinglist/(?P<entry>\d+)/delete$', waitinglist.EntryDelete.as_view(), name='event.orders.waitinglist.delete'), re_path(r'^checkins/$', checkin.CheckinListView.as_view(), name='event.orders.checkins'), re_path(r'^checkinlists/$', checkin.CheckinListList.as_view(), name='event.orders.checkinlists'), re_path(r'^checkinlists/add$', checkin.CheckinListCreate.as_view(), name='event.orders.checkinlists.add'), re_path(r'^checkinlists/select2$', typeahead.checkinlist_select2, name='event.orders.checkinlists.select2'), re_path(r'^checkinlists/(?P<list>\d+)/$', checkin.CheckInListShow.as_view(), name='event.orders.checkinlists.show'), re_path(r'^checkinlists/(?P<list>\d+)/change$', checkin.CheckinListUpdate.as_view(), name='event.orders.checkinlists.edit'), re_path(r'^checkinlists/(?P<list>\d+)/delete$', checkin.CheckinListDelete.as_view(), name='event.orders.checkinlists.delete'), ])), re_path(r'^event/(?P<organizer>[^/]+)/$', RedirectView.as_view(pattern_name='control:organizer'), name='event.organizerredirect'), ]
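# Hedged usage sketch (not part of pretix itself): the patterns above are all named and
# therefore reversible. Assuming this urlconf is mounted under the "control" namespace
# (as pattern_name='control:organizer' in the final RedirectView suggests), a view or
# test could build order URLs like this. The organizer/event/order values are made up.
from django.urls import reverse

order_url = reverse(
    'control:event.order',
    kwargs={'organizer': 'demo-org', 'event': 'demo-con', 'code': 'ABC12'},
)
# e.g. '.../event/demo-org/demo-con/orders/ABC12/' -- the exact prefix depends on where
# this urlconf is included in the project's ROOT_URLCONF.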
from pathlib import Path from unittest.mock import patch from click.testing import CliRunner from tbpore.external_tools import ExternalTool from tbpore.tbpore import TMP_NAME, main_cli @patch.object(ExternalTool, ExternalTool._run_core.__name__) class TestClusterCLI: @staticmethod def get_command_line_from_mock(mock, index): return " ".join(mock.call_args_list[index].args[0]) def test_no_input___fails(self, run_core_mock, tmp_path): opts = ["cluster"] runner = CliRunner() with runner.isolated_filesystem(temp_dir=tmp_path): result = runner.invoke(main_cli, opts) assert result.exit_code == 2 assert ( b"To cluster consensus sequences, please provide at least two input consensus sequences" in result.stdout_bytes ) def test_single_fasta_as_input___fails(self, run_core_mock, tmp_path): runner = CliRunner() with runner.isolated_filesystem(temp_dir=tmp_path) as td: td = Path(td) infile = td / "in.fq" with open(infile, "w") as fp: fp.write("@r1\nACGT\n+$$$%\n") opts = ["cluster", str(infile)] result = runner.invoke(main_cli, opts) assert result.exit_code == 2 assert ( b"To cluster consensus sequences, please provide at least two input consensus sequences" in result.stdout_bytes ) @patch("tbpore.tbpore.produce_clusters") def test_whole_execution___minimum_params( self, produce_clusters_mock, run_core_mock, tmp_path ): runner = CliRunner() with runner.isolated_filesystem(temp_dir=tmp_path) as td: td = Path(td) infile_1 = td / "in1.fq" with open(infile_1, "w") as fp: fp.write("@r1\nACGT\n+$$$%\n") infile_2 = td / "in2.fq" with open(infile_2, "w") as fp: fp.write("@r1\nACGT\n+$$$%\n") opts = ["cluster", "-o", str(td), str(infile_1), str(infile_2)] result = runner.invoke(main_cli, opts) assert result.exit_code == 0 assert run_core_mock.call_count == 1 psdm_cl = self.get_command_line_from_mock(run_core_mock, 0) psdm_matrix = td / TMP_NAME / "psdm.matrix.csv" assert ( psdm_cl == f"psdm --ignore-case --quiet --sort -t 1 -o {psdm_matrix} {td}/{TMP_NAME}/all_sequences.fq.gz" ) threshold = 6 produce_clusters_mock.assert_called_once_with(psdm_matrix, threshold, td) @patch("tbpore.tbpore.produce_clusters") def test_whole_execution___several_params( self, produce_clusters_mock, run_core_mock, tmp_path ): runner = CliRunner() with runner.isolated_filesystem(temp_dir=tmp_path) as td: td = Path(td) infile_1 = td / "in1.fq" with open(infile_1, "w") as fp: fp.write("@r1\nACGT\n+$$$%\n") infile_2 = td / "in2.fq" with open(infile_2, "w") as fp: fp.write("@r1\nACGT\n+$$$%\n") opts = [ "cluster", "-o", str(td), "--threshold", "500", "--tmp", str(td / "custom_tmp"), "--threads", "101", "--cleanup", str(infile_1), str(infile_2), ] result = runner.invoke(main_cli, opts) assert result.exit_code == 0 assert run_core_mock.call_count == 1 psdm_cl = self.get_command_line_from_mock(run_core_mock, 0) psdm_matrix = td / "custom_tmp/psdm.matrix.csv" assert ( psdm_cl == f"psdm --ignore-case --quiet --sort -t 101 -o {psdm_matrix} {td}/custom_tmp/all_sequences.fq.gz" ) threshold = 500 produce_clusters_mock.assert_called_once_with(psdm_matrix, threshold, td) @patch("tbpore.tbpore.produce_clusters") def test_no_cleanup(self, produce_clusters_mock, run_core_mock, tmp_path): runner = CliRunner() with runner.isolated_filesystem(temp_dir=tmp_path) as td: td = Path(td) infile_1 = td / "in1.fq" with open(infile_1, "w") as fp: fp.write("@r1\nACGT\n+$$$%\n") infile_2 = td / "in2.fq" with open(infile_2, "w") as fp: fp.write("@r1\nACGT\n+$$$%\n") opts = [ "cluster", "--no-cleanup", "-o", str(td), str(infile_1), str(infile_2), ] result = 
runner.invoke(main_cli, opts) assert result.exit_code == 0 tbpore_tmp = td / TMP_NAME assert tbpore_tmp.exists() @patch("tbpore.tbpore.produce_clusters") def test_with_cleanup(self, produce_clusters_mock, run_core_mock, tmp_path): runner = CliRunner() with runner.isolated_filesystem(temp_dir=tmp_path) as td: td = Path(td) infile_1 = td / "in1.fq" with open(infile_1, "w") as fp: fp.write("@r1\nACGT\n+$$$%\n") infile_2 = td / "in2.fq" with open(infile_2, "w") as fp: fp.write("@r1\nACGT\n+$$$%\n") opts = ["cluster", "--cleanup", "-o", str(td), str(infile_1), str(infile_2)] result = runner.invoke(main_cli, opts) assert result.exit_code == 0 tbpore_tmp = td / TMP_NAME assert not tbpore_tmp.exists()
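# Minimal illustration of the mock-inspection pattern that
# TestClusterCLI.get_command_line_from_mock relies on above: every call recorded on a
# MagicMock keeps its positional arguments in call_args_list, so joining the first
# positional argument reconstructs the command line that would have been executed.
# The "psdm ..." argv below is a made-up example, not tbpore's real invocation.
from unittest.mock import MagicMock

run_core_mock = MagicMock()
run_core_mock(["psdm", "--sort", "-t", "1", "-o", "out.csv", "in.fq.gz"])

command_line = " ".join(run_core_mock.call_args_list[0].args[0])
assert command_line == "psdm --sort -t 1 -o out.csv in.fq.gz"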
#!/usr/bin/env python
"""The setup script."""
import os.path as op
import warnings

from setuptools import find_packages, setup


def _read_md_as_rst(file):
    """Read Markdown file and convert it to ReStructuredText."""
    from pypandoc import convert_file

    return convert_file(file, 'rst', format='md')


def _read_md_as_md(file):
    """Read Markdown file."""
    with open(op.join(op.dirname(__file__), file)) as ifh:
        return ifh.read()


def read_md(file):
    """Read a Markdown file, converting it to ReStructuredText when pypandoc is available."""
    try:
        return _read_md_as_rst(file)
    except ImportError:
        warnings.warn("pypandoc module not found, could not convert Markdown to RST!")
        return _read_md_as_md(file)


requirements = [
    'Click>=6.0',
    # TODO: put package requirements here
]

test_requirements = [
    'pytest',
    # TODO: put package test requirements here
]

setup(
    name='jobsubmitter',
    version='0.1.1',
    description="Package for running jobs on Sun Grid Engine (SGE) / PBS / Slurm clusters.",
    long_description=read_md('README.md') + '\n\n' + read_md('HISTORY.md'),
    author="Alexey Strokach",
    author_email='[email protected]',
    url='https://gitlab.com/kimlab/jobsubmitter',
    packages=find_packages(),
    entry_points={'console_scripts': ['jobsubmitter=jobsubmitter.cli:main']},
    scripts=['jobsubmitter/scripts/qsub.sh'],
    package_data={
        # package_data values must be lists of glob patterns, not a bare string
        'jobsubmitter': ['scripts/*.sh'],
    },
    include_package_data=True,
    install_requires=requirements,
    license='MIT',
    zip_safe=False,
    keywords='jobsubmitter',
    classifiers=[
        "License :: OSI Approved :: MIT License",
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
    ],
    test_suite='tests',
    tests_require=test_requirements,
)

""" Created on Jul 5, 2018 @author: lubo """ import pytest @pytest.mark.parametrize("variants", ["variants_impala", "variants_vcf"]) @pytest.mark.parametrize( "variant_type,count", [(None, 10), ("sub", 9), ("del", 1), ("sub or del", 10), ], ) def test_single_alt_allele_variant_types( variants_impl, variants, variant_type, count ): fvars = variants_impl(variants)("backends/effects_trio") vs = list(fvars.query_variants(variant_type=variant_type,)) for v in vs: print(v.variant_types) assert len(vs) == count @pytest.mark.parametrize("variants", ["variants_impala", "variants_vcf"]) @pytest.mark.parametrize( "variant_type,count", [(None, 3), ("sub", 3), ("del", 1), ("del or sub", 3)], ) def test_multi_alt_allele_variant_types( variants_impl, variants, variant_type, count ): fvars = variants_impl(variants)("backends/effects_trio_multi") vs = list(fvars.query_variants(variant_type=variant_type,)) for v in vs: print(v.variant_types) assert len(vs) == count
import pickle import re from pathlib import Path from typing import List, NamedTuple, Optional import requests from ...core.mixins import PointMixin class NavaidTuple(NamedTuple): name: str type: str lat: float lon: float alt: Optional[float] frequency: Optional[float] magnetic_variation: Optional[float] description: Optional[str] def __getstate__(self): return self.__dict__ def __setstate__(self, d): self.__dict__.update(d) def __getattr__(self, name): if name == "latitude": return self.lat if name == "longitude": return self.lon if name == "altitude": return self.alt class Navaid(NavaidTuple, PointMixin): def __repr__(self): if self.type == "FIX": return f"{self.name} ({self.type}): {self.lat} {self.lon}" else: return ( f"{self.name} ({self.type}): {self.lat} {self.lon}" f" {self.alt:.0f}\n" f"{self.description if self.description is not None else ''}" f" {self.frequency}{'kHz' if self.type=='NDB' else 'MHz'}" ) __github_url = "https://raw.githubusercontent.com/" base_url = __github_url + "ProfHoekstra/bluesky/master/data/navdata" class NavaidParser(object): cache: Optional[Path] = None def __init__(self) -> None: if self.cache is not None and self.cache.exists(): with self.cache.open("rb") as fh: self.navaids = pickle.load(fh) else: self.initialize() if self.cache is not None: with self.cache.open("wb") as fh: pickle.dump(self.navaids, fh) def __getitem__(self, name: str) -> Optional[Navaid]: return next( (pt for pt in self.navaids if (pt.name == name.upper())), None ) def search(self, name: str) -> List[Navaid]: return list( ( pt for pt in self.navaids if ( pt.description is not None and (re.match(name, pt.description, re.IGNORECASE)) ) or (pt.name == name.upper()) ) ) def initialize(self): self.navaids = [] c = requests.get(f"{base_url}/fix.dat") for line in c.iter_lines(): line = line.decode(encoding="ascii", errors="ignore").strip() # Skip empty lines or comments if len(line) < 3 or line[0] == "#": continue # Start with valid 2 digit latitude -45. or 52. if not ((line[0] == "-" and line[3] == ".") or line[2] == "."): continue # Data line => Process fields of this record, separated by a comma # Example line: # 30.580372 -094.384169 FAREL fields = line.split() self.navaids.append( Navaid( fields[2], "FIX", float(fields[0]), float(fields[1]), None, None, None, None, ) ) c = requests.get(f"{base_url}/nav.dat") for line in c.iter_lines(): line = line.decode(encoding="ascii", errors="ignore").strip() # Skip empty lines or comments if len(line) == 0 or line[0] == "#": continue # Data line => Process fields of this record, separated by a comma # Example lines: # 2 58.61466599 125.42666626 451 522 30 0.0 A Aldan NDB # 3 31.26894444 -085.72630556 334 11120 40 -3.0 OZR CAIRNS VOR-DME # type lat lon elev freq ? 
var id desc # 0 1 2 3 4 5 6 7 8 fields = line.split() # Valid line starst with integers if not fields[0].isdigit(): continue # Next line # Get code for type of navaid itype = int(fields[0]) # Type names wptypedict = { 2: "NDB", 3: "VOR", 4: "ILS", 5: "LOC", 6: "GS", 7: "OM", 8: "MM", 9: "IM", 12: "DME", 13: "TACAN", } # Type code never larger than 20 if itype not in list(wptypedict.keys()): continue # Next line wptype = wptypedict[itype] # Select types to read if wptype not in ["NDB", "VOR", "DME", "TACAN"]: continue # Next line # Find description try: idesc = line.index(fields[7]) + len(fields[7]) description = line[idesc:].strip().upper() except Exception: description = None self.navaids.append( Navaid( fields[7], wptype, float(fields[1]), float(fields[2]), float(fields[3]), float(fields[4]) if wptype == "NDB" else float(fields[4]) / 100, float(fields[6]) if wptype in ["VOR", "NDB"] else None, description, ) )
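# Hedged usage sketch for the parser above: NavaidParser downloads fix.dat and nav.dat
# from the bluesky repository on first use (set NavaidParser.cache to a Path beforehand
# to persist the parsed list with pickle). The identifiers queried below are only
# illustrative; they may or may not exist in the downloaded data.
parser = NavaidParser()

fix = parser["NARAK"]           # exact-name lookup, returns None if unknown
hits = parser.search("CAIRNS")  # regex match on the description, or exact name match
if fix is not None:
    print(fix.lat, fix.lon)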
import os

from rafiki.utils.log import configure_logging
from rafiki.admin import Admin
from rafiki.admin.app import app

configure_logging('admin')

if __name__ == "__main__":
    # Run seed logic for admin at start-up
    admin = Admin()
    admin.seed()

    # Run Flask app; os.getenv returns a string when ADMIN_PORT is set, so cast to int
    app.run(
        host='0.0.0.0',
        port=int(os.getenv('ADMIN_PORT', 3000)),
        threaded=True)
from .attributegenie import *
""" Distillation loss for the student training """ from typing import Optional import torch import torch.nn as nn import models.defaults as defaults from utils.stft_losses import MultiResolutionSTFTLoss class DistillationLoss(nn.Module): def __init__(self, student, teacher, *, infer_teacher: Optional[callable] = None, teacher_dtype: torch.dtype, stft_loss_coeff: float, # V-- below are spectrogram params hop_length: int, win_length: int, num_mels: int, n_fft: Optional[int] = None, sample_rate=defaults.SAMPLING_RATE, min_frequency=defaults.MEL_FMIN, max_frequency=defaults.MEL_FMAX, eps=1e-5): """ A pytorch module that computes a distillation loss. :type student: [Flow, WideFlow, Affine, WaveNet]Student :type teacher: WaveGlowTeacher :param infer_teacher: optionally provide a more efficient version of teacher.infer :param teacher_dtype: dtype of inputs and outputs to teacher, :param stft_loss_coeff: multiplicative coefficient for stft loss. loss = MAE + stft_loss_coeff * STFT_LOSS """ super().__init__() self.student, self.teacher = student, teacher self.infer_teacher = infer_teacher or teacher.infer self.teacher_dtype, self.stft_loss_coeff = teacher_dtype, stft_loss_coeff self.n_fft = n_fft if n_fft is not None else win_length self.hop_length, self.win_length, self.num_mels = hop_length, win_length, num_mels self.sample_rate, self.min_frequency, self.max_frequency = sample_rate, min_frequency, max_frequency self.eps = eps self.multires_stft_loss = MultiResolutionSTFTLoss() def forward(self, mel: torch.Tensor, sigma=1.0): """ :param mel: a batch of mel-spectrograms [batch_size, channels, length], already with matching device and dtype :returns: three scalar loss values: (total loss, MAE component, STFT loss component) """ with torch.no_grad(): upsampled_mels, *wg_noise = self.teacher.sample_inputs_for(mel.to(self.teacher_dtype), sigma=sigma) reference = self.infer_teacher(upsampled_mels, *wg_noise).to(mel.dtype) student_input = torch.cat(wg_noise, dim=1).to(mel.dtype) student_prediction = self.student(student_input, upsampled_mels.to(mel.dtype)) student_prediction = student_prediction.permute(0, 2, 1).flatten(1) loss_mae = abs(student_prediction - reference).mean() loss_sc, loss_mag = self.multires_stft_loss(student_prediction, reference) loss_stft = loss_sc + loss_mag loss_full = loss_mae + loss_stft * self.stft_loss_coeff if self.stft_loss_coeff else loss_mae return loss_full, loss_mae, loss_stft
from screws.freeze.base import FrozenOnly class GPD_2SF(FrozenOnly): """""" def __init__(self, dof): """""" self._dof_ = dof self._freeze_self_() def __call__(self, *args, **kwargs): """"""
import numpy as np from pySDC.core.Errors import ParameterError, ProblemError from pySDC.core.Problem import ptype from pySDC.implementations.datatype_classes.mesh import mesh # noinspection PyUnusedLocal class nonlinear_ODE_1(ptype): """ Example implementing some simple nonlinear ODE with a singularity in the derivative, taken from https://www.osti.gov/servlets/purl/6111421 (Problem E-4) """ def __init__(self, problem_params, dtype_u=mesh, dtype_f=mesh): """ Initialization routine Args: problem_params (dict): custom parameters for the example dtype_u: mesh data type (will be passed parent class) dtype_f: mesh data type (will be passed parent class) """ # these parameters will be used later, so assert their existence essential_keys = ['u0', 'newton_maxiter', 'newton_tol'] for key in essential_keys: if key not in problem_params: msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys())) raise ParameterError(msg) problem_params['nvars'] = 1 if 'stop_at_nan' not in problem_params: problem_params['stop_at_nan'] = True # invoke super init, passing dtype_u and dtype_f, plus setting number of elements to 2 super(nonlinear_ODE_1, self).__init__((problem_params['nvars'], None, np.dtype('float64')), dtype_u, dtype_f, problem_params) def u_exact(self, t): """ Exact solution Args: t (float): current time Returns: dtype_u: mesh type containing the values """ me = self.dtype_u(self.init) me[:] = t - t ** 2 / 4 return me def eval_f(self, u, t): """ Routine to compute the RHS Args: u (dtype_u): the current values t (float): current time (not used here) Returns: dtype_f: RHS, 1 component """ f = self.dtype_f(self.init) f[:] = np.sqrt(1 - u) return f def solve_system(self, rhs, dt, u0, t): """ Simple Newton solver for the nonlinear equation Args: rhs (dtype_f): right-hand side for the nonlinear system dt (float): abbrev. for the node-to-node stepsize (or any other factor required) u0 (dtype_u): initial guess for the iterative solver t (float): current time (e.g. for time-dependent BCs) Returns: dtype_u: solution u """ # create new mesh object from u0 and set initial values for iteration u = self.dtype_u(u0) # start newton iteration n = 0 res = 99 while n < self.params.newton_maxiter: # form the function g with g(u) = 0 g = u - dt * np.sqrt(1 - u) - rhs # if g is close to 0, then we are done res = np.linalg.norm(g, np.inf) if res < self.params.newton_tol or np.isnan(res): break # assemble dg/du dg = 1 - (-dt) / (2 * np.sqrt(1 - u)) # newton update: u1 = u0 - g/dg u -= 1.0 / dg * g # increase iteration count n += 1 if np.isnan(res) and self.params.stop_at_nan: raise ProblemError('Newton got nan after %i iterations, aborting...' % n) elif np.isnan(res): self.logger.warning('Newton got nan after %i iterations...' % n) if n == self.params.newton_maxiter: raise ProblemError('Newton did not converge after %i iterations, error is %s' % (n, res)) return u
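# Standalone sketch of the Newton iteration used in solve_system above, for a single
# implicit update u = rhs + dt*sqrt(1-u): the residual is g(u) = u - dt*sqrt(1-u) - rhs
# and its derivative is dg/du = 1 + dt/(2*sqrt(1-u)). The rhs and dt values are
# arbitrary illustrations, not taken from the problem class.
import numpy as np

def newton_step_example(rhs=0.2, dt=0.1, tol=1e-12, maxiter=50):
    u = rhs  # initial guess
    for _ in range(maxiter):
        g = u - dt * np.sqrt(1.0 - u) - rhs
        if abs(g) < tol:
            break
        dg = 1.0 + dt / (2.0 * np.sqrt(1.0 - u))
        u -= g / dg
    return u

u = newton_step_example()
assert abs(u - 0.1 * np.sqrt(1.0 - u) - 0.2) < 1e-10  # satisfies the implicit relation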
"""Fast CMS computation""" import os, logging, functools from functools import reduce #import blaze as bz import numpy as np import pandas as pd #from into import into from Operations.MiscUtil import Dict, dbg, AddFileSfx, MakeSeq, StatKeeper from Operations.tsvutils import DefineRulesTo_computeMeanStd, DefineRulesTo_normalizeOneColumn, \ computeMeanStd_binned_tsvs, normalizeInBins_tsv from Operations.Shari_Operations.localize import subs from Operations.Shari_Operations.localize.fstBySNP_Npops import fst_onePopPair from Operations.Shari_Operations.localize.CMS import CMSBins from Operations.bioutil import genomeBuild2genMapSfx def getFN_xpop_signif( sweepDir, chrom, pop1, pop2 ): """Return filename of xpop significance scores from Sweep output""" return os.path.join( sweepDir, 'analysis', 'chr%(chrom)s' % locals(), 'xpop_significance_%(pop1)s_%(pop2)s.tsv' % locals() ) def getFN_ihs_signif( sweepDir, chrom, pop ): """Return filename of xpop significance scores from Sweep output""" return os.path.join( sweepDir, 'analysis', 'chr%(chrom)s' % locals(), 'ihs_significance_%(pop)s.tsv' % locals() ) def gatherXPOPscores( pops, chrom, selPop, sweepDir, outFN, getio = None ): """Gather xpop scores into a convenient form.""" pops = [p for p in pops if p != selPop] pop2FN = dict([ ( pop, getFN_xpop_signif( pop1 = selPop, pop2 = pop, sweepDir = sweepDir, chrom = chrom ) ) for pop in pops ]) if getio: return dict( depends_on = list(pop2FN.values()), creates = outFN, attrs = Dict( 'chrom', pop = pops, piperun_short = True ) ) def LoadComparison( pop ): """Load comparison with one pop""" d0 = pd.read_csv( pop2FN[ pop ], sep = '\t', usecols = ( 'Pop 1', 'Pop 2', 'Chrom' ), nrows = 1 ) dbg( 'd0' ) assert str( d0.loc[ 0, 'Chrom' ] ) == str( chrom ) assert ( d0.loc[ 0, 'Pop 1'] == selPop and d0.loc[0, 'Pop 2'] == pop ) or ( d0.loc[0,'Pop 1'] == pop and d0.loc[0,'Pop 2'] == selPop ) flip = ( d0.loc[0,'Pop 1'] == pop ) d = pd.read_csv( pop2FN[ pop ], sep = '\t', usecols = ( 'SNP pos (bases)', 'L AllEHH logratio Deviation', 'R AllEHH logratio Deviation' ), index_col = 'SNP pos (bases)', na_values = ( '-', ) ) d.info() if flip: d[ 'L AllEHH logratio Deviation' ] *= -1 d[ 'R AllEHH logratio Deviation' ] *= -1 return pd.DataFrame.from_dict({ pop : d.max( axis = 1 ) }) # end: def LoadComparison( pop ) comparisons = reduce( lambda d1, d2: d1.join( d2, how = 'inner' ), list(map( LoadComparison, pops )) ).max( axis = 1, columns = ( 'max_xpop', ) ) comparisons.index.name = 'pos' comparisons.name = 'max_xpop' # print 'type of comparisons is', type(comparisons) # print comparisons comparisons.to_csv( outFN, sep = '\t' , header = True ) # end: def gatherXPOPscores( pops, chrom, selPop, sweepDir, outFN, getio = None ): def gather_snp_info( pops, pop2snpInfoFN, pop2ancFreqFN, pop2sampleSizeFN, getio = None ): """Gather SNP freq info""" if getio: return dict( depends_on = list(pop2snpInfoFN.values()), creates = ( pop2ancFreqFN, pop2sampleSizeFN ), attrs = dict( pop = pops ) ) pop2ancFreq = pd.DataFrame( data = dict([ ( pop, pd.read_csv( pop2snpInfoFN[ pop ], sep = '\t', usecols = ( 'SNP pos (bases)', 'Ancestral Freq' ), index_col = 'SNP pos (bases)' )[ 'Ancestral Freq' ] ) for pop in pops ]) ) pop2ancFreq.dropna( inplace = True ) pop2ancFreq.to_csv( pop2ancFreqFN, sep = '\t', index_label = 'pos' ) def getSampleSize( pop ): z = pd.read_csv( pop2snpInfoFN[ pop ], sep = '\t', usecols = ( 'A0count', 'A1count' ), nrows = 1 ) return z.at[ 0, 'A0count' ] + z.at[ 0, 'A1count' ] pop2sampleSize = pd.Series( dict([ ( pop, 
getSampleSize( pop ) ) for pop in pops ]), name = 'sampleSize' ) pop2sampleSize.to_csv( pop2sampleSizeFN, sep = '\t', header = True, index_label = 'pop' ) # end: def gather_snp_info( pops, pop2snpInfoFN, pop2ancFreqFN, pop2sampleSizeFN, getio = None ) def gather_iHS_scores( selPop, chrom, ihsFN, pop2ancFreqFN, ihsOutFN, dihhOutFN, getio = None ): """Gather iHS scores""" if getio: return dict( depends_on = ( ihsFN, pop2ancFreqFN ), creates = ( ihsOutFN, dihhOutFN ), attrs = Dict( 'chrom', pop = selPop, piperun_short = True ) ) d0 = pd.read_csv( ihsFN, sep = '\t', usecols = ( 'Population', 'Chrom' ), nrows = 1 ) dbg( 'd0' ) assert str( d0.loc[ 0, 'Chrom' ] ) == str( chrom ) assert d0.loc[ 0, 'Population'] == selPop d = pd.read_csv( ihsFN, sep = '\t', usecols = ( 'SNP pos (bases)', 'Ancestral Freq', 'Both iHS', 'Both iHH_D', 'Both iHH_A' ), index_col = 'SNP pos (bases)', na_values = ( '-', ) ) d.index.name = 'pos' pop2ancFreq = pd.read_table( pop2ancFreqFN, index_col = 'pos', usecols = ( 'pos', selPop, ) ) # snp2ancFreq = pd.read_table( snpInfoFN, index_col = 'SNP pos (bases)', # usecols = ( 'SNP pos (bases)', 'Ancestral Freq' ) ) # snp2ancFreq.dropna( inplace = True ) # dbg( 'len(pop2ancFreq) len(snp2ancFreq) pop2ancFreq.index.difference(snp2ancFreq.index)' ) # dbg( 'len(pop2ancFreq) len(snp2ancFreq) snp2ancFreq.index.difference(pop2ancFreq.index)' ) # dbg( 'np.all(pop2ancFreq.index.values==snp2ancFreq.index.values)' ) # dbg( 'np.sum(pop2ancFreq.index.values==snp2ancFreq.index.values)' ) # dbg( 'len(pop2ancFreq.index.values) len(snp2ancFreq.index.values)' ) # pop2ancFreq.index.name = 'pos' # dbg( '3 pop2ancFreq selPop pop2ancFreq.columns' ) pop2ancFreq.rename( columns = { selPop : selPop + '_ancFreq' }, inplace = True ) # dbg( '4 pop2ancFreq' ) # print "ii:", pop2ancFreq.info() # pop2ancFreq.to_csv( 'pf.tsv', sep = '\t', header = True, na_rep = 'NaN' ) # dbg( '1 d' ) d = d.join( pop2ancFreq, how = 'right', sort = True ) # dbg( '2 d' ) # af1 = d['Ancestral Freq'] af2 = d[selPop + '_ancFreq'] # dbg( '"GGGGGGGGGG" (af1-af2).max() (af1.isnull()==af2.isnull()).all()' ) d_iHS = pd.DataFrame( data = dict( iHS = d[ 'Both iHS' ] ) ) d_iHS.to_csv( ihsOutFN, sep = '\t', header = True, na_rep = 'NaN' ) # dihh = subs.normalizeByFreq( rawVals = ( d[ 'Both iHH_D' ] - d[ 'Both iHH_A' ] ).values, # ancfreq = 1.0 - af2.values ) d_iHH = pd.DataFrame( data = dict( iHHDiff = d[ 'Both iHH_D' ] - d[ 'Both iHH_A' ], normingFreqs = 1.0 - af2 ) ) d_iHH.to_csv( dihhOutFN, sep = '\t', header = True, na_rep = 'NaN' ) # end: def gather_iHS_scores( selPop, chrom, ihsFN, snpInfoFN, ihsOutFN, dihhOutFN, getio = None ): def computeMeanFstAndFreqDiffScores( pops, chrom, selPop, sweepDir, pop2ancFreqFN, pop2sampleSizeFN, outMeanFstFN, outFreqDiffFN, getio = None ): """Compute meanFst and freqDiff scores""" if selPop not in pops: pops = tuple( MakeSeq( pops ) ) + ( selPop, ) cmpPops = [ pop for pop in pops if pop != selPop ] if getio: return dict( depends_on = ( pop2ancFreqFN, pop2sampleSizeFN ), creates = ( outMeanFstFN, outFreqDiffFN ), attrs = Dict( 'chrom', pop = pops ) ) # pop2ancFreq.to_csv( 'befdrop.tsv', sep = '\t' ) # pop2ancFreq.fillna( value = 1.0, inplace = True ) # pop2ancFreq.to_csv( 'aftdrop.tsv', sep = '\t' ) pop2ancFreq = pd.read_table( pop2ancFreqFN, index_col = 'pos' ) pop2sampleSize = pd.read_table( pop2sampleSizeFN, index_col = 'pop' ).sampleSize dbg( 'pop2sampleSize' ) #pop2snpInfo.to_csv( 'test.tsv', sep = '\t', header = True ) derFreq = 1.0 - pop2ancFreq[ selPop ] cmpAncFreqs = pop2ancFreq[ [ pop for pop 
in pops if pop != selPop ] ] meanAnc = cmpAncFreqs.mean( axis = 1 ) freqDiff = derFreq - ( 1.0 - meanAnc ) freqDiff.name = 'freqDiff' freqDiff.to_csv( outFreqDiffFN, sep = '\t', header = True ) # compute meanFst # dbg( '"vvvvvvvvvvvw" selPop pop2ancFreq[selPop] pop2ancFreq["JPT+CHB"] pop2ancFreq["YRI"]' ) # dbg( 'selPop pop2sampleSize[selPop] pop2sampleSize["JPT+CHB"] pop2sampleSize["YRI"]' ) d = dict([ ( pop, fst_onePopPair( ancFreqs = np.array( ( pop2ancFreq[ selPop ], pop2ancFreq[ pop ] ) ), sampleSizes = ( pop2sampleSize[ selPop ], pop2sampleSize[ pop ] ) ) ) for pop in cmpPops ]) fstVals = pd.DataFrame( data = d, index = pop2ancFreq.index ) # spc = fst_onePopPair( ancFreqs = np.array( ( pop2ancFreq[ 'BEB' ], pop2ancFreq[ 'ASN' ] ) ), # sampleSizes = ( pop2sampleSize[ 'BEB' ], pop2sampleSize[ 'ASN' ] ) ) # dbg( '"ddddddddddd" fstVals.loc[526736] spc' ) # dbg( 'fstVals' ) fstVals.fillna( value = 0.0, inplace = True ) #fstVals.to_csv( 'fstvals.tsv', sep = '\t', header = True, na_rep = 'NaN' ) fstMean = fstVals.mean( axis = 1 ) dbg( 'fstVals fstMean' ) fstMean.name = 'meanFst' fstMean.to_csv( outMeanFstFN, sep = '\t', header = True, na_rep = 'NaN' ) # end: def computeMeanFstAndFreqDiffScores( pops, chrom, selPop, sweepDir, outMeanFstFN, outFreqDiffFN, getio = None ) def computeLikeRatioForStat_do( statVals, hitLikes, missLikes, bins ): """Compute likes ratio""" # Precompute the likelihood ratio corresponding to each bin dbg( 'statVals hitLikes missLikes bins' ) indNaN = hitLikes != 1e-10 missingVal = np.log( np.min( hitLikes[indNaN] / missLikes[indNaN] ) ) CLR = [ ( np.log( hitLike / missLike ) if hitLike != 1e-10 else missingVal ) if hitLike != 0.0 else np.nan for hitLike, missLike in zip( hitLikes, missLikes ) ] CLR = np.array( [ CLR[0] ] + CLR + [ CLR[-1] ] ) binIds = np.digitize( statVals.values, bins ) st_binSize = ( bins[1] - bins[0] ) st_nbins = len( bins ) -1 binIds2 = np.where( np.isfinite( statVals.values ), np.clip( ( ( statVals.values - bins[0] ) / st_binSize ).astype( np.int16 ), 0, st_nbins-1 ), len( hitLikes ) ) + 1 return np.where( np.isnan( statVals.values ), np.repeat( np.nan, len( statVals ) ), CLR[ binIds ] ), binIds, binIds2 # end: def computeLikeRatioForStat_do( statVals, hitLikes, missLikes, bins ) def computeLikeRatioForStat( stat, statValsFN, hitLikesFN, missLikesFN, stat_start, stat_end, stat_nbin, statLikesRatioFN, getio = None ): """Compute likes for one stat""" if getio: return dict( depends_on = ( statValsFN, hitLikesFN, missLikesFN ), creates = statLikesRatioFN, uses = computeLikeRatioForStat_do ) statVals = pd.read_table( statValsFN ) hitLikes = pd.read_table( hitLikesFN )[ stat ] missLikes = pd.read_table( missLikesFN )[ stat ] bins = np.linspace( stat_start, stat_end, stat_nbin+1 ) statLikeRatio, statBinIds, statBinIds2 = computeLikeRatioForStat_do( statVals = statVals[ stat ], **Dict( 'hitLikes missLikes bins' ) ) statVals[ stat + 'likeRatio' ] = statLikeRatio statVals[ stat + 'Bin' ] = statBinIds statVals[ stat + 'Bin2' ] = statBinIds2 statVals.to_csv( statLikesRatioFN, sep = '\t', columns = ( 'pos', stat, stat + 'likeRatio', stat + 'Bin', stat + 'Bin2' ), index = False, na_rep = 'NaN' ) # end: def computeLikeRatioForStat ## Bins values by frequencies and then normalizes within bins def normalizeByFreq_getMeanStd(rawVals,ancfreq,stdKeeper,meanKeepers): Frequency = np.arange(0.05, 1.05, 0.05) der_freq = 1 - ancfreq stdKeeper.addVals(rawVals) for i in range(len(Frequency)): idx = ((Frequency[i] - der_freq) < .05 ) & ( Frequency[i] - der_freq > 0) # dbg( 
'i Frequency[i] np.sum(idx)' ) meanKeepers[i].addVals( rawVals[ idx ] ) def normalizeByFreq_getMeanStd_tsv(iHHDiffFNs, globalStatFN, binsStatFN, getio = None): """Compute mean and stddev for normalizing ihhdiff within freqs""" if getio: return dict( depends_on = iHHDiffFNs, creates = ( globalStatFN, binsStatFN ) ) stdKeeper = StatKeeper() meanKeepers = [ StatKeeper() for i in range( 20 ) ] for f in iHHDiffFNs: d = pd.read_table( f ) normalizeByFreq_getMeanStd( d.iHHDiff.values, 1.0 - ( 1.0 - d.normingFreqs.values ), stdKeeper, meanKeepers ) # dbg( '"ZZZZZZ" iHHDiffFNs stdKeeper.getStd map(StatKeeper.getMean,meanKeepers) map(StatKeeper.getCount,meanKeepers)' ) pd.DataFrame( dict( std = ( stdKeeper.getStd(), ) ) ).to_csv( globalStatFN, sep = '\t', na_rep = 'NaN', header = True, index = False ) pd.DataFrame( dict( mean = list(map( StatKeeper.getMean, meanKeepers )) ) ).to_csv( binsStatFN, sep = '\t', na_rep = 'NaN', header = True, index_label = 'binId' ) def normalizeByFreq_compute_normed(rawVals,ancfreq, StdDev, expectation): Frequency = np.arange(0.05, 1.05, 0.05) #print Frequency der_freq = 1 - ancfreq normVal = np.repeat( np.nan, len( rawVals ) ) # Bookkeeping dbg( 'StdDev' ) dbg( 'der_freq' ) for i in range(len(Frequency)): idx = ((Frequency[i] - der_freq) < .05) & ( (Frequency[i] - der_freq) >= 0 ) & np.isfinite( rawVals ) normVal[ idx ] = (rawVals[ idx ] - expectation[i])/StdDev # dbg( '"KKKKK" i Frequency[i] expectation[i] idx.nonzero() rawVals[idx] normVal[idx]' ) return normVal def normalizeByFreq_compute_normed_tsv(iHHDiffFN, globalStatFN, binsStatFN, StdDiffFN, getio = None): """Computed normed iHHDiff""" if getio: return dict( depends_on = ( iHHDiffFN, globalStatFN, binsStatFN ), creates = StdDiffFN ) d = pd.read_table( iHHDiffFN ) gstat = pd.read_table( globalStatFN ) binsStat = pd.read_table( binsStatFN ) normVal = normalizeByFreq_compute_normed( rawVals = d.iHHDiff.values, ancfreq = d.normingFreqs.values, StdDev = gstat[ 'std' ].iloc[0], expectation = binsStat[ 'mean' ].values ) d[ 'StdDiff' ] = normVal d.to_csv( StdDiffFN, sep = '\t', header = True, index = False, na_rep = 'NaN' ) def computeMeanStd( inFNs, colName, outFN, getio = None ): """Compute mean and std using blaze""" if getio: return dict( depends_on = inFNs, creates = outFN ) filenames = inFNs dbg( 'inFNs' ) sk = StatKeeper() for f in filenames: dbg( 'f' ) d = pd.read_table( f ) dbg( 'f len(d)' ) sk.addVals( d[ colName ].values ) pd.DataFrame( dict( stat = 'mean std count numNaNs'.split(), val = ( sk.getMean(), sk.getStd(), sk.getCount(), sk.getNumNaNs() ) ) ).to_csv( outFN, sep = '\t', index = False, na_rep = 'NaN' ) def addLikesRatios( inFNs, colNames, outFN, getio = None ): """Add up likes ratios""" if getio: return dict( depends_on = inFNs, creates = outFN ) result = None for fn, colName in zip( inFNs, colNames ): d = pd.read_table( fn, index_col = 0 ).dropna()[ colName ] d.name = 'likesRatio' if result is None: result = d else: result += d result.to_csv( outFN, header = True, na_rep = 'NaN', sep = '\t' ) # data = bz.chunks(bz.CSV)([bz.CSV(fn) for fn in filenames]) # d = bz.Data(data) # into( outFN, pd.DataFrame( dict( mean = d[ colName ].mean(), std = d[ colName ].std() ) ) ) def joinStats( snpInfoFN, statLikesFNs, likesRatioFN, outFN, getio = None ): """Join stats into one file""" if getio: return dict( depends_on = ( snpInfoFN, likesRatioFN ) + tuple( MakeSeq( statLikesFNs ) ), creates = outFN ) snpInfo = pd.read_table( snpInfoFN, index_col = 'SNP pos (bases)' ) snpInfo.index.rename( 'pos', inplace = True ) 
statLikes = [ pd.read_table( statLikeFN, index_col = 'pos' ) for statLikeFN in statLikesFNs ] likesRatio = pd.read_table( likesRatioFN, index_col = 'pos' ) result = snpInfo.join( statLikes + [ likesRatio ], how = 'outer' ) result.info() dbg( 'result.describe()' ) result.to_csv( outFN, sep = '\t', na_rep = 'NaN', header = True ) # end: def joinStats( snpInfoFN, stats, statFNs, statLikesFNs, outFN, getio = None ) def DefineRulesTo_fastCMS( pr, pops, chroms, selPop, sweepDir, cmsDir, genomeBuild = 'hg19' ): """Define rules to do fast CMS computation. Params: pr - the PipeRun object to which to add rules selPop - testing selection in which pop? pops - comparing selPop to which pops? sweepDir - the sweep directory cmsDir - the directory under which CMS stats go """ pops = list( MakeSeq( pops ) ) if selPop not in pops: pops.append( selPop ) allPops = tuple( MakeSeq( pops ) ) if selPop not in allPops: allPops += ( selPop, ) cmpPops = [ pop for pop in allPops if pop != selPop ] rawScoresFN = {} genMapSfx = genomeBuild2genMapSfx[ genomeBuild ] for pop in allPops: for chrom in chroms: with pr.settingAttrs( 'pop chrom' ): snpInfoFN = os.path.join( sweepDir, 'analysis/chr%(chrom)s/snps_%(pop)s.tsv' % locals() ) projDir = os.path.join( sweepDir, 'data/chr%(chrom)s' % locals() ) ancestralImportedFN = os.path.join( projDir, 'ancestral.tsv.imported' ) genotypesImportedFN = os.path.join( projDir, 'genotypes_chr%(chrom)s_%(pop)s_r21_nr_fwd_phased_all.imported' % locals() ) genMapImportedFN = os.path.join( projDir, 'genetic_map_chr%(chrom)s_%(genMapSfx)s.txt.imported' % locals() ) pr.addRule( name = 'extractSnpInfo', commands = 'java -classpath ../Other/Ilya_Other/sweep/sweepsrc/sweep.jar edu.mit.broad.sweep.Main ExtractAlleleFreqs %(projDir)s/project %(snpInfoFN)s %(pop)s %(chrom)s' % locals(), commandsOld = 'java -classpath ../Other/Ilya_Other/sweep/sweepsrc/sweep/target/sweep-1.0-SNAPSHOT-jar-with-dependencies.jar edu.mit.broad.sweep.Main ExtractAlleleFreqs %(projDir)s/project %(snpInfoFN)s %(pop)s %(chrom)s' % locals(), depends_on = ( ancestralImportedFN, genotypesImportedFN, genMapImportedFN ), creates = snpInfoFN ) chr2dihhFN = {} for chrom in chroms: with pr.settingAttrs( 'chrom' ): chrom_s = 'chr' + str( chrom ) chromDir = os.path.join( cmsDir, chrom_s ) xpopScoresFN = os.path.join( chromDir, AddFileSfx( 'max_xpop.tsv', chrom_s, selPop, pops ) ) pr.addInvokeRule( invokeFn = gatherXPOPscores, invokeArgs = Dict( 'pops chrom selPop sweepDir', outFN = xpopScoresFN ), attrs = dict( pop = allPops, stat = 'max_xpop', piperun_short = True ) ) ihsFN = getFN_ihs_signif( **Dict( 'sweepDir chrom', pop = selPop ) ) ihsScoresFN = os.path.join( chromDir, AddFileSfx( 'iHS.tsv', chrom_s, selPop, pops ) ) dihhScoresFN = os.path.join( chromDir, AddFileSfx( 'dihh.tsv', chrom_s, selPop, pops ) ) chr2dihhFN[ chrom ] = dihhScoresFN pop2ancFreqFN = os.path.join( cmsDir, chrom_s, AddFileSfx( 'pop2ancFreq.tsv', chrom_s, pops ) ) pop2sampleSizeFN = os.path.join( cmsDir, chrom_s, AddFileSfx( 'pop2sampleSize.tsv', chrom_s, pops ) ) pop2snpInfoFN = dict([ ( pop, os.path.join( sweepDir, 'analysis', chrom_s, 'snps_%(pop)s.tsv' % locals() ) ) for pop in pops ]) pr.addInvokeRule( invokeFn = gather_snp_info, invokeArgs = Dict( 'pops pop2snpInfoFN pop2ancFreqFN pop2sampleSizeFN' ) ) pr.addInvokeRule( invokeFn = gather_iHS_scores, invokeArgs = Dict( 'chrom selPop ihsFN pop2ancFreqFN', # snpInfoFN = pop2snpInfoFN[ selPop ], ihsOutFN = ihsScoresFN, dihhOutFN = dihhScoresFN ), attrs = dict( pop = selPop, stat = ( 'iHS', 'StdDiff' ), 
piperun_short = True ) ) freqDiffScoresFN = os.path.join( chromDir, AddFileSfx( 'freqDiff.tsv', chrom_s, selPop, pops ) ) meanFstScoresFN = os.path.join( chromDir, AddFileSfx( 'meanFst.tsv', chrom_s, selPop, pops ) ) pr.addInvokeRule( invokeFn = computeMeanFstAndFreqDiffScores, invokeArgs = Dict( 'chrom selPop sweepDir pops pop2ancFreqFN pop2sampleSizeFN', outMeanFstFN = meanFstScoresFN, outFreqDiffFN = freqDiffScoresFN ), attrs = dict( pop = allPops, stat = ( 'freqDiff', 'meanFst' ), piperun_short = True ) ) StdDiffScoresFN = os.path.join( chromDir, AddFileSfx( 'StdDiff.tsv', chrom_s, selPop, pops ) ) rawScoresFN[ chrom ] = dict( iHS = ihsScoresFN, StdDiff = StdDiffScoresFN, meanFst = meanFstScoresFN, freqDiff = freqDiffScoresFN, max_xpop = xpopScoresFN ) # end: with pr.settingAttrs( 'chrom' ) # end: for chrom in chroms # ihhStdFN = os.path.join( cmsDir, 'dihhstd.tsv' ) dihhGlobalStdFN = os.path.join( cmsDir, AddFileSfx( 'dihh_global_std.tsv', selPop, pops ) ) dihhBinMeansFN = os.path.join( cmsDir, AddFileSfx( 'dihh_bin_means.tsv', selPop, pops ) ) pr.addInvokeRule( invokeFn = normalizeByFreq_getMeanStd_tsv, invokeArgs = dict( iHHDiffFNs = [ chr2dihhFN[k] for k in chroms ], globalStatFN = dihhGlobalStdFN, binsStatFN = dihhBinMeansFN ), name = 'compute_dihh_meanstd' ) # pr.addInvokeRule( invokeFn = computeMeanStd_binned_tsvs, # invokeArgs = dict( inFNs = chr2dihhFN.values(), valCol = 'iHHDiff', # binCol = 'normingFreqs', binMin = 0.05, binMax = 1.05, binStep = .05, # outFN = ihhStdFN ), # name = 'compute_dihh_std' ) for chrom in chroms: with pr.settingAttrs( 'chrom' ): chrom_s = 'chr' + str( chrom ) chromDir = os.path.join( cmsDir, chrom_s ) StdDiffScoresFN = os.path.join( chromDir, AddFileSfx( 'StdDiff.tsv', chrom_s, selPop, pops ) ) dbg( 'chrom chr2dihhFN[chrom]' ) pr.addInvokeRule( invokeFn = normalizeByFreq_compute_normed_tsv, invokeArgs = dict( iHHDiffFN = chr2dihhFN[ chrom ], globalStatFN = dihhGlobalStdFN, binsStatFN = dihhBinMeansFN, StdDiffFN = StdDiffScoresFN ) ) statFNs = {} statLikesRatioFNs = {} for stat in CMSBins.CMSstats: with pr.settingAttrs( stat = stat, pop = ( selPop, ) if stat in ( 'iHS', 'StdDiff' ) else allPops, piperun_short = True ): if stat not in CMSBins.nonNormedStats: rawFNs = [ rawScoresFN[ chrom ][ stat ] for chrom in chroms ] meanStdFN = os.path.join( cmsDir, AddFileSfx( 'meanStd.tsv', stat, selPop, pops ) ) # DefineRulesTo_computeMeanStd( pr, inFNs = rawFNs, colNum = 1, # outFN = meanStdFN, # addRuleArgs = \ # dict( name = 'computeMeanStd_for_stat', # attrs = dict( chrom = chroms ) ) ) # meanStdBzFN = os.path.join( cmsDir, stat + '_meanStdForStat.tsv' ) pr.addInvokeRule( invokeFn = computeMeanStd, invokeArgs = dict( inFNs = rawFNs, colName = stat, outFN = meanStdFN ) ) # end: if stat not in CMSBins.nonNormedStats for chrom in chroms: with pr.settingAttrs( 'chrom' ): statFN = rawScoresFN[ chrom ][ stat ] if stat not in CMSBins.nonNormedStats: normedFN = AddFileSfx( statFN, 'normed' ) DefineRulesTo_normalizeOneColumn( pr, inFN = statFN, meanStdFN = meanStdFN, colName = stat, outFN = normedFN, addRuleArgs = dict( attrs = Dict( 'chrom' ) ) ) statFN = normedFN bins_beg = CMSBins.stat_start[ stat ] bins_end = CMSBins.stat_end[ stat ] bins_n = CMSBins.stat_nbin[ stat ] statFNs[ ( chrom, stat ) ] = statFN statLikesRatioFN = AddFileSfx( rawScoresFN[ chrom ][ stat ], 'likesRatio' ) statLikesRatioFNs[ ( chrom, stat ) ] = statLikesRatioFN pr.addInvokeRule( invokeFn = computeLikeRatioForStat, invokeArgs = dict( stat = stat, statValsFN = statFN, hitLikesFN = 
'../Data/Common_Data/sim/likes/hitsLikes_toneutFixed_1.tsv', missLikesFN = '../Data/Common_Data/sim/likes/missLikes_toneutFixed_1.tsv', stat_start = bins_beg, stat_end = bins_end, stat_nbin = bins_n, statLikesRatioFN = statLikesRatioFN ) ) # end: with pr.settingAttrs( 'chrom' ) # end: for chrom in chroms # end: with pr.settingAttrs( stat = stat, piperun_short = True ) # end: for stat in CMSBins.CMSstats for chrom in chroms: with pr.settingAttrs( chrom = chrom, stat = CMSBins.CMSstats ): chrom_s = 'chr' + str( chrom ) chromDir = os.path.join( cmsDir, chrom_s ) likesRatioFN = os.path.join( chromDir, AddFileSfx( 'likesRatio.tsv', CMSBins.CMSstats, selPop, pops ) ) pr.addInvokeRule( invokeFn = addLikesRatios, invokeArgs = dict( inFNs = [ statLikesRatioFNs[ ( chrom, stat ) ] for stat in CMSBins.CMSstats ], colNames = [ colName + 'likeRatio' for colName in CMSBins.CMSstats ], outFN = likesRatioFN ) ) joinStatsFN = os.path.join( chromDir, AddFileSfx( 'joinStats.tsv', CMSBins.CMSstats, selPop, pops ) ) snpInfoFN = os.path.join( sweepDir, 'analysis/chr%(chrom)s/snps_%(selPop)s.tsv' % locals() ) pr.addInvokeRule( invokeFn = joinStats, invokeArgs = dict( snpInfoFN = snpInfoFN, statLikesFNs = [ statLikesRatioFNs[ ( chrom, stat ) ] for stat in CMSBins.CMSstats ], likesRatioFN = likesRatioFN, outFN = joinStatsFN ), attrs = dict( stat = CMSBins.CMSstats, chrom = chrom ) ) # end: def DefineRulesTo_fastCMS( pr, pops, chroms, selPop, popSampleSizesFN, sweepDir, cmsDir ) if __name__ == '__main__': # gatherXPOPscores( pops = ( 'ASN', 'BEB', 'YRI' ), selPop = 'CEU', chrom = 22, sweepDir = '/idi/sabeti-scratch/ilya/gsvn/Data/Elinor_Data/BNG_CMS_1KG_P3_gw/sweepanalysis', outFN = 'cmp.tsv' ) computeMeanFstAndFreqDiffScores( pops = ( 'CEU', 'ASN', 'BEB', 'YRI' ), selPop = 'CEU', chrom = 22, sweepDir = '/idi/sabeti-scratch/ilya/gsvn/Data/Elinor_Data/BNG_CMS_1KG_P3_gw/sweepanalysis', outMeanFstFN = 'cmp.tsv', outFreqDiffFN = 'cmp2.tsv' )
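# Illustrative sketch (not part of the pipeline above): the dihh normalization implemented by
# normalizeByFreq_getMeanStd / normalizeByFreq_compute_normed bins SNPs by derived-allele
# frequency into 20 bins of width 0.05, subtracts the per-bin mean, and divides by one global
# standard deviation. The synthetic arrays below are hypothetical stand-ins for real iHHDiff
# data, and the half-open bin edges here differ slightly from the pipeline's closed-upper-edge
# convention.
import numpy as np

rng = np.random.default_rng(0)
raw_vals = rng.normal(size=1000)             # stand-in for iHHDiff values
der_freq = rng.uniform(0.0, 1.0, size=1000)  # stand-in for derived-allele frequencies

bin_edges = np.arange(0.05, 1.05, 0.05)      # upper edges of the 20 frequency bins
bin_idx = np.minimum((der_freq / 0.05).astype(int), len(bin_edges) - 1)

bin_means = np.array([raw_vals[bin_idx == b].mean() if np.any(bin_idx == b) else np.nan
                      for b in range(len(bin_edges))])
global_std = np.nanstd(raw_vals)

normed = (raw_vals - bin_means[bin_idx]) / global_std
print(np.nanmean(normed), np.nanstd(normed))  # roughly 0 and 1 for this homogeneous toy data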
import numpy as np
import util


class NeuralNetwork:

    # hyperparameters
    learning_rate = 0.0001
    l0, l1, l2, l3 = 0, 0, 0, 0
    isShuffle = True    # shuffle flag: for shuffling data while training the network
    isValidate = True   # validation flag: for viewing validation results while training the network
    X_val = np.array([])
    y_val = np.array([])

    def __init__(self, layers_size):
        """
        network constructor: initializes the weights and biases.

        Parameters:
        layers_size : list (list of integers, each integer corresponds to the number of nodes in each layer)

        Returns:
        (no-returns)
        """
        self.l0 = layers_size[0]
        self.l1 = layers_size[1]
        self.l2 = layers_size[2]
        self.l3 = layers_size[3]

        self.weights = {'l1': np.random.randn(self.l1, self.l0)/np.sqrt(self.l1),
                        'l2': np.random.randn(self.l2, self.l1)/np.sqrt(self.l2),
                        'l3': np.random.randn(self.l3, self.l2)/np.sqrt(self.l3) }

        self.biases = { 'l1': np.random.randn(1, self.l1),
                        'l2': np.random.randn(1, self.l2),
                        'l3': np.random.randn(1, self.l3) }

        # for viewing network structure in console
        print("\n[network]: Initializing network with ...\n",104*"-")
        print("#nodes: layer0[input]:",self.l0," \t| layer1: ",self.l1, " \t| layer2: ",self.l2, " \t\t| layer3[out]: ",self.l3)
        print("weights: \t\t","- \t| layer1: ",self.weights['l1'].shape," \t| layer2: ",self.weights['l2'].shape," \t| layer3[out]: ",self.weights['l3'].shape)
        print("biases: \t\t","- \t| layer1: ",self.biases['l1'].shape, " \t| layer2: ",self.biases['l2'].shape, " \t| layer3[out]: ",self.biases['l3'].shape)
        print(105*"-","\n")

    def fit(self, X, y, epochs):
        """
        trains the network with stochastic gradient descent (SGD) in three steps:
            1. forward propagation  -->|-->|-->|-->
            2. backward propagation <--|<--|<--|<--
            3. updating weights and biases -+-+-+-+-+-+-+-

        Parameters:
        X      : numpy array (contains all features)
        y      : numpy array (contains all targets)
        epochs : int (number of passes over the training data)

        Returns:
        (no-returns)
        """
        print("[network]: Training network for ", epochs, " epochs.")

        if(self.isValidate):
            self.X_val = X[40001:]   # out of 42000 samples, keep the last ~2000 (X and y)
            self.y_val = y[40001:]   # for validation
            X = X[:40000]            # updating training set
            y = y[:40000]            # so the validation samples are never seen during training

        batch_size = X.shape[0]

        for epoch in range(epochs):
            total_error = []
            # shuffling
            if(self.isShuffle):
                util.shuffle(X, y)

            for i in range(batch_size):
                # feed-forward
                zs, activations = self.__forward__(X[i])
                activations.append(y[i])
                # backward propagation
                error, new_weights, new_biases = self.__backward__(zs, activations, y[i])
                total_error.append(np.mean(np.abs(error)))

                # weights & biases update
                self.weights['l3'] -= self.learning_rate * new_weights[0]
                self.weights['l2'] -= self.learning_rate * new_weights[1]
                self.weights['l1'] -= self.learning_rate * new_weights[2]

                self.biases['l3'] -= self.learning_rate * new_biases[0]
                self.biases['l2'] -= self.learning_rate * new_biases[1]
                self.biases['l1'] -= self.learning_rate * new_biases[2]

            if(self.isValidate):
                print("\t epoch ",(epoch+1),"\t...\t train_loss:", round((sum(total_error)/batch_size)*100,2),"%",
                      "\t val_loss:", round(self.validate(self.X_val, self.y_val)*100,2),"%")
            else:
                print("\t epoch ",(epoch+1),"\t...\t train_loss:",round((sum(total_error)/batch_size)*100,2),"%")
        print("\n")

    def __forward__(self, x):
        """
        this method is an implementation of forward propagation with one sample at a time.

        Parameters:
        x : numpy array (contains one sample of features)

        Returns:
        zs         : list (contains numpy arrays, each array corresponds to sum(xW+b) of the respective layer)
        activations: list (contains numpy arrays, each array corresponds to the output of the respective layer)
        """
        # demo shapes (assuming a 784-300-90-10 network)
        l0 = x.T                                                     # [1, 784]

        z1 = np.dot(l0, self.weights['l1'].T) + self.biases['l1']    # [1, 300] = [1, 784] .* [784, 300] + [1, 300]
        l1 = util.relu(z1)                                           # [1, 300]

        z2 = np.dot(l1, self.weights['l2'].T) + self.biases['l2']    # [1, 90] = [1, 300] .* [300, 90] + [1, 90]
        l2 = util.relu(z2)                                           # [1, 90]

        z3 = np.dot(l2, self.weights['l3'].T) + self.biases['l3']    # [1, 10] = [1, 90] .* [90, 10] + [1, 10]
        l3 = util.softmax(z3)                                        # [1, 10]

        zs = [z1, z2, z3]
        activations = [l0, l1, l2, l3]

        return zs, activations

    def __backward__(self, zs, activations, y):
        """
        this method is an implementation of backpropagation with one sample at a time.

        Parameters:
        zs         : list (contains numpy arrays, each array corresponds to sum(xW+b) of the respective layer)
        activations: list (contains numpy arrays, each array corresponds to the output of the respective layer)
        y          : numpy array (contains one sample of target values)

        Returns:
        l3_error   : numpy array (error of the last layer 3, i.e. the network error)
        new_weights: list (numpy arrays with the weight gradients of each layer, ordered l3, l2, l1)
        new_biases : list (numpy arrays with the bias gradients of each layer, ordered l3, l2, l1)
        """
        l0, l1, l2, l3 = activations[0], activations[1], activations[2], activations[3]
        z1, z2 = zs[0], zs[1]

        # calculating loss of network (or) layer 3
        # demo shapes
        l3_error = l3 - y.T                                               # [1, 10] = [1, 10] - [1, 10]

        # calculating layer3 weights and biases
        l3_new_biases = l3_error                                          # [1, 10]
        l3_new_weights = l3_error.T.dot(l2)                               # [10, 90] = [10, 1] * [1, 90]

        # calculating layer2 weights and biases
        l2_error = l3_error.dot(self.weights['l3'])                       # [1, 90] = [1, 10] * [10, 90]
        l2_error = np.multiply(l2_error, util.relu(z2, derivative=True))  # [1, 90] = [1, 90] * [1, 90]
        l2_new_biases = l2_error
        l2_new_weights = l2_error.T.dot(l1)                               # [90, 300] = [90, 1] * [1, 300]

        # calculating layer1 weights and biases
        l1_error = l2_error.dot(self.weights['l2'])                       # [1, 300] = [1, 90] * [90, 300]
        l1_error = np.multiply(l1_error, util.relu(z1, derivative=True))  # [1, 300] = [1, 300] * [1, 300]
        l1_new_biases = l1_error
        l1_new_weights = l1_error.T.dot(l0)                               # [300, 784] = [300, 1] * [1, 784]

        new_weights = [l3_new_weights, l2_new_weights, l1_new_weights]
        new_biases = [l3_new_biases, l2_new_biases, l1_new_biases]

        return l3_error, new_weights, new_biases

    def predict(self, x, show=False):
        """
        this method is responsible for predicting the digit using the network weights and biases.

        Parameters:
        x   : numpy array (contains one sample of input features)
        show: boolean (flag for displaying the given sample 'x' on the screen)

        Returns:
        integer (the digit predicted by the network, i.e. the argmax of the output layer)
        """
        _ , activations = self.__forward__(x)
        if(show):
            util.view_digit(x.T, str(np.argmax(activations[-1])))
        return np.argmax(activations[-1])

    def validate(self, x, y):
        """
        this method is responsible for validating the network performance on the validation data.

        Parameters:
        x : numpy array (contains validation features)
        y : numpy array (contains validation targets)

        Returns:
        float (the average absolute error over the validation samples)
        """
        val_score = []
        for i in range(x.shape[0]):
            _ , activations = self.__forward__(x[i])
            val_score.append(np.mean(np.abs(y[i].T - activations[-1])))
        return sum(val_score)/x.shape[0]
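# Standalone sketch of the shape bookkeeping documented in __forward__ above (784 -> 300 -> 90 -> 10).
# It re-implements relu/softmax inline so it does not depend on the util module; the layer sizes,
# weight scaling, and random sample below are illustrative only.
import numpy as np

def _relu(z):
    return np.maximum(z, 0.0)

def _softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

_rng = np.random.default_rng(0)
_x = _rng.random((1, 784))                          # one flattened 28x28 digit, as a row vector
_W1, _b1 = 0.01 * _rng.standard_normal((300, 784)), np.zeros((1, 300))
_W2, _b2 = 0.01 * _rng.standard_normal((90, 300)),  np.zeros((1, 90))
_W3, _b3 = 0.01 * _rng.standard_normal((10, 90)),   np.zeros((1, 10))

_a1 = _relu(_x @ _W1.T + _b1)                       # (1, 300)
_a2 = _relu(_a1 @ _W2.T + _b2)                      # (1, 90)
_a3 = _softmax(_a2 @ _W3.T + _b3)                   # (1, 10), sums to 1
print(_a1.shape, _a2.shape, _a3.shape, round(float(_a3.sum()), 6))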
def getAvailableCows(cows, c):
    minEnd = float("inf")
    maxIndex = -1
    start = False
    for i in range(0, len(cows)):
        if cows[i][0] <= c and cows[i][1] >= c:
            if not start:
                start = True
            # available cow: remember the one whose interval ends earliest
            if cows[i][1] < minEnd:
                maxIndex = i
                minEnd = cows[i][1]
        else:
            if start:
                break
    return maxIndex


def getNumOfCross(chickens: list, cows: list) -> int:
    cows = sorted(cows)
    chickens = sorted(chickens)
    count = 0
    for c in chickens:
        pair = getAvailableCows(cows, c)
        if pair != -1:
            del cows[pair]
            count += 1
    return count


def main(inputFile, outputFile):
    helpcrossInput = open(inputFile, 'r')
    helpcrossOutput = open(outputFile, 'w')
    C, N = helpcrossInput.readline().strip().split()
    C, N = int(C), int(N)
    chickens = []
    cows = []
    for _ in range(C):
        chickens.append(int(helpcrossInput.readline().strip()))
    for _ in range(N):
        line = helpcrossInput.readline().strip().split()
        cows.append([int(line[0]), int(line[1])])
    helpcrossOutput.write(str(getNumOfCross(chickens, cows)) + '\n')
    helpcrossInput.close()
    helpcrossOutput.close()


main('helpcross.in', 'helpcross.out')
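# Tiny illustrative check of the greedy matching above, using hypothetical data rather than the
# USACO input files that main() expects. Chickens at times 3 and 5 and cows covering [2, 4] and
# [1, 6] give 2 pairs, because each chicken is matched to the available cow whose interval ends
# earliest. Note that main('helpcross.in', ...) runs at import time, so this check is only reached
# when those input files exist.
_example_chickens = [3, 5]
_example_cows = [[2, 4], [1, 6]]
assert getNumOfCross(_example_chickens, _example_cows) == 2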
class ComponentLanguage: PYTHON = "Python" R = "R" JAVA = "Java" JUPYTER = "Jupyter"
import numpy from amuse.units import units from amuse.units.quantities import is_quantity, value_in, to_quantity from amuse.datamodel import UnstructuredGrid, StructuredGrid,StructuredBaseGrid try: import matplotlib from matplotlib import tri if not hasattr(tri, "LinearTriInterpolator"): raise Exception("LinearTriInterpolator not in matplotlib.tri") matplotlib_available=True except: matplotlib_available=False class interpolating_2D_remapper(object): def __init__(self, source, target,axes_names=None): """ this class maps a source grid to a target grid using linear interpolation on a triangulation generated by adding a midpoint to every cell (source should be a structured grid) and thus generating 4 triangles for each cell. Values of the midpoints are averaged from the corners. """ if len(source.shape) !=2: raise Exception("source grid is not 2D") if not isinstance(source, StructuredBaseGrid): raise Exception("source grid is not instance of StructuredBaseGrid") self.source=source self.target=target self._axes_names=list(axes_names or source.get_axes_names()) self.generate_triangulation() def _generate_nodes(self,grid,attributes): Nx,Ny=grid.shape x,y=numpy.mgrid[0:Nx,0:Ny] x1,y1=numpy.mgrid[0:Nx-1,0:Ny-1] x_=x.flatten() y_=y.flatten() x1_=x1.flatten() y1_=y1.flatten() l1=Nx*Ny i=numpy.arange(Nx*Ny).reshape((Nx,Ny)) i1=(numpy.arange((Nx-1)*(Ny-1))+l1).reshape((Nx-1,Ny-1)) nodes=UnstructuredGrid(len(x_)+len(x1_)) for name in attributes: values1=getattr(grid,name)[x_,y_] values2=getattr(grid,name)[x1_,y1_]+getattr(grid,name)[x1_+1,y1_]+\ getattr(grid,name)[x1_,y1_+1]+getattr(grid,name)[x1_+1,y1_+1] setattr(nodes[0], name, 0.*values1[0]) setattr(nodes[:l1], name, 1.*values1) setattr(nodes[l1:], name, values2/4) return nodes def _generate_elements_and_boundaries(self,grid): Nx,Ny=grid.shape l1=Nx*Ny i=numpy.arange(Nx*Ny).reshape((Nx,Ny)) i1=(numpy.arange((Nx-1)*(Ny-1))+l1).reshape((Nx-1,Ny-1)) e1=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i') e2=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i') e3=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i') e4=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i') e1[:,0]=i[:-1,:-1].flatten() e1[:,1]=i[1:,:-1].flatten() e1[:,2]=i1[:,:].flatten() e2[:,0]=i[1:,:-1].flatten() e2[:,1]=i[1:,1:].flatten() e2[:,2]=i1[:,:].flatten() e3[:,0]=i[1:,1:].flatten() e3[:,1]=i[:-1,1:].flatten() e3[:,2]=i1[:,:].flatten() e4[:,0]=i[:-1,:-1].flatten() e4[:,1]=i1[:,:].flatten() e4[:,2]=i[:-1,1:].flatten() elements=numpy.zeros((4*(Nx-1)*(Ny-1),3),dtype='i8') elements[0::4,:]=e1 elements[1::4,:]=e2 elements[2::4,:]=e3 elements[3::4,:]=e4 boundaries=[xx.flatten() for xx in [i[:,0],i[-1,:],i[::-1,-1],i[0,::-1]] ] elem=UnstructuredGrid(len(elements)) elem.nodes=elements return elem,boundaries def convert_grid_to_nodes_and_elements(self, grid, attributes=None): if attributes is None: attributes=grid.get_attribute_names_defined_in_store() nodes=self._generate_nodes(grid, attributes) elements,boundaries=self._generate_elements_and_boundaries(grid) return nodes,elements,boundaries def generate_triangulation(self): nodes,elements,boundaries=self.convert_grid_to_nodes_and_elements(self.source, self._axes_names) xpos=to_quantity(getattr(nodes,self._axes_names[0])) ypos=to_quantity(getattr(nodes,self._axes_names[1])) self._xpos_unit=xpos.unit xpos=xpos.number self._ypos_unit=ypos.unit ypos=ypos.number n1=elements.nodes[:,0] n2=elements.nodes[:,1] n3=elements.nodes[:,2] elem=numpy.column_stack((n1,n2,n3)) self._triangulation=tri.Triangulation(xpos,ypos,elem) def sample(self, values, xpos, ypos): 
interpolator=tri.LinearTriInterpolator(self._triangulation,values) return interpolator(xpos,ypos) def forward_mapping(self, attributes, target_names=None): if attributes is None: attributes=self.source.get_attribute_names_defined_in_store() if target_names is None: target_names=attributes source=self.source.empty_copy() channel1=self.source.new_channel_to(source) target=self.target.empty_copy() channel2=self.target.new_channel_to(target) channel3=target.new_channel_to(self.target) channel1.copy_attributes(attributes) channel2.copy_attributes(self._axes_names) nodes=self._generate_nodes(source,attributes) xpos=value_in( getattr(target,self._axes_names[0]), self._xpos_unit) ypos=value_in( getattr(target,self._axes_names[1]), self._ypos_unit) for attribute, target_name in zip(attributes, target_names): values=to_quantity( getattr(nodes,attribute) ) unit=values.unit values=values.number samples=self.sample(values,xpos,ypos) setattr(target, target_name, (samples if unit is units.none else (samples | unit))) channel3.copy_attributes(target_names) class bilinear_2D_remapper(object): def __init__(self, source, target, check_inside=True): """ this class maps a source grid to a target grid using bilinear interpolation. If check_inside=True, raise exception if any target point outside source grid. """ if len(source.shape) !=2: raise Exception("source grid is not 2D") if not isinstance(source, StructuredBaseGrid): raise Exception("source grid is not instance of RegularBaseGrid") self.source=source self.target=target self._axes_names=source.get_axes_names() self.check_inside=check_inside self._weights=None self._indices=None def _calculate_weights(self): x0=getattr(self.source[0,0], self._axes_names[0]) x1=getattr(self.source[1,1], self._axes_names[0]) y0=getattr(self.source[0,0], self._axes_names[1]) y1=getattr(self.source[1,1], self._axes_names[1]) dx=x1-x0 dy=y1-y0 x=getattr(self.target, self._axes_names[0]) y=getattr(self.target, self._axes_names[1]) ix=numpy.floor((x-x0)/dx).astype(int) iy=numpy.floor((y-y0)/dy).astype(int) if self.check_inside: if numpy.any(ix<0) or numpy.any(ix>self.source.shape[0]-2) or \ numpy.any(iy<0) or numpy.any(iy>self.source.shape[1]-2): raise Exception("target not fully inside (restricted) source grid as required") ix=numpy.clip(ix,0, self.source.shape[0]-2) iy=numpy.clip(iy,0, self.source.shape[1]-2) wx=(x0+(ix+1)*dx-x)/dx wy=(y0+(iy+1)*dy-y)/dy wx=numpy.clip(wx,0.,1.) wy=numpy.clip(wy,0.,1.) self._weights=[wx,wy] self._indices=[ix,iy] def _evaluate(self, values): ix,iy=self._indices wx,wy=self._weights result=wx*wy*values[ix,iy]+(1.-wx)*wy*values[ix+1,iy]+ \ wx*(1.-wy)*values[ix,iy+1]+(1.-wx)*(1.-wy)*values[ix+1,iy+1] return result def forward_mapping(self, attributes, target_names=None): if attributes is None: attributes=self.source.get_attribute_names_defined_in_store() if target_names is None: target_names=attributes if self._weights is None: self._calculate_weights() mapped_values=[] for attribute, target_name in zip(attributes, target_names): values=getattr(self.source,attribute) samples=self._evaluate(values) mapped_values.append(samples) self.target.set_values_in_store(None, target_names, mapped_values) class nearest_2D_remapper(object): def __init__(self, source, target, check_inside=True): """ this class maps a source grid to a target grid getting closest grid value. If check_inside=True, raise exception if any target point outside source grid. 
""" if len(source.shape) !=2: raise Exception("source grid is not 2D") if not isinstance(source, StructuredBaseGrid): raise Exception("source grid is not instance of RegularBaseGrid") self.source=source self.target=target self._axes_names=source.get_axes_names() self.check_inside=check_inside self._indices=None def _calculate_weights(self): x=getattr(self.target, self._axes_names[0]) y=getattr(self.target, self._axes_names[1]) kwargs={self._axes_names[0]: x, self._axes_names[1]:y} indices=self.source.get_index(**kwargs) ix=indices[...,0] iy=indices[...,1] if self.check_inside: if numpy.any(ix<0) or numpy.any(ix>self.source.shape[0]-1) or \ numpy.any(iy<0) or numpy.any(iy>self.source.shape[1]-1): raise Exception("target not fully inside source grid as required") ix=numpy.clip(ix,0, self.source.shape[0]-1) iy=numpy.clip(iy,0, self.source.shape[1]-1) self._indices=[ix,iy] def _evaluate(self, values): return values[self._indices[0], self._indices[1]] def forward_mapping(self, attributes, target_names=None): if attributes is None: attributes=self.source.get_attribute_names_defined_in_store() if target_names is None: target_names=attributes if self._indices is None: self._calculate_weights() mapped_values=[] for attribute, target_name in zip(attributes, target_names): values=getattr(self.source,attribute) samples=self._evaluate(values) mapped_values.append(samples) self.target.set_values_in_store(None, target_names, mapped_values) def conservative_spherical_remapper(*args,**kwargs): raise Exception("conservative_spherical_remapper has moved to omuse.ext")
from django.apps import AppConfig class MainSiteConfig(AppConfig): name = 'mainsite' verbose_name = 'Main Site'
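# For reference (a sketch, not confirmed by the snippet above): an AppConfig like MainSiteConfig is
# normally activated by pointing INSTALLED_APPS at its dotted path in the project settings. The path
# below assumes this file lives at mainsite/apps.py, which is the conventional location.
INSTALLED_APPS = [
    # ... other Django apps ...
    'mainsite.apps.MainSiteConfig',
]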
import sys
import random
import requests

from utils import *
from judgeManager import Judge_Manager

class JudgeServerManager:
    def Add_Judge_Server(self, Address: str, Secret: str, Friendly_Name: str, Detail: str):
        db = db_connect()
        cursor = db.cursor()
        try:
            cursor.execute("INSERT INTO Judge_Server (Address, Secret_Key, Friendly_Name, Detail) VALUES (%s, %s, %s, %s)",
                           (Address, Secret, Friendly_Name, Detail))
            db.commit()
        except:
            db.rollback()
            sys.stderr.write("SQL Error in JudgeServerManager: Add_Judge_Server\n")
        db.close()
        return

    # def Modify_Server_Detail(self):

    def Remove_Judge_Server(self, Secret: str):
        db = db_connect()
        cursor = db.cursor()
        try:
            cursor.execute("DELETE FROM Judge_Server WHERE Secret_Key = %s", (Secret, ))
            db.commit()
        except:
            db.rollback()
            sys.stderr.write("SQL Error in JudgeServerManager: Remove_Judge_Server\n")
        db.close()
        return

    def Flush_Busy(self, Secret: str, New_State: bool, Current_Task: int = -1):
        db = db_connect()
        cursor = db.cursor()
        try:
            cursor.execute("UPDATE Judge_Server SET Busy = %s, Current_Task = %s WHERE Secret_Key = %s",
                           (str(int(New_State)), Current_Task, Secret))
            db.commit()
        except:
            db.rollback()
            sys.stderr.write("SQL Error in JudgeServerManager: Flush_Busy\n")
        db.close()
        return

    def Flush_Heartbeat(self, Secret: str, CurTime: int):
        db = db_connect()
        cursor = db.cursor()
        try:
            cursor.execute("UPDATE Judge_Server SET Last_Seen_Time = %s WHERE Secret_Key = %s", (str(CurTime), Secret))
            db.commit()
        except:
            db.rollback()
            sys.stderr.write("SQL Error in JudgeServerManager: Flush_Heartbeat\n")
        db.close()
        return

    def Get_Last_Heartbeat(self, Secret: str):
        db = db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT Last_Seen_Time FROM Judge_Server WHERE Secret_Key = %s", (Secret, ))
        ret = cursor.fetchone()
        db.close()
        return ret[0]

    def Get_URL(self, Secret: str):
        db = db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT Address FROM Judge_Server WHERE Secret_Key = %s", (Secret, ))
        ret = cursor.fetchone()
        db.close()
        return ret[0]

    def Check_Secret(self, Secret: str):
        db = db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT ID FROM Judge_Server WHERE Secret_Key = %s", (Secret, ))
        ret = cursor.fetchall()
        db.close()
        # fetchall() returns an empty sequence (not None) when the secret is unknown
        return ret is not None and len(ret) > 0

    def Get_Online_Server_List(self, MinTime: int):
        db = db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT Secret_Key FROM Judge_Server WHERE Last_Seen_Time >= %s", (str(MinTime), ))
        data = cursor.fetchall()
        db.close()
        return data

    def Set_Offline(self, Secret: str):
        db = db_connect()
        cursor = db.cursor()
        try:
            cursor.execute("UPDATE Judge_Server SET Last_Seen_Time = %s WHERE Secret_Key = %s", ('0', Secret))
            db.commit()
        except:
            db.rollback()
            sys.stderr.write("SQL Error in JudgeServerManager: Set_Offline\n")
        cursor.execute("SELECT Current_Task FROM Judge_Server WHERE Secret_Key = %s", (Secret, ))
        Current_Task = cursor.fetchone()
        if Current_Task is not None and int(Current_Task[0]) != -1:
            Judge_Manager.update_after_judge(int(Current_Task[0]), 9)  # mark the orphaned task as system error
        db.close()
        return

    def Error_Check_Correct(self, Mintime: int):
        db = db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT Address, Secret_Key, Current_Task FROM Judge_Server WHERE Last_Seen_Time >= %s AND Busy = %s", (str(Mintime), '1'))
        ret = cursor.fetchall()
        db.close()
        if ret is None or len(ret) == 0: # nothing to do
            return
        for x in ret:
            url = x[0]
            Secret = x[1]
            Current_Task = x[2]
            for i in range(0, 3):
                try:
                    data = {}
                    data['Server_Secret'] = JudgeConfig.Web_Server_Secret
                    resp = requests.post(url + '/isBusy', data=data).content.decode() # Fixme: check self-signed SSL
                    if resp == '0':
                        self.Flush_Busy(Secret, False)
                        Judge_Manager.update_after_judge(Current_Task, 9) # set system error for fake busy
                        break
                except:
                    pass

    def Get_Standby_Server(self, MinTime: int):
        # self.Error_Check_Correct(MinTime)
        db = db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT Address, Secret_Key FROM Judge_Server WHERE Last_Seen_Time >= %s AND Busy = %s", (str(MinTime), '0'))
        ret = cursor.fetchall()
        db.close()
        if ret is None or len(ret) == 0:
            return None
        st = random.randint(0, len(ret) - 1)
        for i in range(st, st + len(ret)):
            if ping(ret[i % len(ret)][0]):
                return ret[i % len(ret)]
            else:
                self.Set_Offline(ret[i % len(ret)][1])  # mark the unreachable server offline by its secret key
        return None

    def Get_Failure_Task(self):
        db = db_connect()
        cursor = db.cursor()
        minTime = unix_nano() - JudgeConfig.Max_Duration
        cursor.execute("SELECT Current_Task FROM Judge_Server WHERE Last_Seen_Time < %s", (str(minTime), ))
        ret = cursor.fetchall()
        db.close()
        if ret is None or len(ret) == 0:
            return None
        return ret

    def Get_Current_Task(self, secret):
        db = db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT Current_Task FROM Judge_Server WHERE Secret_Key = %s", (secret, ))
        ret = cursor.fetchone()
        db.close()
        return int(ret[0])

    def Get_Server_List(self):
        db = db_connect()
        cursor = db.cursor()
        cursor.execute("SELECT Last_Seen_Time, Busy, Friendly_Name, Detail FROM Judge_Server")
        ls = cursor.fetchall()
        db.close()
        ret = []
        for x in ls:
            temp = {}
            temp['Status'] = bool(int(x[0]) > unix_nano() - JudgeConfig.Max_Duration)
            temp['Name'] = x[2]
            temp['System'] = x[3].split('\n')[0]
            temp['Last_Seen_Time'] = readable_time(int(x[0]))
            temp['Busy'] = bool(x[1])
            temp['Provider'] = x[3].split('\n')[1]
            ret.append(temp)
        return ret


JudgeServer_Manager = JudgeServerManager()
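# Hedged usage sketch (not from the original project): how a dispatcher might pick a judge server
# with the manager above. It assumes the database behind utils.db_connect is configured and that
# JudgeConfig.Max_Duration is the heartbeat timeout; submission_id is a hypothetical example value.
if __name__ == '__main__':
    submission_id = 12345
    online_since = unix_nano() - JudgeConfig.Max_Duration
    server = JudgeServer_Manager.Get_Standby_Server(online_since)   # (Address, Secret_Key) or None
    if server is None:
        sys.stderr.write('no standby judge server available\n')
    else:
        address, secret = server
        # Mark the server busy with this task; once the judge reports back, the web side would
        # call Flush_Busy(secret, False) to release it.
        JudgeServer_Manager.Flush_Busy(secret, True, submission_id)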