| column | type | stats |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |
bc5aff48d559df398e4a2f66aaeeb0e17884dd6b | a1bccead14fe67f560ca54aad98dbb2367c7568b | /tensorpack/tfutils/common.py | 42588423b3fea4d2fd9c3f858dff7f6cb15af704 | [
"Apache-2.0"
] | permissive | Peratham/tensorpack | 9ea7e714b41de1aa4393454d2fa0a88d3b7568b9 | e21fc267c0ada1377bffcc008dad31c28326690d | refs/heads/master | 2021-01-11T17:46:10.488888 | 2017-01-23T17:13:43 | 2017-01-23T17:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,547 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
from ..utils.naming import GLOBAL_STEP_VAR_NAME, GLOBAL_STEP_OP_NAME
import tensorflow as tf
from copy import copy
import six
from contextlib import contextmanager
__all__ = ['get_default_sess_config',
'get_global_step',
'get_global_step_var',
'get_op_tensor_name',
'get_tensors_by_names',
'get_op_or_tensor_by_name',
'backup_collection',
'restore_collection',
'clear_collection',
'freeze_collection',
'get_tf_version',
'get_name_scope_name'
]
def get_default_sess_config(mem_fraction=0.99):
"""
Return a better session config to use as default.
    TensorFlow's default session config consumes too many resources.
Args:
mem_fraction(float): fraction of memory to use.
Returns:
tf.ConfigProto: the config to use.
"""
conf = tf.ConfigProto()
conf.gpu_options.per_process_gpu_memory_fraction = mem_fraction
conf.gpu_options.allocator_type = 'BFC'
conf.gpu_options.allow_growth = True
conf.allow_soft_placement = True
# conf.log_device_placement = True
return conf
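# Usage sketch (a minimal example, assuming the TF1-style session API used
# throughout this module; the 0.5 memory fraction is illustrative):
#   sess = tf.Session(config=get_default_sess_config(0.5))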
def get_global_step_var():
"""
Returns:
        tf.Tensor: the global_step variable in the current graph.
            Created if it doesn't exist.
"""
try:
return tf.get_default_graph().get_tensor_by_name(GLOBAL_STEP_VAR_NAME)
except KeyError:
scope = tf.get_variable_scope()
assert scope.name == '', \
"Creating global_step_var under a variable scope would cause problems!"
with tf.variable_scope(scope, reuse=False):
var = tf.get_variable(GLOBAL_STEP_OP_NAME, shape=[],
initializer=tf.constant_initializer(dtype=tf.int32),
trainable=False, dtype=tf.int32)
return var
def get_global_step():
"""
Returns:
        int: the global_step value in the current graph and session."""
return tf.train.global_step(
tf.get_default_session(),
get_global_step_var())
def get_op_tensor_name(name):
"""
Will automatically determine if ``name`` is a tensor name (ends with ':x')
    or an op name.
If it is an op name, the corresponding tensor name is assumed to be ``op_name + ':0'``.
Args:
name(str): name of an op or a tensor
Returns:
tuple: (op_name, tensor_name)
"""
if len(name) >= 3 and name[-2] == ':':
return name[:-2], name
else:
return name, name + ':0'
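# Example behaviour (hypothetical names, minimal sketch):
#   get_op_tensor_name('fc/output')    # -> ('fc/output', 'fc/output:0')
#   get_op_tensor_name('fc/output:0')  # -> ('fc/output', 'fc/output:0')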
def get_tensors_by_names(names):
"""
Get a list of tensors in the default graph by a list of names.
Args:
names (list):
"""
ret = []
G = tf.get_default_graph()
for n in names:
opn, varn = get_op_tensor_name(n)
ret.append(G.get_tensor_by_name(varn))
return ret
def get_op_or_tensor_by_name(name):
G = tf.get_default_graph()
if len(name) >= 3 and name[-2] == ':':
return G.get_tensor_by_name(name)
else:
return G.get_operation_by_name(name)
def backup_collection(keys):
"""
Args:
keys (list): list of collection keys to backup
Returns:
dict: the backup
"""
ret = {}
for k in keys:
ret[k] = copy(tf.get_collection(k))
return ret
def restore_collection(backup):
"""
Restore from a collection backup.
Args:
backup (dict):
"""
for k, v in six.iteritems(backup):
del tf.get_collection_ref(k)[:]
tf.get_collection_ref(k).extend(v)
def clear_collection(keys):
"""
Clear some collections.
Args:
keys(list): list of collection keys.
"""
for k in keys:
del tf.get_collection_ref(k)[:]
@contextmanager
def freeze_collection(keys):
"""
Args:
keys(list): list of collection keys to freeze.
Returns:
a context where the collections are in the end restored to its initial state.
"""
backup = backup_collection(keys)
yield
restore_collection(backup)
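# Usage sketch (the collection key is illustrative; build_towers is hypothetical):
#   with freeze_collection([tf.GraphKeys.UPDATE_OPS]):
#       build_towers()  # any changes made to the collection here are reverted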
def get_tf_version():
"""
Returns:
        int: the minor version number of the installed TensorFlow.
"""
return int(tf.__version__.split('.')[1])
def get_name_scope_name():
"""
Returns:
str: the name of the current name scope, without the ending '/'.
"""
g = tf.get_default_graph()
s = "RANDOM_STR_ABCDEFG"
unique = g.unique_name(s)
scope = unique[:-len(s)].rstrip('/')
return scope
| [
"[email protected]"
] | |
df8311f1d2805816a6fc20a00ec8a0abcbe78a62 | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/Products.CMFPlone-4.1-py2.7.egg/Products/CMFPlone/tests/testCheckId.py | dcf03d009fb74b10bfa220665076ec808f2f2c61 | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,862 | py | #
# Test id autogeneration related scripts
#
from Products.CMFPlone.tests import PloneTestCase
from Products.CMFPlone.tests import dummy
from AccessControl import Unauthorized
from Products.CMFCore.utils import getToolByName
from ZODB.POSException import ConflictError
class TestIsIDAutoGenerated(PloneTestCase.PloneTestCase):
'''Tests the isIDAutoGenerated script'''
def testAutoGeneratedId(self):
plone_utils = getToolByName(self.portal, 'plone_utils')
r = plone_utils.isIDAutoGenerated('document.2004-11-09.0123456789')
self.assertEqual(r, True)
def testAutoGeneratedIdWithUnderScores(self):
plone_utils = getToolByName(self.portal, 'plone_utils')
portal_types = getToolByName(self.portal, 'portal_types')
portal_types.test_type=self.portal.portal_types.Event
portal_types.test_type.id="test_type"
r = plone_utils.isIDAutoGenerated('test_type.2004-11-09.0123456789')
del portal_types.test_type
self.assertEqual(r, True)
def testEmptyId(self):
plone_utils = getToolByName(self.portal, 'plone_utils')
r = plone_utils.isIDAutoGenerated('')
self.assertEqual(r, False)
def testValidPortalTypeNameButNotAutoGeneratedId(self):
plone_utils = getToolByName(self.portal, 'plone_utils')
# This was raising an IndexError exception for
# Zope < 2.7.3 (DateTime.py < 1.85.12.11) and a
# SyntaxError for Zope >= 2.7.3 (DateTime.py >= 1.85.12.11)
r = plone_utils.isIDAutoGenerated('document.tar.gz')
self.assertEqual(r, False)
# check DateError
r = plone_utils.isIDAutoGenerated('document.tar.12/32/2004')
self.assertEqual(r, False)
# check TimeError
r = plone_utils.isIDAutoGenerated('document.tar.12/31/2004 12:62')
self.assertEqual(r, False)
class TestCheckId(PloneTestCase.PloneTestCase):
'''Tests the check_id script'''
def testGoodId(self):
r = self.folder.check_id('foo')
self.assertEqual(r, None) # success
def testEmptyId(self):
r = self.folder.check_id('')
self.assertEqual(r, None) # success
def testRequiredId(self):
r = self.folder.check_id('', required=1)
self.assertEqual(r, u'Please enter a name.')
def testAlternativeId(self):
r = self.folder.check_id('', alternative_id='foo')
self.assertEqual(r, None) # success
def testBadId(self):
r = self.folder.check_id('=')
self.assertEqual(r, u'= is not a legal name. The following characters are invalid: =')
def testCatalogIndex(self):
# TODO: Tripwire
portal_membership = getToolByName(self.portal, 'portal_membership')
have_permission = portal_membership.checkPermission
self.failUnless(have_permission('Search ZCatalog', self.portal.portal_catalog),
'Expected permission "Search ZCatalog"')
r = self.folder.check_id('created')
self.assertEqual(r, u'created is reserved.')
def testCatalogMetadata(self):
portal_catalog = getToolByName(self.portal, 'portal_catalog')
portal_catalog.addColumn('new_metadata')
self.failUnless('new_metadata' in portal_catalog.schema())
self.failIf('new_metadata' in portal_catalog.indexes())
r = self.folder.check_id('new_metadata')
self.assertEqual(r, u'new_metadata is reserved.')
def testCollision(self):
self.folder.invokeFactory('Document', id='foo')
self.folder.invokeFactory('Document', id='bar')
r = self.folder.foo.check_id('bar')
self.assertEqual(r, u'There is already an item named bar in this folder.')
def testTempObjectCollision(self):
foo = self.folder.restrictedTraverse('portal_factory/Document/foo')
self.folder._setObject('bar', dummy.Item('bar'))
r = foo.check_id('bar')
self.assertEqual(r, u'bar is reserved.')
def testReservedId(self):
self.folder._setObject('foo', dummy.Item('foo'))
r = self.folder.foo.check_id('portal_catalog')
self.assertEqual(r, u'portal_catalog is reserved.')
def testHiddenObjectId(self):
# If a parallel object is not in content-space, should get 'reserved'
# instead of 'taken'
r = self.folder.check_id('portal_skins')
self.assertEqual(r, u'portal_skins is reserved.')
def testCanOverrideParentNames(self):
self.folder.invokeFactory('Document', id='item1')
self.folder.invokeFactory('Folder', id='folder1')
self.folder.invokeFactory('Document', id='foo')
r = self.folder.folder1.foo.check_id('item1')
self.assertEqual(r, None)
def testInvalidId(self):
self.folder._setObject('foo', dummy.Item('foo'))
r = self.folder.foo.check_id('_foo')
self.assertEqual(r, u'_foo is reserved.')
def testContainerHook(self):
# Container may have a checkValidId method; make sure it is called
self.folder._setObject('checkValidId', dummy.Raiser(dummy.Error))
self.folder._setObject('foo', dummy.Item('foo'))
r = self.folder.foo.check_id('whatever')
self.assertEqual(r, u'whatever is reserved.')
def testContainerHookRaisesUnauthorized(self):
# check_id should not swallow Unauthorized errors raised by hook
self.folder._setObject('checkValidId', dummy.Raiser(Unauthorized))
self.folder._setObject('foo', dummy.Item('foo'))
self.assertRaises(Unauthorized, self.folder.foo.check_id, 'whatever')
def testContainerHookRaisesConflictError(self):
# check_id should not swallow ConflictErrors raised by hook
self.folder._setObject('checkValidId', dummy.Raiser(ConflictError))
self.folder._setObject('foo', dummy.Item('foo'))
self.assertRaises(ConflictError, self.folder.foo.check_id, 'whatever')
def testMissingUtils(self):
# check_id should not bomb out if the plone_utils tool is missing
self.portal._delObject('plone_utils')
r = self.folder.check_id('foo')
self.assertEqual(r, None) # success
def testMissingCatalog(self):
# check_id should not bomb out if the portal_catalog tool is missing
self.portal._delObject('portal_catalog')
r = self.folder.check_id('foo')
self.assertEqual(r, None) # success
def testMissingFactory(self):
# check_id should not bomb out if the portal_factory tool is missing
self.portal._delObject('portal_factory')
r = self.folder.check_id('foo')
self.assertEqual(r, None) # success
def testCatalogIndexSkipped(self):
# Note that the check is skipped when we don't have
# the "Search ZCatalogs" permission.
self.portal.manage_permission('Search ZCatalog', ['Manager'], acquire=0)
r = self.folder.check_id('created')
# But now the final hasattr check picks this up
self.assertEqual(r, u'created is reserved.')
def testCollisionSkipped(self):
# Note that check is skipped when we don't have
# the "Access contents information" permission.
self.folder.manage_permission('Access contents information', [], acquire=0)
self.folder._setObject('foo', dummy.Item('foo'))
self.folder._setObject('bar', dummy.Item('bar'))
r = self.folder.foo.check_id('bar')
self.assertEqual(r, None) # success
def testReservedIdSkipped(self):
# This check is picked up by the checkIdAvailable, unless we don't have
# the "Add portal content" permission, in which case it is picked up by
# the final hasattr check.
self.folder.manage_permission('Add portal content', [], acquire=0)
self.folder._setObject('foo', dummy.Item('foo'))
r = self.folder.foo.check_id('portal_catalog')
self.assertEqual(r, u'portal_catalog is reserved.')
def testInvalidIdSkipped(self):
# Note that the check is skipped when we don't have
# the "Add portal content" permission.
self.folder.manage_permission('Add portal content', [], acquire=0)
self.folder._setObject('foo', dummy.Item('foo'))
r = self.folder.foo.check_id('_foo')
self.assertEqual(r, None) # success
def testParentMethodAliasDisallowed(self):
# Note that the check is skipped when we don't have
# the "Add portal content" permission.
self.folder.manage_permission('Add portal content', ['Manager'], acquire=0)
self.folder._setObject('foo', dummy.Item('foo'))
for alias in self.folder.getTypeInfo().getMethodAliases().keys():
r = self.folder.foo.check_id(alias)
self.assertEqual(r, u'%s is reserved.' % alias)
def testCheckingMethodAliasesOnPortalRoot(self):
# Test for bug http://dev.plone.org/plone/ticket/4351
self.setRoles(['Manager'])
self.portal.manage_permission('Add portal content', ['Manager'], acquire=0)
# Should not raise: Before we were using obj.getTypeInfo(), which is
# not defined on the portal root.
try:
self.portal.check_id('foo')
except AttributeError, e:
self.fail(e)
def testProxyRoles(self):
# Proxy roles should cover missing view permission for all but the
# most unusual workflows.
proxy_roles = self.folder.check_id._proxy_roles
self.failUnless('Manager' in proxy_roles)
self.failUnless('Owner' in proxy_roles)
self.failUnless('Authenticated' in proxy_roles)
self.failUnless('Anonymous' in proxy_roles)
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestCheckId))
suite.addTest(makeSuite(TestIsIDAutoGenerated))
return suite
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
4ea7324dc735700864355bf887688d287410ce00 | 3529ecaa44a53172094ba13498097057c8972723 | /Questiondir/681.next-closest-time/681.next-closest-time_120259498.py | e5e0ccb34cb9903d4bb82ad4979a9600dfb35202 | [] | no_license | cczhong11/Leetcode-contest-code-downloader | 0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6 | db64a67869aae4f0e55e78b65a7e04f5bc2e671c | refs/heads/master | 2021-09-07T15:36:38.892742 | 2018-02-25T04:15:17 | 2018-02-25T04:15:17 | 118,612,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | from datetime import datetime, timedelta
def check(dt, time_str):
dt_str = datetime.strftime(dt, "%H%M")
_time_str = set(list(dt_str))
if _time_str & time_str == _time_str:
return True
return False
class Solution(object):
def nextClosestTime(self, time):
"""
:type time: str
:rtype: str
"""
time_str = set(list(time.replace(':', '')))
time = datetime.strptime(time, "%H:%M")
while True:
time = time + timedelta(minutes=1)
if check(time, time_str):
return time.strftime("%H:%M")
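# Usage sketch:
#   Solution().nextClosestTime("19:34")  # -> "19:39" (reuses only digits 1, 9, 3, 4)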
| [
"[email protected]"
] | |
12dc6835334446f6a874d6a58eaed4579d21533c | 489a45659476fafb66934427e42bfce3d60a0116 | /Assets/Python/BUG/CityUtil.py | f796f4f687860987bd082b19c841cbcfb1108044 | [] | no_license | billw2012/Caveman2Cosmos | 3a8c6ea347e75dbe2de9519fe70e6b38e0cf6dbe | 2382877536e1669972dd024ce2d0f3d0d5ffd988 | refs/heads/master | 2020-07-19T00:14:48.856106 | 2019-09-03T23:20:42 | 2019-09-03T23:21:02 | 197,989,388 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | ## CityUtil
##
## Collection of utility functions for dealing with cities.
##
## Copyright (c) 2009 The BUG Mod.
##
## Author: EmperorFool
from CvPythonExtensions import *
## Globals
gc = CyGlobalContext()
## Growth and Starvation
def willGrowThisTurn(city):
"""
Returns True if <city> will increase its population due to growth this turn.
Emphasize No Growth must be off for the city, and its food rate plus storage must reach the growth threshold.
"""
return not city.AI_isEmphasize(5) and city.getFood() + city.foodDifference(True) >= city.growthThreshold()
def willShrinkThisTurn(city):
"""
Returns True if <city> will decrease its population due to starvation this turn.
It must have at least two population, and its food rate plus storage must be negative.
"""
return city.getPopulation() > 1 and city.getFood() + city.foodDifference(True) < 0
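# Usage sketch (``city`` would be a CyCity instance supplied by the game engine;
# the consumer below is hypothetical):
#   if willGrowThisTurn(city):
#       drawGrowthIndicator(city)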
| [
"[email protected]"
] | |
3a0de6c4abfd97a3c5efda00e2039e3beaa66d28 | 1587d5444e18bea9b1c9cbe1a01c2f2aa03892d8 | /root/db.py | 075284412ac455bf9d4b2cf442784a5db6f7cda2 | [] | no_license | SofiiaShumel/new_flask | c5593e1df21023695ed287a879e4e77da6321bbc | 62692a7de5eb328b2ba2fec9e1a5ff7f98ccefb2 | refs/heads/master | 2020-09-28T08:56:48.348025 | 2019-12-07T22:31:27 | 2019-12-07T22:31:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,633 | py | import sqlalchemy as db
from sqlalchemy.orm import Session
import root.credentials
from root.entities import Player, Bet, Bank, Casino, Usernames, Country
class Database():
    # Replace the user, password, hostname and database to match your configuration
cstr = 'postgresql://{user}:{password}@{hostname}/{database}'.format(
user=root.credentials.username,
password=root.credentials.password,
hostname=root.credentials.host,
database=root.credentials.database
)
engine = db.create_engine(cstr)
def __init__(self):
self.connection = self.engine.connect()
print("DB Instance created")
# Player
def createPlayer(self, player):
session = Session(bind=self.connection)
session.add(player)
session.commit()
print("Player created successfully!")
def updatePlayer(self, player_id, player_balance, player_passwd):
session = Session(bind=self.connection)
dataToUpdate = {Player.balance: player_balance, Player.passwrd: player_passwd}
playerData = session.query(Player).filter(Player.player_id == player_id)
playerData.update(dataToUpdate)
session.commit()
print("Player updated successfully!")
def fetchAllPlayers(self):
self.session = Session(bind=self.connection)
players = self.session.query(Player).all()
return players
def fetchPlayer(self, player_id):
self.session = Session(bind=self.connection)
player = self.session.query(Player).filter(Player.player_id == player_id).first()
return player
def deletePlayer(self, player_id):
session = Session(bind=self.connection)
playerData = session.query(Player).filter(Player.player_id == player_id).first()
session.delete(playerData)
session.commit()
print("Player deleted successfully!")
# username
def delete_username(self, player_id):
session = Session(bind=self.connection)
playerData = session.query(Usernames).filter(Usernames.player_id == player_id).first()
session.delete(playerData)
session.commit()
# Bet
def createBet(self, bet):
session = Session(bind=self.connection)
session.add(bet)
session.commit()
print("Bet created successfully!")
def updateBet(self, bet_id, bet_money, won_money, won_bet, bet_time):
session = Session(bind=self.connection)
dataToUpdate = {Bet.bet_money: bet_money, Bet.won_money: won_money,
Bet.won_bet: won_bet, Bet.bet_time: bet_time}
betData = session.query(Bet).filter(Bet.bet_id == bet_id)
betData.update(dataToUpdate)
session.commit()
print("Bet updated successfully!")
def fetchAllBets(self):
self.session = Session(bind=self.connection)
bets = self.session.query(Bet).all()
return bets
def fetchBet(self, bet_id):
self.session = Session(bind=self.connection)
bet = self.session.query(Bet).filter(Bet.bet_id == bet_id).first()
return bet
def deleteBet(self, bet_id):
session = Session(bind=self.connection)
betData = session.query(Bet).filter(Bet.bet_id == bet_id).first()
session.delete(betData)
session.commit()
print("Bet deleted successfully!")
# Country
def createCountry(self, country):
session = Session(bind=self.connection)
session.add(country)
session.commit()
print("Country created successfully!")
def fetchAllCountries(self):
self.session = Session(bind=self.connection)
countries = self.session.query(Country).all()
return countries
# Bank
def createBank(self, bank):
session = Session(bind=self.connection)
session.add(bank)
session.commit()
print("Bank created successfully!")
def updateBank(self, player_id, sold_time, sold_coins):
session = Session(bind=self.connection)
dataToUpdate = {Bank.sold_time: sold_time, Bank.sold_coins: sold_coins}
betData = session.query(Bank).filter(Bank.player_id == player_id)
betData.update(dataToUpdate)
session.commit()
print("Bank updated successfully!")
def updateBankWithTime(self, player_id, sold_time, sold_coins):
session = Session(bind=self.connection)
dataToUpdate = {Bank.sold_coins: sold_coins}
bankData = session.query(Bank).filter(Bank.player_id == player_id).filter(Bank.sold_time == sold_time)
bankData.update(dataToUpdate)
session.commit()
print("Bank updated successfully!")
def fetchAllBanks(self):
self.session = Session(bind=self.connection)
banks = self.session.query(Bank).all()
return banks
def fetchBank(self, player_id, sold_time):
self.session = Session(bind=self.connection)
bank = self.session.query(Bank).filter(Bank.player_id == player_id).filter(Bank.sold_time == sold_time).first()
return bank
def deleteBank(self, player_id, sold_time):
session = Session(bind=self.connection)
        bankData = session.query(Bank).filter(Bank.player_id == player_id).filter(
            Bank.sold_time == sold_time).first()
session.delete(bankData)
session.commit()
print("Bank deleted successfully!")
| [
"[email protected]"
] | |
c13f8b8ffe13989527d72161e64b98b02f165823 | 8038c72e9c05b9a962a78002e1ac32ac5797d33f | /qfcommon/thriftclient/data_engine/__init__.py | 8e71657b2d2d71512424008318d0c83ead7fad72 | [] | no_license | liusiquan/open_test | faaf419dcd5877558e25847edf5e128c5e9e64b2 | 0cb0aaf401234ac3274d34dcf08ff633917db048 | refs/heads/master | 2021-01-11T20:44:25.567470 | 2017-01-18T10:13:54 | 2017-01-18T10:13:54 | 79,174,997 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | __all__ = ['ttypes', 'constants', 'DataEngine']
| [
"qfpay@TEST107.(none)"
] | qfpay@TEST107.(none) |
fe6a27b00a35cf7a6c20a465f2d80f20bdeba796 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03456/s642363983.py | af102a3e07e6002a5cb1a2702d7ff5d68895690d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | import math
a, b = map(int, input().split())
x = int(str(a) + str(b))
if math.sqrt(x) == math.ceil(math.sqrt(x)):
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
118469b2bda66288057c7bdac81c6069f2397253 | 9f6ea967d84c37d04543d72edabce4dea0517a4f | /all_scraper/Service/JingDongScraper/get_keywords_url_id/get_review_productId.py | 91b08b7ba891c843eec95dc408c44859aef45938 | [] | no_license | GongSong/All_Scraper | 8fae34851b8c4b31ab1ae47f39d511a0869c59ef | f2bacc8416ed2e611e5e8515d34ec12fd5f10018 | refs/heads/master | 2020-07-22T08:46:32.347936 | 2017-08-03T02:17:01 | 2017-08-03T02:17:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | #-*-coding:utf-8*-
from Service.JingDongScraper.db_operation.db_connection import DB_connection
from Service.JingDongScraper.db_operation.db_operation import DB_operation
class get_review_productId():
    # This fetches the productId values.
    # The review (JSON) page URL is assembled as
    # https://sclub.jd.com/comment/productPageComments.action?productId=4265472&score=0&sortType=5&page=1&pageSize=10
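    # Usage sketch (assumes the MySQL instance configured below is reachable and
    # that select() yields one-column (sku,) rows):
    #   for (sku,) in get_review_productId().get_review_ProductId():
    #       url = ('https://sclub.jd.com/comment/productPageComments.action'
    #              '?productId=%s&score=0&sortType=5&page=1&pageSize=10' % sku)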
def get_review_ProductId(self):
try:
Product_Id = []
            # Connect to the database
db = DB_connection('localhost', 3306, 'root', '123123', 'All_Scraper', 'utf8')
conn = db.connects()
mapper = DB_operation(conn)
            # Extract the sku values used to build the review URLs
sql_get_ProductId_url = "SELECT sku FROM All_Scraper.jd_keywords;"
Product_Id = mapper.select(sql_get_ProductId_url)
conn.commit()
conn.close()
return Product_Id
except Exception as err:
print err | [
"[email protected]"
] | |
bd2e73826e49ec47995d39f344229610c89341f1 | af7e20b979c198f235ba51a160c81e0856af0cc7 | /tests/test_karma_parser.py | 32af2ec042e41d2bced6250b7acdaeb7d9c40210 | [
"MIT"
] | permissive | the-Bruce/apollo | 1ce3b8abd184b180c24acbe966571c8b294cc04c | 7458785f94cde1dfe3e21451504916d8e5d50ae8 | refs/heads/master | 2020-05-15T14:21:01.324314 | 2019-02-11T13:32:40 | 2019-02-11T13:32:40 | 182,333,921 | 1 | 0 | null | 2019-04-19T22:44:17 | 2019-04-19T22:44:17 | null | UTF-8 | Python | false | false | 5,868 | py | import os
import pytest
from alembic import command
from alembic.config import Config
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from karma.parser import parse_message, RawKarma
from models import Base
@pytest.fixture(scope='module')
def database():
# Locate the testing config for Alembic
config = Config(os.path.join(os.path.dirname(__file__), '../alembic.tests.ini'))
# Start up the in-memory database instance
db_engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(db_engine)
db_session = Session(bind=db_engine)
# Mark it as up-to-date with migrations
command.stamp(config, 'head')
return db_session
def test_empty(database):
assert parse_message('', database) is None
def test_empty_with_code_block(database):
assert parse_message('```FoobarBaz```', database) is None
def test_simple_positive(database):
assert parse_message('Foobar++', database) == [RawKarma(name='Foobar', op='++', reason=None)]
def test_simple_negative(database):
assert parse_message('Foobar--', database) == [RawKarma(name='Foobar', op='--', reason=None)]
def test_simple_neutral_pm(database):
assert parse_message('Foobar+-', database) == [RawKarma(name='Foobar', op='+-', reason=None)]
def test_simple_neutral_mp(database):
assert parse_message('Foobar-+', database) == [RawKarma(name='Foobar', op='-+', reason=None)]
def test_quoted_positive(database):
assert parse_message('"Foobar"++', database) == [RawKarma(name='Foobar', op='++', reason=None)]
def test_quoted_negative(database):
assert parse_message('"Foobar"--', database) == [RawKarma(name='Foobar', op='--', reason=None)]
def test_quoted_neutral_pm(database):
assert parse_message('"Foobar"+-', database) == [RawKarma(name='Foobar', op='+-', reason=None)]
def test_quoted_neutral_mp(database):
assert parse_message('"Foobar"-+', database) == [RawKarma(name='Foobar', op='-+', reason=None)]
def test_simple_positive_with_text_after(database):
assert parse_message('Foobar++ since it\'s pretty cool', database) == [
RawKarma(name='Foobar', op='++', reason=None)
]
def test_simple_positive_with_paren_reason(database):
assert parse_message('Foobar++ (hella cool)', database) == [
RawKarma(name='Foobar', op='++', reason='hella cool')
]
def test_simple_positive_with_empty_paren_reason(database):
assert parse_message('Foobar++ ()', database) == [RawKarma(name='Foobar', op='++', reason=None)]
def test_simple_positive_with_compound_reason(database):
assert parse_message('Foobar++ because it is (hella cool)', database) == [
RawKarma(name='Foobar', op='++', reason='it is (hella cool)')
]
def test_simple_positive_with_reason(database):
assert parse_message('Foobar++ because baz', database) == [
RawKarma(name='Foobar', op='++', reason='baz')
]
def test_simple_negative_with_reason(database):
assert parse_message('Foobar-- because baz', database) == [
RawKarma(name='Foobar', op='--', reason='baz')
]
def test_simple_neutral_pm_with_reason(database):
assert parse_message('Foobar+- because baz', database) == [
RawKarma(name='Foobar', op='+-', reason='baz')
]
def test_simple_neutral_mp_with_reason(database):
assert parse_message('Foobar-+ because baz', database) == [
RawKarma(name='Foobar', op='-+', reason='baz')
]
def test_quoted_positive_with_reason(database):
assert parse_message('Foobar++ because baz', database) == [
RawKarma(name='Foobar', op='++', reason='baz')
]
def test_quoted_negative_with_reason(database):
assert parse_message('Foobar-- because baz', database) == [
RawKarma(name='Foobar', op='--', reason='baz')
]
def test_quoted_neutral_pm_with_reason(database):
assert parse_message('Foobar+- because baz', database) == [
RawKarma(name='Foobar', op='+-', reason='baz')
]
def test_quoted_neutral_mp_with_reason(database):
assert parse_message('Foobar-+ because baz', database) == [
RawKarma(name='Foobar', op='-+', reason='baz')
]
def test_simple_multiple_karma(database):
assert parse_message('Foobar++, Baz-- Blat+-', database) == [
RawKarma(name='Foobar', op='++', reason=None),
RawKarma(name='Baz', op='--', reason=None),
RawKarma(name='Blat', op='+-', reason=None)
]
def test_simple_multiple_karma_with_reasons_and_quotes(database):
assert parse_message('Foobar++ because baz blat, "Hello world"--', database) == [
RawKarma(name='Foobar', op='++', reason='baz blat'),
RawKarma(name='Hello world', op='--', reason=None)
]
def test_karma_op_no_token(database):
assert parse_message('++', database) is None
def test_simple_invalid(database):
assert parse_message('Foo+', database) is None
def test_simple_invalid_with_reason(database):
assert parse_message('Foo+ because baz', database) is None
def test_start_simple_mid_message(database):
assert parse_message('Hello, world! Foo++', database) == [
RawKarma(name='Foo', op='++', reason=None)
]
def test_start_simple_mid_message_with_reason(database):
assert parse_message('Hello, world! Foo++ because bar', database) == [
RawKarma(name='Foo', op='++', reason='bar')
]
def test_code_block_with_internal_reason(database):
assert parse_message('```Foobar baz because foo```', database) is None
def test_code_block_with_karma_op_after(database):
assert parse_message('```Foobar baz```++', database) is None
def test_code_block_external_reason(database):
assert parse_message('```Foobar baz``` because foo', database) is None
def test_code_block_with_karma_op_after_and_external_reason(database):
assert parse_message('```Foobar baz```++ because foo', database) is None
| [
"[email protected]"
] | |
8909cbc55ed2f07c029ca60c0a49f99e63764afb | 61b0a6041d96c1b977f07aed9e4f6ee3229f85e6 | /fetch_wikipedia.py | d1de9c92094b4f0d3da64d08c1770d7aa6228e38 | [] | no_license | Hadryan/choir_music_data | e1d163cd8fe7e5f286b46b0bd51d5c66dc73d3e6 | 2d72a8f76c40915ffff3fc0929ad498b8b232136 | refs/heads/master | 2023-01-01T01:06:48.196179 | 2020-10-20T17:47:11 | 2020-10-20T17:47:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,074 | py | import logging
import pandas as pd
import regex
import yaml
logging.basicConfig(level=logging.INFO, format='{asctime} {message}', style='{')
BASE_URL = "https://en.wikipedia.org/wiki"
lists = [
"List of Medieval composers",
"List of Renaissance composers",
"List of Baroque composers",
"List of Classical-era composers",
"List of Romantic-era composers",
"List of 20th-century classical composers",
"List of 21st-century classical composers",
]
era_config = """
Medieval:
type: list
Renaissance:
type: list
Baroque:
type: list
Classical-era:
type: table
birth_col: Date born
died_col: Date died
Romantic-era:
type: table
birth_col: Date born
died_col: Date died
20th-century classical:
type: table
birth_col: Year of birth
died_col: Year of death
21st-century classical:
type: table
birth_col: Date born
died_col: Date died
"""
era_config = yaml.safe_load(era_config)
def fetch_composer_list(era_name: str, era_config: dict = era_config) -> pd.DataFrame:
config = era_config[era_name]
assert config["type"] == "table"
list_name = f"List of {era_name} composers".replace(' ', '%20')
url = f"{BASE_URL}/{list_name}"
logging.info(f"Fetching url {url}")
df = find_composer_table(url)
df.rename(columns={config["birth_col"]: "birth", config["died_col"]: "death", }, inplace=True)
df["era"] = era_name
# df = dfs[2]
logging.info(df.head())
return df
def find_composer_table(url):
dfs = pd.read_html(url)
logging.info(f"{len(dfs)} tables on the page")
for i, df in enumerate(dfs):
print(i, df.columns)
print(df.head(2))
if "Name" in df.columns and "Nationality" in df.columns:
break
logging.info(i)
return df
def get_table_eras(config: dict) -> list:
return [era_name for era_name in config if config[era_name]['type'] == 'table']
def main():
dfs = []
columns = 'Name birth death Nationality era'.split()
exclude = ["Classical-era", "21st-century classical"]
for era_name in get_table_eras(era_config):
if era_name in exclude:
continue
df = fetch_composer_list(era_name)
print(df.head(1), df.columns)
df = df[columns]
dfs.append(df)
print(len(df), df.columns)
full_df = pd.concat(dfs, sort=False)
print([len(df) for df in dfs], len(full_df))
full_df.to_csv('composers.csv', index=False)
print(full_df.head())
def fetch_from_wiki(era_name: str) -> pd.DataFrame:
with open(f"List of {era_name} composers.wiki".replace(" ", "_")) as f:
data = []
for line in f:
pattern = rf"""\*\s*
\[\[
(?:([^)|]*)\|)?
([^)|]*)
\]\]
\s*
\(
(
([^)-–]*)
[-–]
([^)]*)
)
"""
match = regex.match(pattern, line, regex.VERBOSE)
# if line.startswith("*"):
# print(line, match)
if match:
article, name, _, birth, death = match.groups()
# if name2 is None:
# name2 = name
data.append((article, name, birth, death))
different_article = [(name, a, birth, death) for a, name, birth, death in data if a is not None and a != name]
print(*different_article, sep="\n")
data = [(a if a else name, name, birth, death) for a, name, birth, death in data]
dat = list(zip(['article', 'name', 'birth', 'death'], list(zip(*data))))
print(dat[:4])
df = pd.DataFrame(dict(dat))
df['era'] = era_name
print(df.head())
df.to_csv(f'composers_{era_name}.csv', index=False)
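# Example of a wiki line the verbose pattern above is meant to match (sketch):
#   * [[Hildegard of Bingen]] (1098–1179)
# which yields article=None, name='Hildegard of Bingen', birth='1098', death='1179'.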
if __name__ == '__main__':
# main()
# df = fetch_composer_list("Classical-era")
# print(df.columns, df.head())
# fetch_from_wiki("Baroque")
# fetch_from_wiki("Classical-era")
fetch_from_wiki("Medieval")
| [
"[email protected]"
] | |
25f490d9ba2c1722d96adef48bebc6b2f19eb091 | 6d85d154048d526c9b073b7a75c0ca6298111ded | /nadine/tests/test_profile.py | 4b9a194392684dfbb58b9838c26e7bc2377e3350 | [
"Apache-2.0"
] | permissive | switchdin/nadine | 02595a0a749e97840c756e23827793a5d732de0f | 42b4377d7a5f64320d83c4d5f27981428a111e1f | refs/heads/master | 2021-05-06T17:56:31.441213 | 2017-11-24T00:47:51 | 2017-11-24T00:47:51 | 111,856,045 | 0 | 0 | null | 2017-11-24T00:28:34 | 2017-11-23T22:54:23 | JavaScript | UTF-8 | Python | false | false | 10,085 | py | import traceback
from datetime import datetime, timedelta, date
from django.test import TestCase, override_settings
from django.core import management
from django.contrib.auth.models import User
from django.conf import settings
from django.utils import timezone
from nadine.models import *
@override_settings(SUSPEND_MEMBER_ALERTS=True)
class ProfileTestCase(TestCase):
def setUp(self):
self.neighborhood1 = Neighborhood.objects.create(name="Beggar's Gulch")
# Basic Packages = just days
self.basicPackage = MembershipPackage.objects.create(name="Basic")
SubscriptionDefault.objects.create(
package = self.basicPackage,
resource = Resource.objects.day_resource,
monthly_rate = 50,
allowance = 3,
overage_rate = 20,
)
# PT-5 Package + Key + Mail
self.pt5Package = MembershipPackage.objects.create(name="PT5")
SubscriptionDefault.objects.create(
package = self.pt5Package,
resource = Resource.objects.day_resource,
monthly_rate = 75,
allowance = 5,
overage_rate = 20,
)
SubscriptionDefault.objects.create(
package = self.pt5Package,
resource = Resource.objects.key_resource,
monthly_rate = 100,
allowance = 1,
overage_rate = 0,
)
SubscriptionDefault.objects.create(
package = self.pt5Package,
resource = Resource.objects.mail_resource,
monthly_rate = 35,
allowance = 1,
overage_rate = 0,
)
# Resident Package with a desk
self.residentPackage = MembershipPackage.objects.create(name="Resident")
SubscriptionDefault.objects.create(
package = self.residentPackage,
resource = Resource.objects.day_resource,
monthly_rate = 0,
allowance = 5,
overage_rate = 20,
)
SubscriptionDefault.objects.create(
package = self.residentPackage,
resource = Resource.objects.desk_resource,
monthly_rate = 475,
allowance = 1,
overage_rate = 0,
)
self.user1 = User.objects.create(username='member_one', first_name='Member', last_name='One')
self.profile1 = self.user1.profile
self.profile1.neighborhood = self.neighborhood1
self.profile1.valid_billing = True
self.profile1.save()
# Basic from 2/26/2008 to 6/25/2010
# Resident from 6/26/2010 to date
self.user1.membership.set_to_package(self.basicPackage, start_date=date(2008, 2, 26), end_date=date(2010, 6, 25))
self.user1.membership.set_to_package(self.residentPackage, start_date=date(2010, 6, 26))
self.user2 = User.objects.create(username='member_two', first_name='Member', last_name='Two')
self.profile2 = self.user2.profile
# PT-5 since 1/1/2009
self.user2.membership.set_to_package(self.pt5Package, start_date=date(2009, 1, 1))
self.user3 = User.objects.create(username='member_three', first_name='Member', last_name='Three')
self.profile3 = self.user3.profile
self.profile3.neighborhood = self.neighborhood1
self.profile3.save()
# No subscriptions
self.user4 = User.objects.create(username='member_four', first_name='Member', last_name='Four')
self.profile4 = self.user4.profile
self.profile4.neighborhood = self.neighborhood1
self.profile4.save()
# Was a PT-5 from 1/1/2009 to 1/1/2010
self.user4.membership.set_to_package(self.pt5Package, start_date=date(2009, 1, 1), end_date=date(2010, 1, 1))
self.user5 = User.objects.create(username='member_five', first_name='Member', last_name='Five')
self.profile5 = self.user5.profile
self.profile5.valid_billing = False
self.profile5.save()
# PT-5 from 1/1/2009, paid by User1
self.user5.membership.set_to_package(self.pt5Package, start_date=date(2009, 1, 1), paid_by=self.user1)
############################################################################
# Tests
############################################################################
def test_active_subscriptions(self):
# Resident membership has 2 resource subscriptions
self.assertEqual(2, self.user1.profile.active_subscriptions().count())
# Our PT-5 membership has 3 resource subscriptions
self.assertEqual(3, self.user2.profile.active_subscriptions().count())
# User3, and 4 have no subscriptions
self.assertEqual(0, self.user3.profile.active_subscriptions().count())
self.assertEqual(0, self.user4.profile.active_subscriptions().count())
def test_by_package(self):
self.assertTrue(self.user1 in User.helper.members_by_package(self.residentPackage))
self.assertFalse(self.user1 in User.helper.members_by_package(self.basicPackage))
self.assertTrue(self.user2 in User.helper.members_by_package(self.pt5Package))
self.assertFalse(self.user2 in User.helper.members_by_package(self.residentPackage))
def test_by_neighborhood(self):
self.assertTrue(self.user1 in User.helper.members_by_neighborhood(self.neighborhood1))
self.assertFalse(self.user2 in User.helper.members_by_neighborhood(self.neighborhood1))
self.assertFalse(self.user3 in User.helper.members_by_neighborhood(self.neighborhood1))
self.assertFalse(self.user4 in User.helper.members_by_neighborhood(self.neighborhood1))
self.assertTrue(self.user3 in User.helper.members_by_neighborhood(self.neighborhood1, active_only=False))
self.assertTrue(self.user4 in User.helper.members_by_neighborhood(self.neighborhood1, active_only=False))
def test_by_resource(self):
# User1 has a desk
self.assertTrue(self.user1 in User.helper.members_with_desks())
self.assertFalse(self.user1 in User.helper.members_with_keys())
# User2 has key and mail
self.assertTrue(self.user2 in User.helper.members_with_keys())
self.assertTrue(self.user2 in User.helper.members_with_mail())
self.assertFalse(self.user2 in User.helper.members_with_desks())
# User3 doesn't have any resources
self.assertFalse(self.user3 in User.helper.members_with_keys())
self.assertFalse(self.user3 in User.helper.members_with_mail())
self.assertFalse(self.user3 in User.helper.members_with_desks())
# User4 doesn't have any resources
self.assertFalse(self.user4 in User.helper.members_with_keys())
self.assertFalse(self.user4 in User.helper.members_with_mail())
self.assertFalse(self.user4 in User.helper.members_with_desks())
# User5 has key and mail
self.assertTrue(self.user5 in User.helper.members_with_keys())
self.assertTrue(self.user5 in User.helper.members_with_mail())
self.assertFalse(self.user5 in User.helper.members_with_desks())
def test_valid_billing(self):
# Member 1 has valid billing
self.assertTrue(self.user1.profile.valid_billing)
self.assertTrue(self.user1.profile.has_valid_billing())
# Member 2 does not have valid billing
self.assertFalse(self.user2.profile.valid_billing)
self.assertFalse(self.user2.profile.has_valid_billing())
# Member 5 does not have valid billing but is a guest of Member 1
self.assertFalse(self.user5.profile.valid_billing)
self.assertTrue(self.user5.profile.has_valid_billing())
def test_is_guest(self):
self.assertFalse(self.user1.profile.is_guest())
self.assertFalse(self.user2.profile.is_guest())
self.assertFalse(self.user3.profile.is_guest())
self.assertFalse(self.user4.profile.is_guest())
self.assertTrue(self.user5.profile.is_guest())
def test_guests(self):
guests = self.user1.profile.guests()
self.assertEqual(1, len(guests))
self.assertTrue(self.user5 in guests)
self.assertEqual(0, len(self.user2.profile.guests()))
self.assertEqual(0, len(self.user3.profile.guests()))
self.assertEqual(0, len(self.user4.profile.guests()))
self.assertEqual(0, len(self.user5.profile.guests()))
def test_hosts(self):
hosts = self.user5.profile.hosts()
self.assertEqual(1, len(hosts))
self.assertTrue(self.user1 in hosts)
self.assertEqual(0, len(self.user1.profile.hosts()))
self.assertEqual(0, len(self.user2.profile.hosts()))
self.assertEqual(0, len(self.user3.profile.hosts()))
self.assertEqual(0, len(self.user4.profile.hosts()))
def test_tags(self):
self.user1.profile.tags.add(u'coworking', u'books', u'beer')
self.user2.profile.tags.add(u'beer', u'cars', u'women')
self.user3.profile.tags.add(u'knitting', u'beer', u'travel')
self.assertTrue(self.user1.profile in UserProfile.objects.filter(tags__name__in=[u'beer']))
self.assertTrue(self.user2.profile in UserProfile.objects.filter(tags__name__in=[u'beer']))
self.assertTrue(self.user3.profile in UserProfile.objects.filter(tags__name__in=[u'beer']))
self.assertFalse(self.user1.profile in UserProfile.objects.filter(tags__name__in=[u'knitting']))
self.assertFalse(self.user3.profile in UserProfile.objects.filter(tags__name__in=[u'books']))
# Copyright 2017 Office Nomads LLC (http://www.officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| [
"[email protected]"
] | |
fe81ad7960d5b1d25e0585a0663772e6856dbfc5 | 9eb08685de453d8c099015adcc3a2ff29041fdf3 | /examples/app/dijkstra/cthread_write_node_visited.py | fe4c54310af1366a2bdd6f5e3fcd00737a2d566d | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Python3pkg/PyCoRAM | 938b6b4308e7ef04ec01f6c2e08c7eae7d0f5d3f | cd546ffe6cc6fbdabd49ba204a098b8feb6e7e97 | refs/heads/master | 2021-01-21T17:46:35.469599 | 2017-05-21T20:40:41 | 2017-05-21T20:40:41 | 91,986,108 | 0 | 0 | null | 2017-05-21T20:40:28 | 2017-05-21T20:40:28 | null | UTF-8 | Python | false | false | 339 | py | COMM_SIZE = 2 ** 4
STREAM_SIZE = 2 ** 10
DATA_BITWIDTH = 32
outstream = CoramOutStream(idx=0, datawidth=DATA_BITWIDTH, size=STREAM_SIZE)
channel = CoramChannel(idx=0, datawidth=DATA_BITWIDTH, size=COMM_SIZE)
def write_node_visited():
addr = channel.read()
outstream.read_nonblocking(addr, 1)
while True:
write_node_visited()
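# Context note: CoramOutStream and CoramChannel are PyCoRAM constructs; the names
# are provided by the PyCoRAM toolchain when this control thread is compiled, so
# no import is needed and the endless loop is intentional.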
| [
"[email protected]"
] | |
43246448601d4be7c5377c2e63e5de5eb06bdf8a | e296f0f3d7db598aba5658de3ff8c767634e533e | /zoo/migrations/092_zoo_animals_models.py | ef6a523b495b98b75535c3c8c4944d6553ec77e5 | [] | no_license | devfort/wildlifenearyou | b2ac05070aa6face60156d6e7c85f98f00013c25 | 8e618aea90bbcedc45a4e30199e31880ea9e6dca | refs/heads/master | 2021-01-13T01:25:29.467549 | 2010-06-10T06:37:43 | 2010-06-10T06:37:43 | 7,874,317 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | from django.conf import settings
if settings.DATABASE_ENGINE == 'mysql':
from dmigrations.mysql import migrations as m
elif settings.DATABASE_ENGINE == 'sqlite3':
from dmigrations.sqlite3 import migrations as m
import datetime
migration = m.Migration(sql_up=["""
DROP TABLE `animals_superspecies`;
""", """
CREATE TABLE `animals_superspecies` (
`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
`common_name` varchar(500) NOT NULL,
`description` longtext NOT NULL,
`slug` varchar(255) NOT NULL UNIQUE,
`species_group_id` integer NULL,
`type` varchar(10) NOT NULL,
`latin_name` varchar(500) NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8
;
"""], sql_down=["""
DROP TABLE `animals_superspecies`;
""", """
CREATE TABLE `animals_superspecies` (
`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
`common_name` varchar(500) NOT NULL,
`description` longtext NOT NULL,
`slug` varchar(255) NOT NULL UNIQUE,
`species_group_id` integer NULL,
`type` varchar(10) NOT NULL,
    `latin_name` varchar(500) NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8
;
"""])
# Dirty fake this - the sql_down should recreate the table as it used to be
# but doesn't. | [
"[email protected]"
] | |
3e47ecd125a7c7fc3a15c5fe0529ae8fdf94115c | de08fd5306c61a797c24bda927bd809acd7a22e7 | /socket_message.py | 5ebd79abf7863b7eb311d5e9bc187ba709ce01df | [] | no_license | benjsonzhang/shield | 847175c4cb0746d6c047e08e529973b6748eefd1 | c737d8cdd231139fdda94675a4f68dfc671fdf4e | refs/heads/master | 2023-06-06T23:52:08.696106 | 2021-07-02T08:24:56 | 2021-07-02T08:24:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,979 | py | # coding=utf-8
import requests
import json
import time
import socket
import base64
import socks
base_url = "http://127.0.0.1:18080"
# proxies = {"http": "127.0.0.1:8888", "https": "127.0.0.1:8888"}
proxies = None
class XhsSocketClient:
def __init__(self):
self.client = None
def connect(self):
if self.client is not None:
return
HOST = 'apppush.xiaohongshu.com'
PORT = 5333
        # Monkey-patch socket.socket so the connection is routed through SOCKS
        socket.socket = socks.socksocket
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect((HOST, PORT))
def send(self, based64ed):
if self.client is None:
self.connect()
content = base64.b64decode(based64ed)
user_input = content
print content
print "================= sent =========================="
self.client.sendall(user_input)
def close(self):
if self.client is not None:
self.client.close()
print "================= close =========================="
def __del__(self):
self.close()
def test():
url = base_url + "/s/login"
params = {
"uid": "60ddb0d10000000001015f01",
"sid": "session.1594515706332388740313",
"deviceId": "353CE2F-0131-474E-A093-DF39D12E4515",
"fingerprint": "202006261454019d1b1a0db8172b59cbe25925c1c3900001ab4b27b14c4883",
}
text = requests.get(url, params=params, proxies=proxies).json()
print json.dumps(text, ensure_ascii=False)
client = XhsSocketClient()
client.connect()
client.send(text.get("data").get("body"))
url = base_url + "/s/send"
params = {
"receiver": "9f775f5f3cf7000000000100",
"sender": "60ddb0d10000000001015f01",
"content": "hi",
}
text = requests.get(url, params=params, proxies=proxies).json()
client.send(text.get("data").get("body"))
print json.dumps(text, ensure_ascii=False)
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
615841aab6c9a74f46e393e9f7ee1893d81e286d | f7b6d64aafdd3d711c0c5d4f9d6a2565944e7dc6 | /magnifico_ranks/forms.py | 59884f13011b7713376529093499824de736913c | [] | no_license | Robert-Moringa/magnifico_ranks | 488d94c8c20f6f333b0e7e831f1f0feca508f331 | 06ae6dfc2b7c5ed4068c04010223d09a8ebd43a5 | refs/heads/master | 2023-08-04T18:31:04.134837 | 2021-09-23T12:03:29 | 2021-09-23T12:03:29 | 407,243,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | from django import forms
from .models import Profile, Project, Review
class EditProfileForm(forms.ModelForm):
    class Meta:
        model = Profile
        exclude = ['user', 'project']
class AddProjectForm(forms.ModelForm):
    class Meta:
        model = Project
        exclude = ['user']
class AddReviewForm(forms.ModelForm):
    class Meta:
        model = Review
        fields = ['design_rating', 'usability_rating', 'content_rating', 'comment']
"[email protected]"
] | |
3c61ed8f1fb9adeb440a74e760b56771db2fe28a | 71e43068e82c91acbb3849169d1723f1375ac27f | /talon_one/models/campaign_entity.py | d18644a5c7e9f97ee933c25ece57b7a788d886dd | [
"MIT"
] | permissive | talon-one/talon_one.py | aa08a1dbddd8ea324846ae022e43d441c57028f6 | 917dffb010e3d3e2f841be9cccba5bba1ea6c5c3 | refs/heads/master | 2023-05-11T18:50:00.041890 | 2023-05-03T20:17:39 | 2023-05-03T20:17:39 | 79,575,913 | 1 | 7 | MIT | 2023-05-03T15:10:14 | 2017-01-20T16:29:46 | Python | UTF-8 | Python | false | false | 4,313 | py | # coding: utf-8
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you access the Campaign Manager at `https://yourbaseurl.talon.one/`, the URL for the [updateCustomerSessionV2](https://docs.talon.one/integration-api#operation/updateCustomerSessionV2) endpoint is `https://yourbaseurl.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document:
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class CampaignEntity(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'campaign_id': 'int'
}
attribute_map = {
'campaign_id': 'campaignId'
}
def __init__(self, campaign_id=None, local_vars_configuration=None): # noqa: E501
"""CampaignEntity - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._campaign_id = None
self.discriminator = None
self.campaign_id = campaign_id
@property
def campaign_id(self):
"""Gets the campaign_id of this CampaignEntity. # noqa: E501
The ID of the campaign that owns this entity. # noqa: E501
:return: The campaign_id of this CampaignEntity. # noqa: E501
:rtype: int
"""
return self._campaign_id
@campaign_id.setter
def campaign_id(self, campaign_id):
"""Sets the campaign_id of this CampaignEntity.
The ID of the campaign that owns this entity. # noqa: E501
:param campaign_id: The campaign_id of this CampaignEntity. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and campaign_id is None: # noqa: E501
raise ValueError("Invalid value for `campaign_id`, must not be `None`") # noqa: E501
self._campaign_id = campaign_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CampaignEntity):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CampaignEntity):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
96e61e2cdbe7a1d8025135c55c7b6a562a6712ce | be39b38dddcdc98cab0c29189f5fb74f8c709f16 | /fast-news/tinyimg.py | 0518aa353865dd9b19099ab125ddf986c49e3998 | [] | no_license | nate-parrott/fast-news | 80bd371a56188475a0bb647fc630ee54f388bf96 | 45ed735744bd138e65e643fd1713fd8e2d107b1e | refs/heads/master | 2020-04-06T05:53:44.724331 | 2017-01-02T09:05:44 | 2017-01-02T09:05:44 | 50,979,865 | 0 | 1 | null | 2016-09-19T05:03:22 | 2016-02-03T06:41:10 | Python | UTF-8 | Python | false | false | 223 | py | from PIL import Image
def tinyimg(img):
size = list(img.size)
img = img.convert('RGB')
img = img.resize((2,2), Image.ANTIALIAS)
return {"size": [2,2], "pixels": map(list, img.getdata()), "real_size": size}
| [
"[email protected]"
] | |
ac19473c96d750e019a99cc9116eeac478f5a6f6 | c2c8915d745411a0268ee5ce18d8bf7532a09e1a | /stix-1.1.1.0/stix/common/identity.py | fb2cab262ee10fd12f1a630979928c1e562cc9e3 | [
"BSD-3-Clause"
] | permissive | asealey/crits_dependencies | 581d44e77f297af7edb78d08f0bf11ad6712b3ab | a8049c214c4570188f6101cedbacf669168f5e52 | refs/heads/master | 2021-01-17T11:50:10.020346 | 2014-12-28T06:53:01 | 2014-12-28T06:53:01 | 28,555,464 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,659 | py | # Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from __future__ import absolute_import
import stix
import stix.bindings.stix_common as common_binding
import stix.bindings.extensions.identity.ciq_identity_3_0 as ciq_identity_binding
import stix.utils
# import of RelatedIdentity is below
class Identity(stix.Entity):
_binding = common_binding
_namespace = 'http://stix.mitre.org/common-1'
def __init__(self, id_=None, idref=None, name=None, related_identities=None):
self.id_ = id_ or stix.utils.create_id("Identity")
self.idref = idref
self.name = name
        self.related_identities = related_identities or RelatedIdentities()
@property
def id_(self):
return self._id
@id_.setter
def id_(self, value):
if not value:
self._id = None
else:
self._id = value
self.idref = None
@property
def idref(self):
return self._idref
@idref.setter
def idref(self, value):
if not value:
self._idref = None
else:
self._idref = value
self.id_ = None # unset id_ if idref is present
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value if value else None
def to_obj(self, return_obj=None):
if not return_obj:
return_obj = self._binding.IdentityType()
return_obj.set_id(self.id_)
return_obj.set_idref(self.idref)
if self.name:
return_obj.set_Name(self.name)
if self.related_identities:
return_obj.set_Related_Identities(self.related_identities.to_obj())
return return_obj
@staticmethod
def lookup_class(xsi_type):
if not xsi_type:
raise ValueError("xsi:type is required")
for (k, v) in _EXTENSION_MAP.iteritems():
# TODO: for now we ignore the prefix and just check for
# a partial match
if xsi_type in k:
return v
raise ValueError("Unregistered xsi:type %s" % xsi_type)
@classmethod
def from_obj(cls, obj, return_obj=None):
import stix.extensions.identity.ciq_identity_3_0
if not obj:
return None
if not return_obj:
try:
klass = Identity.lookup_class(obj.xml_type)
return_obj = klass.from_obj(obj)
except AttributeError:
return_obj = Identity.from_obj(obj, cls())
else:
return_obj.id_ = obj.get_id()
return_obj.idref = obj.get_idref()
return_obj.name = obj.get_Name()
return_obj.related_identities = RelatedIdentities.from_obj(obj.get_Related_Identities())
return return_obj
def to_dict(self):
d = {}
if self.name:
d['name'] = self.name
if self.id_:
d['id'] = self.id_
if self.idref:
d['idref'] = self.idref
if self.related_identities:
d['related_identities'] = self.related_identities.to_dict()
return d
@classmethod
def from_dict(cls, dict_repr, return_obj=None):
import stix.extensions.identity.ciq_identity_3_0
if not dict_repr:
return None
if not return_obj:
xsi_type = dict_repr.get('xsi:type')
if xsi_type:
klass = Identity.lookup_class(dict_repr.get('xsi:type'))
return_obj = klass.from_dict(dict_repr)
else:
return_obj = Identity.from_dict(dict_repr, cls())
else:
return_obj.name = dict_repr.get('name')
return_obj.id_ = dict_repr.get('id')
return_obj.idref = dict_repr.get('idref')
return_obj.related_identities = RelatedIdentities.from_dict(dict_repr.get('related_identities'))
return return_obj
# We can't import RelatedIdentity until we have defined the Identity class.
from stix.common.related import RelatedIdentity
class RelatedIdentities(stix.EntityList):
_namespace = 'http://stix.mitre.org/common-1'
_binding = common_binding
_binding_class = common_binding.RelatedIdentitiesType
_binding_var = "Related_Identity"
_contained_type = RelatedIdentity
_inner_name = "identities"
_EXTENSION_MAP = {}
def add_extension(cls):
_EXTENSION_MAP[cls._XSI_TYPE] = cls
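# Hypothetical registration sketch (not from the original source): an extension
# class defines an _XSI_TYPE string and registers itself so that
# Identity.lookup_class() can resolve it during from_obj()/from_dict(), e.g.:
#
#     class CIQIdentity3_0Instance(Identity):
#         _XSI_TYPE = 'ciqIdentity:CIQIdentity3.0InstanceType'  # illustrative value
#
#     add_extension(CIQIdentity3_0Instance)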
| [
"[email protected]"
] | |
ac6f4dc12017e63d1d823e917c619ad903fa43c1 | 6406c60d42a243e3566cb0864c14453b686809a6 | /plugins/geoip/geoip.py | bddb6c1cf3d51e178318ee5515199df9d8c90934 | [
"Apache-2.0"
] | permissive | Srungaram/alerta-contrib | 79d29d6aa7f6b2bc0e102ef3a4b52a93b9029317 | 3a507dee5cd20fac22676c70da0b8c7364562b3d | refs/heads/master | 2021-01-18T13:53:12.550536 | 2016-01-12T01:17:54 | 2016-01-12T01:17:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py |
import os
import requests
from alerta.app import app
from alerta.plugins import PluginBase
LOG = app.logger
GEOIP_URL = os.environ.get('GEOIP_URL') or app.config.get('GEOIP_URL', 'http://freegeoip.net/json')
class GeoLocation(PluginBase):
def pre_receive(self, alert):
if 'ip' in alert.attributes:
url = '%s/%s' % (GEOIP_URL, alert.attributes['ip'])
else:
raise RuntimeWarning("IP address must be included as an alert attribute.")
r = requests.get(url, headers={'Content-type': 'application/json'}, timeout=2)
try:
alert.attributes.update(r.json())
except Exception as e:
raise RuntimeError("GeoIP lookup failed: %s" % str(e))
return alert
def post_receive(self, alert):
pass
| [
"[email protected]"
] | |
b6b46f3b5c490d5ed428d489edc8cf451ffe1eaf | 10c26e25f7da2289d50b1138b7da48bf9a02d42f | /Oj/problemset/migrations/0007_problem_constraints.py | 900aa1cc6aea2d37b2658f990e3bb3290cf41cb8 | [] | no_license | ParitoshAggarwal/OJ | e1392a02dd95d42b4d72ba69b891db9df5e406ad | 1a4acb5e620b0575d744fd8e4c13148062d1670c | refs/heads/master | 2022-10-19T21:18:02.512008 | 2017-12-27T06:53:46 | 2017-12-27T06:53:46 | 97,516,099 | 0 | 1 | null | 2022-10-13T00:05:44 | 2017-07-17T19:50:06 | JavaScript | UTF-8 | Python | false | false | 511 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-06 07:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('problemset', '0006_problem_created_at'),
]
operations = [
migrations.AddField(
model_name='problem',
name='constraints',
field=models.CharField(default='null', max_length=100),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
daf4d91f0be35c88b68c33c6b8c3a99f1f647dcb | 327ec5f11dff7a034e32735fb9bfb3ca4d82569d | /examples_keras/attention_lstm.py | 9752b36a591f1558a41b997f0592958716504f2b | [] | no_license | liyi19950329/attention-mechanism | 2a75894b9221bf6a887a81f3a507df5210bedf53 | 551aa72ac503a56354cd47a795874f49ffb6d097 | refs/heads/master | 2020-03-26T07:24:55.648061 | 2018-05-30T13:35:11 | 2018-05-30T13:35:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | """"""
import numpy as np
from data_helper import gen_time_data
from config import config_lstm as config
from keras.models import Model
from keras.layers import Input, Dense, LSTM
from keras.layers import Flatten
from attention.attention_keras import attention2d
np.random.seed(config.seed)
def build_model():
""""""
inputs = Input(shape=(config.time_steps, config.input_dim))
lstm_out = LSTM(config.lstm_units, return_sequences=True)(inputs)
attn = attention2d(lstm_out)
attn = Flatten()(attn)
output = Dense(1, activation='sigmoid')(attn)
model = Model(inputs=inputs, outputs=output)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['acc'])
return model
if __name__ == '__main__':
""""""
x, y = gen_time_data()
model = build_model()
model.summary()
model.fit(x, y,
epochs=config.epochs,
batch_size=config.batch_size,
validation_split=0.8)
| [
"[email protected]"
] | |
a8403682560101da15183e35985b03afdaf00907 | 2d0bada349646b801a69c542407279cc7bc25013 | /src/vai_quantizer/xnnc4xir/xnnc/utils/pytorch_graph.py | a552cfad8c1e16d3e32d3d50290b24a965903be9 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | Xilinx/Vitis-AI | 31e664f7adff0958bb7d149883ab9c231efb3541 | f74ddc6ed086ba949b791626638717e21505dba2 | refs/heads/master | 2023-08-31T02:44:51.029166 | 2023-07-27T06:50:28 | 2023-07-27T06:50:28 | 215,649,623 | 1,283 | 683 | Apache-2.0 | 2023-08-17T09:24:55 | 2019-10-16T21:41:54 | Python | UTF-8 | Python | false | false | 18,017 | py | """
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from collections import OrderedDict
from typing import Any, Dict, List, NoReturn, Optional
import numpy as np
import torch
from torch.onnx.utils import OperatorExportTypes
import graphviz
methods_OP = [
"attributeNames",
"hasMultipleOutputs",
"hasUses",
"inputs",
"kind",
"outputs",
"outputsSize",
"scopeName",
]
# Some additional methods to explore for methods_IO are
#
# 'unique' (type int)
# 'type' (type <Tensor<class 'torch._C.Type'>>)
#
# But the below are sufficient for now.
methods_IO = ["node", "offset", "debugName", "unique", "type"]
class NodePy(object):
def __init__(self, node_cpp):
self.inputs: List[str] = []
self.outputs: List[str] = []
self.kind: str = ""
self.scope: str = ""
def __repr__(self):
repr = []
repr.append(str(type(self)))
for m in dir(self):
if "__" not in m:
repr.append(
m + ": " + str(getattr(self, m)) + str(type(getattr(self, m)))
)
return "\n".join(repr) + "\n\n"
class NodePyIO(NodePy):
def __init__(self, node_cpp, input_or_output=None):
super(NodePyIO, self).__init__(node_cpp)
self.offset: int = 0
self.debug_name: str = ""
self.id: int = -1
self.input_or_output: str = ""
self.tensor_shape: List[int] = None
self.tensor: np.ndarray = None
if node_cpp is not None:
# offset
self.offset = node_cpp.offset() # getattr(node_cpp, 'offset')()
# debugName
self.debug_name = node_cpp.debugName() # getattr(node_cpp, 'debugName')()
# unique
self.id = node_cpp.unique() # getattr(node_cpp, 'unique')()
# kind
self.kind = "Parameter"
if input_or_output:
self.input_or_output = input_or_output
self.kind = "IO Node"
# type
try:
tensor_size = node_cpp.type().sizes()
except RuntimeError:
tensor_size = [
1,
] # fail when constant model is used.
self.tensor_shape = tensor_size
class NodePyOP(NodePy):
def __init__(self, node_cpp):
super(NodePyOP, self).__init__(node_cpp)
self.attributes: Dict[str, Any] = {}
# self.attributes = ""
self.has_multiple_outputs: bool = False
self.has_users: bool = False
self.outputs_size: int = 0
self.inputs_tensor_shape: List[List[int]] = []
self.outputs_tensor_shape: List[List[int]] = []
self.alias: str = ""
self.op_name: str = ""
self.op_type: str = ""
if node_cpp is not None:
# attributeNames
# Replace single quote which causes strange behavior in TensorBoard
for attr_name in node_cpp.attributeNames():
assert (
attr_name not in self.attributes
), f"[ERROR] duplicated attribute name: {attr_name}"
self.attributes[attr_name] = node_cpp[attr_name]
# self.attributes = str(
# {k: node_cpp[k] for k in node_cpp.attributeNames()}
# ).replace("'", " ")
# hasMultipleOutputs
self.has_multiple_outputs = node_cpp.hasMultipleOutputs()
# hasUses
self.has_users = node_cpp.hasUses()
# kind
self.kind = node_cpp.kind()
# scopeName
self.scope = node_cpp.scopeName()
# outputsSize
self.outputs_size = node_cpp.outputsSize()
for m in ["inputs", "outputs"]:
list_of_node = list(getattr(node_cpp, m)())
io_debug_names = []
io_tensor_shapes = []
for n in list_of_node:
io_debug_names.append(n.debugName())
if n.type().kind() == "TensorType":
io_tensor_shapes.append(n.type().sizes())
else:
io_tensor_shapes.append(None)
setattr(self, m, io_debug_names)
setattr(self, m + "_tensor_shape", io_tensor_shapes)
class GraphPy(object):
def __init__(self, root_scope_name="default"):
self.nodes_op = []
self.nodes_io = OrderedDict()
self.debug_name_to_scoped_name = {}
self.shallowest_scope_name = root_scope_name
self.scope_name_appeared = []
self.alias_to_scope = {}
def append(self, x):
if isinstance(x, NodePyIO): # append NodePyIO
self.nodes_io[x.debug_name] = x
elif isinstance(x, NodePyOP): # append NodePyOP
self.nodes_op.append(x)
# deal with outputs
for output_debug_name, output_shape in zip(
x.outputs, x.outputs_tensor_shape
):
# self.scope_name_appeared.append(x.scope)
node_io = NodePyIO(None)
node_io.debug_name = output_debug_name
node_io.scope = x.scope
node_io.kind = x.kind
node_io.inputs = x.inputs
node_io.outputs = x.outputs
self.nodes_io[output_debug_name] = node_io
else:
raise TypeError(f"[ERROR] Unsupported node type: {x.__class__.__name__}")
def printall(self):
print("all nodes")
for node in self.nodes_op:
print(node)
for debug_name in self.nodes_io:
print(self.nodes_io[debug_name])
def find_common_root(self):
for fullscope in self.scope_name_appeared:
if fullscope:
self.shallowest_scope_name = fullscope.split("/")[0]
def populate_namespace_from_OP_to_IO(self):
for node in self.nodes_op:
for inode_debug_name in node.inputs:
self.debug_name_to_scoped_name[inode_debug_name] = (
node.scope + "/" + inode_debug_name
)
for debug_name, node in self.nodes_io.items():
if hasattr(node, "input_or_output") and node.input_or_output in [
"input",
"output",
]: # input or output nodes
if debug_name in self.debug_name_to_scoped_name:
continue
self.debug_name_to_scoped_name[debug_name] = (
node.input_or_output + "/" + node.debug_name
)
elif hasattr(node, "scope"):
# self.debug_name_to_scoped_name[debug_name] = node.scope + '/' + node.uniqueName
if node.scope == "" and self.shallowest_scope_name:
self.debug_name_to_scoped_name[debug_name] = (
self.shallowest_scope_name + "/" + debug_name
)
else:
self.debug_name_to_scoped_name[debug_name] = (
node.scope + "/" + debug_name
)
# replace debug name in 'inputs' with scope name
for debug_name, node in self.nodes_io.items():
if debug_name in self.debug_name_to_scoped_name:
node.scope = self.debug_name_to_scoped_name[debug_name]
# def build_alias_to_scope_dict(self):
# assert self.nodes_op is not None and len(self.nodes_op) > 0
# for node_op in self.nodes_op:
# assert (
# node_op.alias not in self.alias_to_scope
# ), f"[ERROR] Duplicated alias: {node_op.alias}"
# self.alias_to_scope[node_op.alias] = node_op.scope
def parse_model(
model: torch.nn.Module,
inputs_shape: List[List[int]],
ignore_useless_nodes: bool = True,
dump_image: bool = False,
) -> GraphPy:
assert model is not None, "'model' should not be None."
assert inputs_shape is not None, "'input_shape' should not be None."
dummy_inputs = []
for shape in inputs_shape:
dummy_inputs.append(torch.randn(shape))
model.eval()
trace, _ = torch.jit.get_trace_graph(model, tuple(dummy_inputs))
optimize_trace(trace)
graph = trace.graph()
# number of input nodes
n_inputs = len(dummy_inputs)
state_dict = torch.jit._unique_state_dict(model)
# state_names: List[str] = list(state_dict.keys())
state_values = list(state_dict.values())
graph_py = GraphPy(root_scope_name=model.__class__.__name__)
# NodePyIO for inputs and parameters
for i, node in enumerate(graph.inputs()):
if ignore_useless_nodes:
if (
len(node.uses()) == 0
            ):  # number of users of the node (= fanout)
continue
if i < n_inputs: # the first n nodes are input nodes
node_io = NodePyIO(node, "input")
else:
node_io = NodePyIO(node)
node_io.tensor = state_values[i - n_inputs]
graph_py.append(node_io) # parameter
    # NodePyOP for all nodes except inputs, parameters, and outputs
for node in graph.nodes():
graph_py.append(NodePyOP(node))
# NodePyIO for outputs
for node in graph.outputs(): # must place last.
graph_py.append(NodePyIO(node, "output"))
graph_py.find_common_root()
graph_py.populate_namespace_from_OP_to_IO()
# update the 'outputs' fields
for debug_name, node in graph_py.nodes_io.items():
if len(node.inputs) > 0:
for iname in node.inputs:
inode = graph_py.nodes_io.get(iname)
                assert inode is not None, f"[ERROR] input node not found: name {iname}"
inode.outputs.append(debug_name)
def extract_alias(scope: str) -> str:
if "[" not in scope:
return ""
res = []
start = -1
for i, ch in enumerate(scope):
if ch == "[":
start = i + 1
elif ch == "]":
res.append(scope[start:i])
start = -1
return ".".join(res)
# * extract inputs and outputs
input_dict: Dict[str, NodePyIO] = {}
output_dict: Dict[str, NodePyIO] = {}
keys: List[str] = graph_py.nodes_io.keys()
for key in keys:
node = graph_py.nodes_io[key]
if node.input_or_output == "input":
input_dict[key] = node
elif node.input_or_output == "output":
output_dict[key] = node
# * extract ops
op_dict = {}
for node in graph_py.nodes_op:
node.alias = node.outputs.pop(0)
# set the 'op_type' field
node.op_type = node.kind.split("::")[-1].replace("_", "")
# set the 'op_name' field
node.op_name = extract_alias(node.scope)
# add current node to op_dict
assert node.alias not in op_dict
op_dict[node.alias] = node
# * update op type of nodes_op in graph_py
revise_op_type(graph_py.nodes_op)
# * update scope name
revise_scope(graph_py.nodes_op)
# * update the 'outputs' field of the input nodes
for _, inode in input_dict.items():
inode.outputs = [x for x in inode.outputs if x in op_dict]
# * update the 'outputs' field of each node_op
for node in graph_py.nodes_op:
if len(node.inputs) > 0:
for iname in node.inputs:
inode = op_dict.get(iname)
if inode is not None:
if node.alias not in inode.outputs:
inode.outputs.append(node.alias)
else:
if iname not in graph_py.nodes_io:
raise KeyError(f"input name: {iname}")
# ! debug: set op name
update_op_name(graph_py, op_dict)
# * dump graph
if dump_image:
dump(op_dict)
return graph_py
def optimize_trace(trace):
trace.set_graph(optimize_graph(trace.graph()))
def optimize_graph(graph):
# we record some ops like ones/zeros
# into a trace where we previously recorded constants
# use constant prop to maintain our current level of onnx support
# without implementing symbolics for all of them
torch._C._jit_pass_constant_propagation(graph)
torch.onnx.utils._split_tensor_list_constants(graph, graph)
# run dce to eliminate dead parts of the graph that might have been
# left behind by things like symbolic_override
torch._C._jit_pass_dce(graph)
torch._C._jit_pass_lint(graph)
# torch._C._jit_pass_canonicalize_ops(graph)
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_peephole(graph, True)
torch._C._jit_pass_lint(graph)
# onnx only supports tensors, but 1 / 2 = 0.5 and tensor(1) / tensor(2) = 0
torch._C._jit_pass_prepare_division_for_onnx(graph)
# onnx only supports tensors, so we turn all out number types into tensors
torch._C._jit_pass_erase_number_types(graph)
# onnx does not support tuples, so try to remove them
torch._C._jit_pass_lower_all_tuples(graph)
torch._C._jit_pass_peephole(graph, True)
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_dce(graph)
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_fixup_onnx_loops(graph)
torch._C._jit_pass_lint(graph)
graph = torch._C._jit_pass_canonicalize(graph)
torch._C._jit_pass_lint(graph)
return graph
def dump(op_dict, filename="test") -> NoReturn:
# create a DG instance with svg format
graph = graphviz.Digraph(format="svg")
# create nodes for graph
for op_name, node in op_dict.items():
graph.node(op_name, label=node.op_type)
# create edges for graph
for op_name, node in op_dict.items():
if len(node.outputs) > 0:
for cname in node.outputs:
graph.edge(op_name, cname)
# render and save the graph
graph.render(filename, directory=None, view=False, cleanup=False)
def revise_op_type(nodes_op) -> NoReturn:
assert nodes_op is not None, "'nodes_op' should not be None."
for node in nodes_op:
assert node.op_type is not None and len(node.op_type) > 0
if node.op_type == "PythonOp" and node.kind.startswith("prim::"):
if "PythonOp" in node.kind:
# extract class name from the scope field and use it as the op type
class_name = node.scope.split("/")[-1]
if "[" in class_name:
class_name = class_name.split("[")[0]
node.op_type = class_name.lower()
def revise_scope(nodes_op) -> NoReturn:
assert nodes_op is not None, "'nodes_op' should not be None."
seq_counter = 1
blk_counter = 0
for node_op in nodes_op:
scope_name = node_op.scope
if "DetNet/Sequential" in scope_name:
names = [x for x in scope_name.split("/")]
assert "Quant_BasicBlock" in names[2]
start = names[2].index("[") + 1
end = names[2].index("]")
assert end > start
curr_blk_counter = int(names[2][start:end])
if blk_counter == curr_blk_counter:
names[1] = names[1] + "[layer" + str(seq_counter) + "]"
elif blk_counter + 1 == curr_blk_counter:
blk_counter = curr_blk_counter
names[1] = names[1] + "[layer" + str(seq_counter) + "]"
elif blk_counter > curr_blk_counter:
blk_counter = curr_blk_counter
seq_counter += 1
names[1] = names[1] + "[layer" + str(seq_counter) + "]"
else:
raise ValueError(
f"[ERROR] blk_counter: {blk_counter}, curr_blk_counter: {curr_blk_counter}"
)
node_op.scope = "/".join(names)
def update_op_name(graph_py, op_dict):
assert graph_py is not None, "'graph_py' should not be None."
assert op_dict is not None, "'op_dict' should not be None."
# update op name
for node in graph_py.nodes_op:
node.op_name = node.scope + "/" + node.alias
# update inputs and outputs of node_op
for node in graph_py.nodes_op:
if node.inputs is not None and len(node.inputs) > 0:
for i, iname in enumerate(node.inputs):
if iname in op_dict:
inode = op_dict.get(iname)
# update iname
node.inputs[i] = inode.scope + "/" + inode.alias
if node.outputs is not None and len(node.outputs) > 0:
for i, oname in enumerate(node.outputs):
if oname in op_dict:
onode = op_dict.get(oname)
# update oname
node.outputs[i] = onode.scope + "/" + onode.alias
# update inputs and outputs of node_io
for _, node_io in graph_py.nodes_io.items():
# update inputs
if node_io.inputs is not None and len(node_io.inputs) > 0:
for i, iname in enumerate(node_io.inputs):
if iname in op_dict:
inode = op_dict.get(iname)
# update iname
node_io.inputs[i] = inode.op_name
# update outputs
if node_io.outputs is not None and len(node_io.outputs) > 0:
for i, oname in enumerate(node_io.outputs):
if oname in op_dict:
onode = op_dict.get(oname)
# update oname
node_io.outputs[i] = onode.op_name
| [
"[email protected]"
] | |
d6da81bb8230bb0d23e43c83f2b8f0d1d236fb6e | 1511782b2cc3dcf1f7e058e5046ec67a5561ba51 | /2020/0820/abc048_b.py | 9ff929bc2dfcc189523e62da80581147dc6590ec | [] | no_license | keiouok/atcoder | 7d8a053b0cf5b42e71e265450121d1ad686fee6d | 9af301c6d63b0c2db60ac8af5bbe1431e14bb289 | refs/heads/master | 2021-09-07T11:48:55.953252 | 2021-07-31T15:29:50 | 2021-07-31T15:29:50 | 186,214,079 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from fractions import gcd
from bisect import bisect, bisect_left, bisect_right
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
a, b, x = MAP()
# count of multiples of x in [a, b] = floor(b/x) - floor((a-1)/x)
c = b // x
d = (a - 1) // x
print(c - d)
"[email protected]"
] | |
34e2f783a8d885369f4c0a834d7a310488fc97eb | 7a1243f229dd1ff671b26d5035c39219c9fa9586 | /785A - Anton and Polyhedrons.py | e357a1510c8468cfdbb6915ef3df63565885f072 | [] | no_license | henseljahja/code-forces | ce4063f30754bdee0e4d6ebc58b55f0874bf2cf9 | 1ca196636073331507b9bf48cb78cff625f44def | refs/heads/main | 2023-03-21T10:00:39.986314 | 2021-03-08T15:45:22 | 2021-03-08T15:45:22 | 339,361,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | n = int(input())
count = 0
for i in range(n):
s = input()
if s == "Tetrahedron":
count+=4
elif s == "Cube":
count+=6
elif s == "Octahedron":
count+=8
elif s == "Dodecahedron":
count+=12
elif s == "Icosahedron":
count+=20
print(count) | [
"[email protected]"
] | |
431ae555a024847331eb19fa7fc1d105bbf339cf | 046df94b4f437b2e30b80d24193fcd5380ee7b54 | /finger_exercise/3some_simple_numerical_programs/root_and_pwr(chapter3.1).py | c6a14b81e9766eaa106309a2f2f514a88f048ee1 | [] | no_license | LordBao666/MITLecture6.0001_Introduction_To_CS_Programing_In_Python | 570565a3a931269f47fe15fd83527567a24fc134 | e9fca10ad0226c8620ae36d063c2bc49da114ca4 | refs/heads/master | 2023-04-02T10:40:48.564479 | 2021-04-06T15:19:47 | 2021-04-06T15:19:47 | 344,118,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,107 | py | """
@Author : Lord_Bao
@Date : 2021/3/4
"""
"""
Finger exercise: Write a program that asks the user to enter an integer and prints
two integers, root and pwr, such that 0 < pwr < 6 and root**pwr is equal to the integer
entered by the user. If no such pair of integers exists, it should print a mes-
sage to that effect.
I think the range of pwr should actually be 1 < pwr < 6, since any number to the 1st power is itself; the code below is written based on that change.
"""
"""
1. The input may be positive or negative. To keep the reasoning simple, consider positive numbers first.
"""
# ele = int(input("Enter a integer: "))
# root = 0
# pwr = 2
# is_found = False
# # If root squared exceeds ele, then root cubed, root to the 4th, etc. all exceed ele, and so does (root+1) squared, and so on.
# # Hence one necessary loop condition can be set as root ** 2 <= ele.
# while not is_found and root ** 2 <= ele:
# for pwr in range(2, 6):
# if root ** pwr > ele:
# break
# elif root ** pwr == ele:
# is_found = True
# break
#
# if not is_found:
# root += 1
#
# if not is_found:
# print("no such pair of integers exists")
# else:
# print("root :" + str(root) + " ,pwr :" + str(pwr))
"""
2. Building on 1, handle the negative case.
If the input is negative, its pwr can obviously only be odd, so the for loop changes accordingly.
Then just write a separate loop for negative numbers.
"""
# ele = int(input("Enter a integer: "))
# root = 0
# pwr = 2
# is_found = False
#
# if ele >= 0:
# while not is_found and root ** 2 <= ele:
# for pwr in range(2, 6):
# if root ** pwr > ele:
# break
# elif root ** pwr == ele:
# is_found = True
# break
#
# if not is_found:
# root += 1
# else:
# while not is_found and root ** 3 >= ele:
# for pwr in range(3, 6, 2):  # differs from the positive case
# if root ** pwr < ele:
# break
# elif root ** pwr == ele:
# # found a suitable pair; stop looping
# is_found = True
# break
#
# if not is_found:
# root -= 1
# if not is_found:
# print("no such pair of integers exists")
# else:
# print("root :" + str(root) + " ,pwr :" + str(pwr))
"""
3. The code in 2 is fairly long; consider whether it can be unified. The only real difference is the negative case.
We can take the absolute value of a negative input, treat it as positive, and handle the differences afterwards. It turns out this is still quite messy.
In terms of readability it is arguably worse than 2.
"""
# ele = int(input("Enter a integer: "))
# root = 0
# pwr = 2
#
# is_fond = False
# guess_num = 0
# while not is_fond and root ** 2 <= abs(ele):
# for pwr in range(2, 6):
# guess_num += 1
# if root ** pwr > abs(ele):
# break
# elif root ** pwr == abs(ele):
# if ele >= 0:
# is_fond = True
# else:
# # -1 is special: abs((-1) ** 2) == 1, so handling it in the elif branch below would be wrong,
# # since abs((-1) ** 3) == 1 as well
# if ele == -1:
# root = -root
# pwr = 3
# is_fond = True
# elif pwr % 2 != 0:
# root = -root
# is_fond = True
# """这里将做修改
# """
# break
#
# if not is_fond:
# root += 1
#
# print("guess num is " + str(guess_num))
# if not is_fond:
# print("no such pair of integers exists")
# else:
# print("root :" + str(root) + " ,pwr :" + str(pwr))
"""
4. Part of the code in 3 needs optimizing: a special case like -16 should break out of the loop
immediately. To handle it, add a flag can_be_found, which reduces the number of guesses.
"""
ele = int(input("Enter an integer: "))
root = 0
pwr = 2
is_fond = False
can_be_found = True
guess_num = 0
while can_be_found and not is_fond and root ** 2 <= abs(ele):
for pwr in range(2, 6):
guess_num += 1
if root ** pwr > abs(ele):
break
elif root ** pwr == abs(ele):
if ele >= 0:
is_fond = True
else:
# -1 比较特殊,因为 -1 的 2次方的绝对值为1,如果按照elif来处理,那么将是一个错误。
# 毕竟 -1的3次方的绝对值也为1
if ele == -1:
root = -root
pwr = 3
is_fond = True
elif pwr % 2 != 0:
root = -root
is_fond = True
else:
can_be_found = False
break
if not is_fond and can_be_found:
root += 1
print("guess num is " + str(guess_num))
if not is_fond:
print("no such pair of integers exists")
else:
print("root :" + str(root) + " ,pwr :" + str(pwr))
| [
"[email protected]"
] | |
e76b055b43caaad4b0d6d1a83662bdceba8a7781 | e875742da7480b3277d0f34606e55a95c009c966 | /sage/database/db_iterator.py | cb16aeb1e09745f2f73021d68945688529ee70f5 | [
"MIT"
] | permissive | sage-org/sage-engine | b10a621c25b938b21a33e8f6273299ab8798118a | 33b3c775f6932d0e61bcce2c763f2d63846dba40 | refs/heads/master | 2022-09-03T10:25:42.121293 | 2021-05-05T16:15:37 | 2021-05-05T16:15:37 | 128,745,071 | 34 | 16 | MIT | 2021-04-19T18:26:51 | 2018-04-09T09:11:10 | Python | UTF-8 | Python | false | false | 1,809 | py | # db_iterator.py
# Author: Thomas MINIER - MIT License 2017-2020
from abc import ABC, abstractmethod
from typing import Dict, Tuple
class DBIterator(ABC):
"""
A DBIterator follows the iterator protocol and evaluates a triple pattern against a RDF dataset.
Typically, a subclass of this iterator is returned by a call to DBConnector#search_pattern.
"""
def __init__(self, pattern: Dict[str, str]):
super(DBIterator, self).__init__()
self._pattern = pattern
@property
def subject(self) -> str:
return self._pattern["subject"]
@property
def predicate(self) -> str:
return self._pattern["predicate"]
@property
def object(self) -> str:
return self._pattern["object"]
def __iter__(self):
return self
def __next__(self):
return self.next()
@abstractmethod
def last_read(self) -> str:
"""Return the index ID of the last element read"""
pass
@abstractmethod
def next(self) -> Tuple[str, str, str]:
"""Return the next RDF triple or raise `StopIteration` if there are no more triples to scan"""
pass
@abstractmethod
def has_next(self) -> bool:
"""Return True if there is still results to read, and False otherwise"""
pass
class EmptyIterator(DBIterator):
"""An iterator that yields nothing and completes immediatly"""
def last_read(self) -> str:
"""Return the index ID of the last element read"""
return ''
def next(self) -> None:
"""Return the next solution mapping or raise `StopIteration` if there are no more solutions"""
return None
def has_next(self) -> bool:
"""Return True if there is still results to read, and False otherwise"""
return False
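# A hypothetical minimal subclass (not part of the original module): it scans
# an in-memory list of triples, the way a DBConnector#search_pattern
# implementation might return matches for a pattern.
class ListIterator(DBIterator):
    """Iterate over an in-memory list of RDF triples (illustrative only)"""

    def __init__(self, pattern: Dict[str, str], triples, start: int = 0):
        super(ListIterator, self).__init__(pattern)
        self._triples = triples
        self._pos = start

    def last_read(self) -> str:
        """Return the index ID of the last element read"""
        return str(self._pos)

    def next(self) -> Tuple[str, str, str]:
        """Return the next RDF triple or raise `StopIteration`"""
        if not self.has_next():
            raise StopIteration()
        triple = self._triples[self._pos]
        self._pos += 1
        return triple

    def has_next(self) -> bool:
        """Return True if there are still triples to read"""
        return self._pos < len(self._triples)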
| [
"[email protected]"
] | |
aa3278469beb7aa0d87b269d60de80611431af8b | 65bc6de5088d989b24571213fb16ebc557f922b4 | /for_beginners/render.py | 3522b818c2a2f7351f64dfa6f9191e102dc2dc86 | [] | no_license | vpj/for_beginners | 76f71ab780194c47ecd5cd67c21359b5d22bebd2 | 45af7567c821ffe329d7db64390aaa391176d974 | refs/heads/master | 2020-03-09T01:44:44.353629 | 2019-02-23T05:42:01 | 2019-02-23T05:42:01 | 128,522,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | from IPython.core.display import display,HTML,Markdown
import random
import string
def diagram_tensor(shape):
dom_id = ''.join(random.choices(string.ascii_lowercase, k=10))
without_none = []
for s in shape:
if s is not None and str(s) != '?':
without_none.append(str(s))
js = 'main.renderTensor("%s", [%s]);' % (dom_id, ', '.join(without_none))
js = 'require(["main"], function(main) { ' + js + ' });'
display(HTML('<div id="%s"></div><script>%s</script>' % (dom_id, js)))
def latex(string):
display(Markdown("\\begin{align}\n%s\n\\end{align}" % string))
def init():
file = open("./js/main.js")
display(HTML('<script>' + file.read() + '</script>'))
file = open("./js/styles.css")
display(HTML('<style>' + file.read() + '</style>'))
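# Hypothetical notebook usage (not part of the original module); assumes the
# bundled ./js/main.js exposes main.renderTensor, as the calls above expect:
#
#     init()                           # inject the JS and CSS once per notebook
#     diagram_tensor((32, None, 64))   # None / '?' dimensions are dropped
#     latex(r"y &= Wx + b")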
| [
"[email protected]"
] | |
e23b16ab111fbaf9c461df611408f424da327b87 | 3b1053ea38fee9a59d335dd75bb6a6906d298594 | /tests/history/test_db.py | b89ddb4fc414fd676c723f8f83aff718c8588661 | [
"MIT"
] | permissive | tianshengsui/virtool | 8c59bb36c7e2924586be34fabc6b861e16691b7d | eb75637eb6ca9dcba647ad8acad5d316877dd55e | refs/heads/master | 2023-04-19T16:36:54.894894 | 2021-04-23T19:09:33 | 2021-04-23T19:09:33 | 295,793,679 | 0 | 0 | MIT | 2020-09-30T23:53:54 | 2020-09-15T16:55:59 | null | UTF-8 | Python | false | false | 4,959 | py | import datetime
from aiohttp.test_utils import make_mocked_coro
import pytest
import virtool.history.db
class TestAdd:
async def test(self, snapshot, dbi, static_time, test_otu_edit, test_change):
app = {
"db": dbi,
"settings": {
"data_path": "/foo/bar"
}
}
old, new = test_otu_edit
change = await virtool.history.db.add(
app,
"edit",
old,
new,
"Edited {}".format(new["name"]),
"test"
)
document = await dbi.history.find_one()
snapshot.assert_match(change, "change")
snapshot.assert_match(document, "document")
async def test_create(self, snapshot, dbi, static_time, test_otu_edit, test_change):
app = {
"db": dbi,
"settings": {
"data_path": "/foo/bar"
}
}
# There is no old document because this is a change document for a otu creation operation.
old = None
new, _ = test_otu_edit
description = "Created {}".format(new["name"])
change = await virtool.history.db.add(
app,
"create",
old,
new,
description,
"test"
)
document = await dbi.history.find_one()
snapshot.assert_match(change)
snapshot.assert_match(document)
async def test_remove(self, snapshot, dbi, static_time, test_otu_edit, test_change):
"""
Test that the addition of a change due to otu removal inserts the expected change document.
"""
app = {
"db": dbi,
"settings": {
"data_path": "/foo/bar"
}
}
# There is no new document because this is a change document for a otu removal operation.
new = None
old, _ = test_otu_edit
description = "Removed {}".format(old["name"])
change = await virtool.history.db.add(
app,
"remove",
old,
new,
description,
"test"
)
document = await dbi.history.find_one()
snapshot.assert_match(change)
snapshot.assert_match(document)
@pytest.mark.parametrize("file", [True, False])
async def test_get(file, mocker, snapshot, dbi):
await dbi.history.insert_one({
"_id": "baz.2",
"diff": "file" if file else {
"foo": "bar"
}
})
mocker.patch("virtool.history.utils.read_diff_file", make_mocked_coro(return_value="loaded"))
app = {
"db": dbi,
"settings": {
"data_path": "/foo/bar"
}
}
document = await virtool.history.db.get(app, "baz.2")
assert document == {
"id": "baz.2",
"diff": "loaded" if file else {
"foo": "bar"
}
}
@pytest.mark.parametrize("exists", [True, False])
async def test_get_most_recent_change(exists, snapshot, dbi, static_time):
"""
Test that the most recent change document is returned for the given ``otu_id``.
"""
# First change is 3 days before the second
delta = datetime.timedelta(3)
if exists:
await dbi.history.insert_many([
{
"_id": "6116cba1.1",
"description": "Description",
"method_name": "update",
"created_at": static_time.datetime - delta,
"user": {
"id": "test"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 1
},
"index": {
"id": "unbuilt"
}
},
{
"_id": "6116cba1.2",
"description": "Description number 2",
"method_name": "update",
"created_at": static_time.datetime,
"user": {
"id": "test"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 2
},
"index": {
"id": "unbuilt"
}
}
])
most_recent = await virtool.history.db.get_most_recent_change(dbi, "6116cba1")
snapshot.assert_match(most_recent)
@pytest.mark.parametrize("remove", [True, False])
async def test_patch_to_version(remove, snapshot, dbi, create_mock_history):
await create_mock_history(remove=remove)
app = {
"db": dbi
}
current, patched, reverted_change_ids = await virtool.history.db.patch_to_version(
app,
"6116cba1",
1
)
snapshot.assert_match(current)
snapshot.assert_match(patched)
snapshot.assert_match(reverted_change_ids)
| [
"[email protected]"
] | |
e705e73a92cfe656ad0214556898e7e4b23a554e | 3603f8f76ff81ea75bfc916888bdcfa55b7f12e4 | /alds/alds1_3_c_3.py | 8f430994eab3e0fa497613b4711b4f742fd06f07 | [] | no_license | kimotot/aizu | 4de0319959a3b166b8c2c4940ab7b701b6ee3395 | 315be1240cff733e1c6a7cd98942a95b3bd7ec96 | refs/heads/master | 2021-07-24T12:37:41.935302 | 2021-03-10T09:05:05 | 2021-03-10T09:05:05 | 91,927,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | class dllist:
def __init__(self):
self._list = []
self._fp = 0
self._bp = 0
def insert(self, x):
self._list.append(x)
self._fp += 1
    def delete(self, x):
        # delete the first occurrence of x from the logical front, i.e. scan
        # the underlying list from its tail, skipping the first self._bp
        # entries that deleteLast() removed lazily
        for i in range(len(self._list) - 1, self._bp - 1, -1):
            if self._list[i] == x:
                del self._list[i]
                self._fp -= 1
                return
def deleteFirst(self):
self._list.pop()
def deleteLast(self):
self._bp += 1
def disp(self):
print(" ".join([str(x) for x in self._list[self._bp:][::-1]]))
if __name__ == '__main__':
q = dllist()
n = int(input())
for _ in range(n):
inst = input().split()
if inst[0] == "insert":
q.insert(int(inst[1]))
elif inst[0] == "delete":
q.delete(int(inst[1]))
elif inst[0] == "deleteFirst":
q.deleteFirst()
elif inst[0] == "deleteLast":
q.deleteLast()
q.disp()
| [
"[email protected]"
] | |
03aea4efe7d08e5382591defb2dbff580c6377bb | 89dedd7f3c7acc81d12e2bcb2e716f9af9e5fa04 | /net/data/verify_certificate_chain_unittest/generate-violates-basic-constraints-pathlen-0.py | 3a5d481a89b5dd5ba56fe6f423bd03a657836f27 | [
"BSD-3-Clause"
] | permissive | bino7/chromium | 8d26f84a1b6e38a73d1b97fea6057c634eff68cb | 4666a6bb6fdcb1114afecf77bdaa239d9787b752 | refs/heads/master | 2022-12-22T14:31:53.913081 | 2016-09-06T10:05:11 | 2016-09-06T10:05:11 | 67,410,510 | 1 | 3 | BSD-3-Clause | 2022-12-17T03:08:52 | 2016-09-05T10:11:59 | null | UTF-8 | Python | false | false | 1,436 | py | #!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with 2 intermediates. The first intermediate has a basic
constraints path length of 0, so it is a violation for it to have a subordinate
intermediate."""
import common
# Self-signed root certificate (used as trust anchor).
root = common.create_self_signed_root_certificate('Root')
# Intermediate with pathlen 0
intermediate1 = common.create_intermediate_certificate('Intermediate1', root)
intermediate1.get_extensions().set_property('basicConstraints',
'critical,CA:true,pathlen:0')
# Another intermediate (with the same pathlen restriction)
intermediate2 = common.create_intermediate_certificate('Intermediate2',
intermediate1)
intermediate2.get_extensions().set_property('basicConstraints',
'critical,CA:true,pathlen:0')
# Target certificate.
target = common.create_end_entity_certificate('Target', intermediate2)
chain = [target, intermediate2, intermediate1]
trusted = common.TrustAnchor(root, constrained=False)
time = common.DEFAULT_TIME
verify_result = False
errors = ['max_path_length reached']
common.write_test_file(__doc__, chain, trusted, time, verify_result, errors)
| [
"[email protected]"
] | |
92fdbccdfd0fc9f6975c00ee840161f5e714294f | 965ef7770b0efdf28ba1ab74e72598353060d256 | /ex19.2.py | bba7bf0979f9673338439ffb38f22e4cdda3d9a1 | [] | no_license | DikranHachikyan/CPYT210409-PLDA | 87dfca698212905b33b50a0564ae904911d7ff00 | 7ec99a7ef6082e8b58d5a79a66a7875837520d21 | refs/heads/master | 2023-04-18T04:46:38.231406 | 2021-04-28T10:05:08 | 2021-04-28T10:05:08 | 356,246,772 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 145 | py |
if __name__ == '__main__':
    tp = 11, 12, 34, 56, 66  # a tuple literal; parentheses are optional
for value in tp:
print(f'value = {value}')
print('--- ---') | [
"[email protected]"
] | |
0a34dd857a7b93bf5b7bda441a669b2fd4af80c6 | a189360771d93aa4bcfdfb9f7a794f770b557528 | /ch04/04math.py | 1e27e49063acd87d3434ea09921f2e113d401a0d | [] | no_license | kevin510610/Book_Python-Tensorflow_PoWen-Ko | 88402a6a9ae3da9fdba7858e56f9c364264854c0 | cbeede8ab566214da8fa5c5953f8ab85c2d23bb8 | refs/heads/master | 2023-04-04T05:37:52.283802 | 2021-04-13T03:09:24 | 2021-04-13T03:09:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #!/usr/bin/env
__author__ = "Powen Ko, www.powenko.com"
a=5
b=2.2
print(a+b)   # float addition
print(a-b)   # subtraction
print(a*2)   # multiplication -> 10
print(a/2)   # true division -> 2.5
print(a<<1)  # left bit shift -> 10
print(a>>1)  # right bit shift -> 2
print(a%3)   # modulo -> 2
d=4.3
print(d/3)   # true division
print(d//3)  # floor division -> 1.0
| [
"[email protected]"
] | |
c119987946003ffee661f92e47bec4950eb56b4a | 40c890270ff8dcdcce4006b4cfbc2ce9d7488992 | /accounts/migrations/0003_remove_city_city_id.py | 679ebf454dbd3248bef81a12d3ac1de9b949daab | [] | no_license | khanansha/Concierge_healthcare | d084cabcb0ad5a8fe6914357f31df26a678bfbbd | a7178797233ccccc2918b4f602eb2086239c1e3a | refs/heads/master | 2022-08-15T23:33:46.492016 | 2020-06-01T05:15:46 | 2020-06-01T05:15:46 | 264,127,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | # Generated by Django 2.0.2 on 2020-05-05 08:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_city'),
]
operations = [
migrations.RemoveField(
model_name='city',
name='city_id',
),
]
| [
"[email protected]"
] | |
43f97e33e7e0ffe81805cf0f366dc587951fd7a7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03242/s186722755.py | 1e12736a618aee1a0f1febb374b8dc0561b0b758 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | #k = int(input())
#s = input()
#a, b = map(int, input().split())
#s, t = map(str, input().split())
#l = list(map(int, input().split()))
#l = [list(map(int,input().split())) for i in range(n)]
#a = [list(input()) for _ in range(n)]
#a = [input() for _ in range(n)]
n = input()
for i in range(3):
    # every digit is guaranteed to be 1 or 9, so swap: 9 -> 1, 1 -> 9
    if n[i] == "9":
        print("1", end="")
    else:
        print("9", end="")
| [
"[email protected]"
] | |
ae7c02d0634e195e2f5f16fff2e7f39cf0af80bf | a2b7fba22a16f379ccca2e38d9d6291b9562abc3 | /Graph Theory/Shortest Path/Dijkstra_Adj_List.py | 5c2f782bdc48fbbc9154089714557f45d4c33b37 | [] | no_license | neelamy/Algorithm | 565c1cea72715745653e90a3dabbba1e9e283fd8 | 7c9f53ff27bcb840b9dbc20d520f003f4d76fe17 | refs/heads/master | 2020-06-10T15:53:12.967832 | 2017-07-18T07:59:32 | 2017-07-18T07:59:32 | 75,953,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,384 | py | # Python program for Dijkstra algorithm to find shortest path
# from source in undirected graph
# The program is for adjacency list representation of the graph
# Complexity: O(V^2 + E) (get_min's linear scan dominates; the queue is a set)
from collections import defaultdict
#Class to represent a graph
class Graph:
def __init__(self,vertices):
self.V= vertices #No. of vertices
self.graph = defaultdict(list) # default dictionary to store graph
# function to add an edge to graph
def addEdge(self,u,v,w):
self.graph[u].append((v,w))
self.graph[v].append((u,w))
# A utility function to find the vertex with minimum dist value, from
# the set of vertices still in queue
def get_min(self,dist,queue):
        # Initialize the minimum value as infinity and the index as -1
        minimum = float("Inf")
        index = -1
        # from the dist array, pick the vertex with the minimum value that is still in the queue
for i in range(len(dist)):
if dist[i] < minimum and i in queue:
minimum = dist[i]
index =i
return index
# print the solution
def printSolution(self, dist):
print("Vertex Distance from Source")
for i in range(self.V):
print("%d \t\t %d" % (i, dist[i]))
# Function to construct and print MST for a graph represented using adjacency
# matrix representation
def dijkstra(self, src):
# Initialize all dist/distance as INFINITE
dist = [float("Inf")] * self.V
# Always include first 1st vertex in MST
dist[src] = 0 # Make dist 0 so that this vertex is picked as first vertex
        # Add all vertices to the queue; a set gives O(1) membership tests
        queue = set(range(self.V))
while queue:
# Pick the minimum dist vertex from the set of vertices
# still in queue
u = self.get_min(dist,queue)
# remove min element and print it
queue.remove(u)
# Update dist value and parent index of the adjacent vertices of
# the picked vertex. Consider only those vertices which are still in
# queue
for node,weight in self.graph[u]:
if node in queue and dist[u] + weight < dist[node]:
dist[node] = dist[u] + weight
# print all distance
self.printSolution(dist)
g = Graph(9)
g.addEdge(0, 1, 4)
g.addEdge(0, 7, 8)
g.addEdge(1, 2, 8)
g.addEdge(1, 7, 11)
g.addEdge(2, 3, 7)
g.addEdge(2, 8, 2)
g.addEdge(2, 5, 4)
g.addEdge(3, 4, 9)
g.addEdge(3, 5, 14)
g.addEdge(4, 5, 10)
g.addEdge(5, 6, 2)
g.addEdge(6, 7, 1)
g.addEdge(6, 8, 6)
g.addEdge(7, 8, 7)
#Print the solution
g.dijkstra(0)
| [
"[email protected]"
] | |
4e2e690cffc6ea0a5b52591b5a5e0a009dcd358c | 882c2b3c410b838372d43e431d1ccd6e02ba45f6 | /CE/AlMgSiX_FCC/fit_normal_ce.py | 511a540b3be842951dfe083c03285d25100530dc | [] | no_license | davidkleiven/GPAWTutorial | d46f7b8750172ba5ff36ccc27f97089cac94fd95 | 0bffc300df1d048142559855d3ccb9d0d8074d2e | refs/heads/master | 2021-06-08T05:44:42.784850 | 2021-02-25T10:23:28 | 2021-02-25T10:23:28 | 98,557,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | from clease.data_manager import CorrFuncEnergyDataManager
from clease import PhysicalRidge
from clease.physical_ridge import random_cv_hyper_opt
import numpy as np
import re
import json
db_name = "data/almgsiX_clease_voldep.db"
manager = CorrFuncEnergyDataManager(
db_name, "binary_linear_cf"
)
X, y = manager.get_data([('struct_type', '=', 'initial')])
names = manager._feat_names
sizes = [int(n[1]) for n in names]
prog = re.compile(r"d(\d+)")
dia = []
for n in names:
res = prog.findall(n)
if not res:
dia.append(0.0)
else:
dia.append(float(res[0]))
regressor = PhysicalRidge(normalize=False)
regressor.sizes = sizes
regressor.diameters = dia
params = {
'lamb_dia': np.logspace(-6, 6, 5000).tolist(),
'lamb_size': np.logspace(-6, 6, 5000).tolist(),
'size_decay': ['linear', 'exponential', 'poly2', 'poly4', 'poly6'],
'dia_decay': ['linear', 'exponential', 'poly2', 'poly4', 'poly6']
}
res = random_cv_hyper_opt(regressor, params, X, y, cv=5, num_trials=10000)
outfile = "data/almgsix_normal_ce.json"
data = {
'names': manager._feat_names,
'coeff': res['best_coeffs'].tolist(),
'X': X.tolist(),
'y': y.tolist(),
'cv': res['best_cv'],
'eci': {n: c for n, c in zip(manager._feat_names, res['best_coeffs'])},
}
with open(outfile, 'w') as out:
json.dump(data, out)
print(f"Results written to: {outfile}") | [
"[email protected]"
] | |
d257b5b87299f18aed6ffda8d0085bb46c30103e | 0c6f666fdf7e2ba22f5a3ae16748920a3b8583ff | /main/forms.py | 7ec71db826a82359caf4bdd5587cf98660550eba | [] | no_license | rrabit42/Seoul1ro | 8e9f07fab5bbe247998beeea6b2776fb1e6016d5 | fdb30ef184cba553d3baaaabcceca2644c9dea78 | refs/heads/master | 2023-04-30T12:20:11.785273 | 2021-05-24T12:22:59 | 2021-05-24T12:22:59 | 369,833,280 | 1 | 3 | null | 2021-09-19T16:42:52 | 2021-05-22T14:49:04 | CSS | UTF-8 | Python | false | false | 159 | py | from django import forms
from main.models import Search
class InputForm(forms.ModelForm):
class Meta:
model = Search
fields = '__all__'
| [
"[email protected]"
] | |
8c718a33f548a45817a6cd05154b9cb7c371f9bf | 1af49694004c6fbc31deada5618dae37255ce978 | /build/fuchsia/common_args.py | f23e8eb74946cbee6e9fc4235e82efbcc3bc8002 | [
"BSD-3-Clause"
] | permissive | sadrulhc/chromium | 59682b173a00269ed036eee5ebfa317ba3a770cc | a4b950c23db47a0fdd63549cccf9ac8acd8e2c41 | refs/heads/master | 2023-02-02T07:59:20.295144 | 2020-12-01T21:32:32 | 2020-12-01T21:32:32 | 317,678,056 | 3 | 0 | BSD-3-Clause | 2020-12-01T21:56:26 | 2020-12-01T21:56:25 | null | UTF-8 | Python | false | false | 6,550 | py | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import importlib
import logging
import os
import sys
from common import GetHostArchFromPlatform
def _AddTargetSpecificationArgs(arg_parser):
"""Returns a parser that handles the target type used for the test run."""
device_args = arg_parser.add_argument_group(
'target',
'Arguments specifying the Fuchsia target type. To see a list of '
'arguments available for a specific target type, specify the desired '
'target to use and add the --help flag.')
device_args.add_argument('--target-cpu',
default=GetHostArchFromPlatform(),
help='GN target_cpu setting for the build. Defaults '
'to the same architecture as host cpu.')
device_args.add_argument('--device',
default=None,
choices=['aemu', 'qemu', 'device', 'custom'],
help='Choose to run on aemu|qemu|device. '
'By default, Fuchsia will run on AEMU on x64 '
'hosts and QEMU on arm64 hosts. Alternatively, '
'setting to custom will require specifying the '
'subclass of Target class used via the '
'--custom-device-target flag.')
device_args.add_argument('-d',
action='store_const',
dest='device',
const='device',
help='Run on device instead of emulator.')
device_args.add_argument('--custom-device-target',
default=None,
help='Specify path to file that contains the '
'subclass of Target that will be used. Only '
'needed if device specific operations such as '
'paving is required.')
device_args.add_argument('--fuchsia-out-dir',
help='Path to a Fuchsia build output directory. '
'Setting the GN arg '
'"default_fuchsia_build_dir_for_installation" '
'will cause it to be passed here.')
def _GetTargetClass(args):
"""Gets the target class to be used for the test run."""
if args.device == 'custom':
if not args.custom_device_target:
raise Exception('--custom-device-target flag must be set when device '
'flag set to custom.')
target_path = args.custom_device_target
else:
if not args.device:
args.device = 'aemu' if args.target_cpu == 'x64' else 'qemu'
target_path = '%s_target' % args.device
try:
loaded_target = importlib.import_module(target_path)
except ImportError:
    logging.error('Cannot import from %s. Make sure that --custom-device-target '
'is pointing to a file containing a target '
'module.' % target_path)
raise
return loaded_target.GetTargetType()
def AddCommonArgs(arg_parser):
"""Adds command line arguments to |arg_parser| for options which are shared
across test and executable target types.
Args:
arg_parser: an ArgumentParser object."""
_AddTargetSpecificationArgs(arg_parser)
# Parse the args used to specify target
module_args, _ = arg_parser.parse_known_args()
# Determine the target class and register target specific args.
target_class = _GetTargetClass(module_args)
target_class.RegisterArgs(arg_parser)
package_args = arg_parser.add_argument_group('package', 'Fuchsia Packages')
package_args.add_argument(
'--package',
action='append',
help='Paths of the packages to install, including '
'all dependencies.')
package_args.add_argument(
'--package-name',
help='Name of the package to execute, defined in ' + 'package metadata.')
common_args = arg_parser.add_argument_group('common', 'Common arguments')
common_args.add_argument('--runner-logs-dir',
help='Directory to write test runner logs to.')
common_args.add_argument('--exclude-system-logs',
action='store_false',
dest='include_system_logs',
help='Do not show system log data.')
common_args.add_argument('--verbose', '-v', default=False,
action='store_true',
help='Enable debug-level logging.')
def ConfigureLogging(args):
"""Configures the logging level based on command line |args|."""
logging.basicConfig(level=(logging.DEBUG if args.verbose else logging.INFO),
format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
# The test server spawner is too noisy with INFO level logging, so tweak
# its verbosity a bit by adjusting its logging level.
logging.getLogger('chrome_test_server_spawner').setLevel(
logging.DEBUG if args.verbose else logging.WARN)
# Verbose SCP output can be useful at times but oftentimes is just too noisy.
# Only enable it if -vv is passed.
logging.getLogger('ssh').setLevel(
logging.DEBUG if args.verbose else logging.WARN)
# TODO(crbug.com/1121763): remove the need for additional_args
def GetDeploymentTargetForArgs(additional_args=None):
"""Constructs a deployment target object using command line arguments.
If needed, an additional_args dict can be used to supplement the
command line arguments."""
# Determine target type from command line arguments.
device_type_parser = argparse.ArgumentParser()
_AddTargetSpecificationArgs(device_type_parser)
module_args, _ = device_type_parser.parse_known_args()
target_class = _GetTargetClass(module_args)
# Process command line args needed to initialize target in separate arg
# parser.
target_arg_parser = argparse.ArgumentParser()
target_class.RegisterArgs(target_arg_parser)
known_args, _ = target_arg_parser.parse_known_args()
target_args = vars(known_args)
# target_cpu is needed to determine target type, and fuchsia_out_dir
# is needed for devices with Fuchsia built from source code.
target_args.update({'target_cpu': module_args.target_cpu})
target_args.update({'fuchsia_out_dir': module_args.fuchsia_out_dir})
if additional_args:
target_args.update(additional_args)
return target_class(**target_args)
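# Hypothetical usage sketch (not part of the original module): a test-runner
# script would typically wire these helpers together along these lines,
# assuming the returned Target is a context manager with a Start() method:
#
#     parser = argparse.ArgumentParser()
#     AddCommonArgs(parser)
#     args = parser.parse_args()
#     ConfigureLogging(args)
#     with GetDeploymentTargetForArgs() as target:
#         target.Start()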
| [
"[email protected]"
] | |
9a29514981517e14e9d56da464497502f36a5b60 | fbe787892572c911a3dad0aacf11e0edf42bec25 | /actor_critic/actor_critic_baselines.py | 0b5e89ed012a03428429125bc460bb332b73c42f | [] | no_license | vwxyzjn/tensorflow-beginner | edebed5238cc687d96bd2cd5120de0a135a159a5 | 4b76d2dae96ca57ac90a4a6cf0c2935d6f390be8 | refs/heads/master | 2020-05-09T23:23:17.459116 | 2019-04-15T14:21:29 | 2019-04-15T14:21:29 | 181,499,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | import argparse
import gym
import numpy as np
from stable_baselines.deepq import DQN, MlpPolicy
def callback(lcl, _glb):
"""
The callback function for logging and saving
:param lcl: (dict) the local variables
:param _glb: (dict) the global variables
:return: (bool) is solved
"""
    # stop training once the mean reward over the last 100 episodes reaches 199
if len(lcl['episode_rewards'][-101:-1]) == 0:
mean_100ep_reward = -np.inf
else:
mean_100ep_reward = round(float(np.mean(lcl['episode_rewards'][-101:-1])), 1)
print("mean_100ep_reward", mean_100ep_reward)
is_solved = lcl['step'] > 100 and mean_100ep_reward >= 199
return not is_solved
def main(args):
"""
    Train the DQN model for the cartpole problem
:param args: (ArgumentParser) the input arguments
"""
env = gym.make("CartPole-v0")
model = DQN(
env=env,
policy=MlpPolicy,
learning_rate=1e-3,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
)
model.learn(total_timesteps=args.max_timesteps, callback=callback, seed=1)
print("Finished")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train DQN on cartpole")
parser.add_argument('--max-timesteps', default=100000, type=int, help="Maximum number of timesteps")
args = parser.parse_args()
main(args) | [
"[email protected]"
] | |
5a6e8273d7b26abe7ad8034b181f781338670eb7 | 45a506c5622f366e7013f1276f446a18fc2fc00d | /kedro/extras/transformers/__init__.py | 5a9b3e9e214377fd9b451e3ec79e86b6822e459b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sbrugman/kedro | 3e48bcc56cc61fbe575d1a52c4f5bf3e84b6f974 | 25c92b765fba4605a748bdaaa801cee540da611e | refs/heads/develop | 2023-07-20T11:24:07.242114 | 2021-10-08T14:05:03 | 2021-10-08T14:05:03 | 404,517,683 | 1 | 2 | NOASSERTION | 2021-09-08T22:53:09 | 2021-09-08T22:53:09 | null | UTF-8 | Python | false | false | 1,679 | py | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""``kedro.extras.transformers`` is the home of Kedro's dataset transformers."""
from .memory_profiler import ProfileMemoryTransformer
from .time_profiler import ProfileTimeTransformer
__all__ = ["ProfileMemoryTransformer", "ProfileTimeTransformer"]
| [
"[email protected]"
] | |
1d66174523025aff8645ae370b76a48e039bd57f | 3de6f7f6d8497e728101c368ec778e67f769bd6c | /notes/algo-ds-practice/problems/list/copy_list_random_pointer.py | 50fdca295a2b01ae01f62fd4ce46c8ebe212ba04 | [
"MIT"
] | permissive | arnabs542/interview-notes | 1fceae0cafa74ef23d0ce434e2bc8e85c4c76fdd | 65af75e2b5725894fa5e13bb5cd9ecf152a0d652 | refs/heads/master | 2023-01-03T06:38:59.410704 | 2020-10-25T06:49:43 | 2020-10-25T06:49:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | '''
You are given a linked list with one pointer of each node pointing to the next node just like the usual.
Every node, however, also has a second pointer that can point to any node in the list.
Now write a program to deep copy this list.
Solution 1:
First back up all the nodes' next pointers to another array.
next_backup = [node1, node2, node3 ... None]
Meaning next_backup[0] = node[0].next = node1.
Note that these are just references.
Now just deep-copy the original linked list, only considering the next pointers.
While copying (or after it), point
`original_0.next = copy_0`
and
`copy_0.random = original_0`
Now, while traversing the copy list, set the random pointers of copies correctly:
copy.random = copy.random.random.next
Now, traverse the original list and fix back the next pointers using the next_backup array.
Total complexity -> O(n+n+n) = O(n)
Space complexity = O(n)
SOLUTION 2:
We can also do it in space complexity O(1).
This is actually easier to understand. ;)
For every node original_i, make a copy of it just in front of it.
For example, if original_0.next = original_1, then now it will become
`original_0.next = copy_0`
`copy_0.next = original_1`
Now, set the random pointers of copies:
`copy_i.random = original_i.random.next`
We can do this because we know that the copy of a node is just after the original.
Now, fix the next pointers of all the nodes:
original_i.next = original_i.next.next
copy_i.next = copy_i.next.next
Time complexity = O(n)
Space complexity = O(1)
'''
| [
"[email protected]"
] | |
4911aa2d0d900d248648eafe69addf6919b82d92 | 5e5a8270f07ac3ca2017b2c4c0fdc903bb4fc25e | /src/pybel/parser/parse_control.py | 68c6188e7122a05ea42131ba5919233217c87b46 | [
"Apache-2.0"
] | permissive | nsoranzo/pybel | 37f2553103ba721925d49a7fafbd1b0b3177a936 | 3663d24614124509871043d9d411ed400ddba385 | refs/heads/master | 2021-01-18T11:33:28.069016 | 2017-03-08T00:00:32 | 2017-03-08T00:00:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,538 | py | # -*- coding: utf-8 -*-
"""
Control Parser
~~~~~~~~~~~~~~
This module handles parsing control statements, which add annotations and namespaces to the document.
See: https://wiki.openbel.org/display/BLD/Control+Records
"""
import logging
import re
from pyparsing import Suppress, MatchFirst
from pyparsing import pyparsing_common as ppc
from .baseparser import BaseParser, quote, delimitedSet, And, oneOf
from .parse_exceptions import *
from .utils import is_int
from ..constants import BEL_KEYWORD_STATEMENT_GROUP, BEL_KEYWORD_CITATION, BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT, \
BEL_KEYWORD_ALL, ANNOTATIONS
from ..constants import CITATION_ENTRIES, EVIDENCE, CITATION_TYPES, BEL_KEYWORD_SET, BEL_KEYWORD_UNSET, CITATION
log = logging.getLogger('pybel')
class ControlParser(BaseParser):
def __init__(self, annotation_dicts=None, annotation_expressions=None, citation_clearing=True):
"""Builds parser for BEL valid_annotations statements
:param annotation_dicts: A dictionary of {annotation: set of valid values} for parsing
:type annotation_dicts: dict
:param annotation_expressions: A dictionary of {annotation: regular expression string}
:type annotation_expressions: dict
:param citation_clearing: Should :code:`SET Citation` statements clear evidence and all annotations?
:type citation_clearing: bool
"""
self.citation_clearing = citation_clearing
self.valid_annotations = {} if annotation_dicts is None else annotation_dicts
self.annotations_re = {} if annotation_expressions is None else annotation_expressions
self.annotations_re_compiled = {k: re.compile(v) for k, v in self.annotations_re.items()}
self.statement_group = None
self.citation = {}
self.evidence = None
self.annotations = {}
annotation_key = ppc.identifier('key').setParseAction(self.handle_annotation_key)
self.set_statement_group = And([Suppress(BEL_KEYWORD_STATEMENT_GROUP), Suppress('='), quote('group')])
self.set_statement_group.setParseAction(self.handle_set_statement_group)
self.set_citation = And([Suppress(BEL_KEYWORD_CITATION), Suppress('='), delimitedSet('values')])
self.set_citation.setParseAction(self.handle_set_citation)
supporting_text_tags = oneOf([BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT])
self.set_evidence = And([Suppress(supporting_text_tags), Suppress('='), quote('value')])
self.set_evidence.setParseAction(self.handle_set_evidence)
set_command_prefix = And([annotation_key('key'), Suppress('=')])
self.set_command = set_command_prefix + quote('value')
self.set_command.setParseAction(self.handle_set_command)
self.set_command_list = set_command_prefix + delimitedSet('values')
self.set_command_list.setParseAction(self.handle_set_command_list)
self.unset_command = annotation_key('key')
self.unset_command.addParseAction(self.handle_unset_command)
self.unset_evidence = supporting_text_tags(EVIDENCE)
self.unset_evidence.setParseAction(self.handle_unset_evidence)
self.unset_citation = Suppress(BEL_KEYWORD_CITATION)
self.unset_citation.setParseAction(self.handle_unset_citation)
self.unset_statement_group = Suppress(BEL_KEYWORD_STATEMENT_GROUP)
self.unset_statement_group.setParseAction(self.handle_unset_statement_group)
self.unset_list = delimitedSet('values')
self.unset_list.setParseAction(self.handle_unset_list)
self.unset_all = Suppress(BEL_KEYWORD_ALL)
self.unset_all.setParseAction(self.handle_unset_all)
set_tag = Suppress(BEL_KEYWORD_SET)
unset_tag = Suppress(BEL_KEYWORD_UNSET)
self.set_statements = set_tag + MatchFirst([
self.set_statement_group,
self.set_citation,
self.set_evidence,
self.set_command,
self.set_command_list,
])
self.unset_statements = unset_tag + MatchFirst([
self.unset_all,
self.unset_citation,
self.unset_evidence,
self.unset_statement_group,
self.unset_command,
self.unset_list
])
self.language = self.set_statements | self.unset_statements
BaseParser.__init__(self, self.language)
def validate_annotation_key(self, key):
if key not in self.valid_annotations and key not in self.annotations_re_compiled:
raise UndefinedAnnotationWarning(key)
def validate_value(self, key, value):
if key in self.valid_annotations and value not in self.valid_annotations[key]:
raise IllegalAnnotationValueWarning(value, key)
elif key in self.annotations_re_compiled and not self.annotations_re_compiled[key].match(value):
raise MissingAnnotationRegexWarning(value, key)
def handle_annotation_key(self, s, l, tokens):
"""Called on all annotation keys before parsing to validate that it's either enumerated or as a regex"""
key = tokens['key']
if self.citation_clearing and not self.citation:
raise MissingCitationException(s)
self.validate_annotation_key(key)
return tokens
def handle_set_statement_group(self, s, l, tokens):
self.statement_group = tokens['group']
return tokens
def handle_set_citation(self, s, l, tokens):
self.clear_citation()
values = tokens['values']
if not (3 <= len(values) <= 6):
raise InvalidCitationException(s)
if values[0] not in CITATION_TYPES:
raise InvalidCitationType(values[0])
if values[0] == 'PubMed' and not is_int(values[2]):
raise InvalidPubMedIdentifierWarning(values[2])
self.citation = dict(zip(CITATION_ENTRIES, values))
return tokens
def handle_set_evidence(self, s, l, tokens):
self.evidence = tokens['value']
return tokens
def handle_set_command(self, s, l, tokens):
key = tokens['key']
value = tokens['value']
self.validate_value(key, value)
self.annotations[key] = value
return tokens
def handle_set_command_list(self, s, l, tokens):
key = tokens['key']
values = tokens['values']
for value in values:
self.validate_value(key, value)
self.annotations[key] = set(values)
return tokens
def handle_unset_statement_group(self, s, l, tokens):
if self.statement_group is None:
raise MissingAnnotationKeyWarning(BEL_KEYWORD_STATEMENT_GROUP)
self.statement_group = None
return tokens
def handle_unset_citation(self, s, l, tokens):
if not self.citation:
raise MissingAnnotationKeyWarning(BEL_KEYWORD_CITATION)
self.clear_citation()
return tokens
def handle_unset_evidence(self, s, l, tokens):
if self.evidence is None:
raise MissingAnnotationKeyWarning(tokens[EVIDENCE])
self.evidence = None
return tokens
def validate_unset_command(self, key):
if key not in self.annotations:
raise MissingAnnotationKeyWarning(key)
def handle_unset_command(self, s, l, tokens):
key = tokens['key']
self.validate_unset_command(key)
del self.annotations[key]
return tokens
def handle_unset_list(self, s, l, tokens):
for key in tokens['values']:
if key in {BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT}:
self.evidence = None
else:
self.validate_unset_command(key)
del self.annotations[key]
return tokens
def handle_unset_all(self, s, l, tokens):
self.clear()
return tokens
def get_annotations(self):
"""
:return: The currently stored BEL annotations
:rtype: dict
"""
return {
EVIDENCE: self.evidence,
CITATION: self.citation.copy(),
ANNOTATIONS: self.annotations.copy()
}
def clear_citation(self):
self.citation.clear()
if self.citation_clearing:
self.evidence = None
self.annotations.clear()
def clear(self):
"""Clears the statement_group, citation, evidence, and annotations"""
self.statement_group = None
self.citation.clear()
self.evidence = None
self.annotations.clear()
| [
"[email protected]"
] | |
cbe2f4fb4bc9585cb7d499ad66ecb249f6693441 | be7949a09fa8526299b42c4c27adbe72d59d2201 | /cnns/nnlib/robustness/channels/channels_svd_examples.py | 2a1d990426dabdcc224b92b08bdf79151201e3ba | [
"Apache-2.0"
] | permissive | adam-dziedzic/bandlimited-cnns | 375b5cccc7ab0f23d2fbdec4dead3bf81019f0b4 | 81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a | refs/heads/master | 2022-11-25T05:40:55.044920 | 2020-06-07T16:14:34 | 2020-06-07T16:14:34 | 125,884,603 | 17 | 5 | Apache-2.0 | 2022-11-21T21:01:46 | 2018-03-19T16:02:57 | Jupyter Notebook | UTF-8 | Python | false | false | 419 | py | import sys
import numpy
import foolbox
from numpy.linalg import svd
numpy.set_printoptions(threshold=sys.maxsize)
image, label = foolbox.utils.samples(
dataset='cifar10', index=0, batchsize=1, data_format='channels_first')
image = image / 255  # division by 255 to convert [0, 255] to [0, 1]
u, s, vh = svd(a=image, full_matrices=False)
print('label: ', label)
print('u: ', u)
print('s: ', s)
print('vh: ', vh)
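# Illustrative addition (not in the original snippet): the SVD factors can be
# recombined per channel as u @ diag(s) @ vh, which should recover the image.
reconstructed = u @ (s[..., None] * vh)
print('reconstruction close to image: ', numpy.allclose(image, reconstructed))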
| [
"[email protected]"
] | |
27c3cf0cfc2899972cb9c466686a6a8e0a9822a2 | 385295df7d11f258efb0500401e9e2837e143b37 | /django/st01/blog/views.py | 1458eb2af3cc920c3659e93203de4397bf806b03 | [] | no_license | ysjwdaypm/study | 7f4b2a032f30ee6c9481ef3d9f180f947c8167c1 | 61059a4363928e023f3a0fa9f7b3ea726b953f96 | refs/heads/master | 2020-12-25T05:54:56.392792 | 2016-07-06T08:24:39 | 2016-07-06T08:24:39 | 62,702,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,503 | py | # coding=utf-8
from django.shortcuts import render,render_to_response
from django.http import HttpResponse
from django.template import loader,Context,Template
import json,time,sys
import pytz, datetime
from models import Person,BagManager
from django.template import RequestContext
# Create your views here.
reload(sys)
sys.setdefaultencoding('utf8')
logPath = 'log.txt'
"""
python manage.py createsuperuser
python manage.py runserver
"""
def index(req):
    print "ip : %s"%req.META
    d = {'req':req,'d':datetime.datetime.now(),"title":"python","name":"guest","list":[1,2,3,4,5,6,7],"l":['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]}
    return render_to_response('index.html',d,RequestContext(req))
def getNowTime():
    return time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())
def log(req):
    # Check for the 'i' parameter before reading it to avoid a KeyError.
    if 'i' in req.GET:
        print req.GET['i']
        f = open(logPath,'a')
        f.write(req.GET['i'] + ' time:' + getNowTime()+ "\n<br>")
        f.close()
        print "write success"
    dic = {"ret":True}
    return HttpResponse(json.dumps(dic))
def readLog(req):
    f = open(logPath,'r')
    log = f.read()
    f.close()
    return HttpResponse(log)
def register(req):
    u = req.GET["u"]
    pwd = req.GET["p"]
    ret = {"ret":True}
    plist = Person.objects.filter(name=u)
    if len(plist) == 0:
        Person.objects.create(name=u,password=pwd,age=30)
        ret["msg"] = r"user %s created successfully"%u
    else:
        ret["ret"] = False
        ret["msg"] = r"user %s already exists"%u
    return HttpResponse(json.dumps(ret,ensure_ascii=False))
def main(req):
    action = req.GET["action"]
    if action == "register":
        return register(req)
    elif action == "login":
        return login(req)
    elif action == "log":
        return log(req)
    ret = {"msg":"undefined action %s"%action}
    return HttpResponse(json.dumps(ret,ensure_ascii=False))
def wel(req):
    ret = ""
    for k in req.GET:
        ret += k + ":" + req.GET[k] + "</br>"
    t = Template("<h1>{{user.name}} welcome to my page</h1>")
    c = Context({"user":{"name":"ysjwdaypm"}})
    # return HttpResponse(t.render(c))
    users = []
    for user in Person.objects.all():
        users.append({"name":user.name,"password":user.password})
    BagManager.addItem(123)
    return HttpResponse(json.dumps({"users":users},ensure_ascii=True))
def login(req):
    u = req.GET['u']
    pwd = req.GET['p']
    ret = {"ret":True}
    plist = Person.objects.filter(name=u)
    if len(plist) == 0 or not plist[0].password == pwd:
        ret["ret"] = False
        ret["msg"] = "incorrect username or password"
    return HttpResponse(json.dumps(ret,ensure_ascii=False))
| [
"[email protected]"
] | |
90d030e0aa07b3e43d0a019006b657649c9e1a90 | 69bf012ca88897cd87535701369f2b87c6522d57 | /modules/templates/Turkey/controllers.py | 116b2c9dde78a45c4e7ab6f6036bd9c90510e8ba | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sahana/eden | e2cc73f6b34a2ab6579094da09367a9f0be10fd1 | 1cb5a76f36fb45fa636577e2ee5a9aa39f35b391 | refs/heads/master | 2023-08-20T20:56:57.404752 | 2023-02-24T17:16:47 | 2023-02-24T17:16:47 | 3,021,325 | 227 | 253 | NOASSERTION | 2023-01-10T10:32:33 | 2011-12-20T17:49:16 | Python | UTF-8 | Python | false | false | 2,364 | py | # -*- coding: utf-8 -*-
from gluon import *
from s3 import S3CustomController
THEME = "Turkey"
# =============================================================================
class index(S3CustomController):
    """ Custom Home Page """

    def __call__(self):
        output = {}
        # Allow editing of page content from browser using CMS module
        if current.deployment_settings.has_module("cms"):
            system_roles = current.auth.get_system_roles()
            ADMIN = system_roles.ADMIN in current.session.s3.roles
            s3db = current.s3db
            table = s3db.cms_post
            ltable = s3db.cms_post_module
            module = "default"
            resource = "index"
            query = (ltable.module == module) & \
                    ((ltable.resource == None) | \
                     (ltable.resource == resource)) & \
                    (ltable.post_id == table.id) & \
                    (table.deleted != True)
            item = current.db(query).select(table.body,
                                            table.id,
                                            limitby=(0, 1)).first()
            if item:
                if ADMIN:
                    item = DIV(XML(item.body),
                               BR(),
                               A(current.T("Edit"),
                                 _href=URL(c="cms", f="post",
                                           args=[item.id, "update"]),
                                 _class="action-btn"))
                else:
                    item = DIV(XML(item.body))
            elif ADMIN:
                if current.response.s3.crud.formstyle == "bootstrap":
                    _class = "btn"
                else:
                    _class = "action-btn"
                item = A(current.T("Edit"),
                         _href=URL(c="cms", f="post", args="create",
                                   vars={"module": module,
                                         "resource": resource
                                         }),
                         _class="%s cms-edit" % _class)
            else:
                item = ""
        else:
            item = ""
        output["item"] = item
        self._view(THEME, "index.html")
        return output
# END =========================================================================
| [
"[email protected]"
] | |
31aec23ecdfa187a48c29120276e4f8366771eae | 038af1bfd275530413a7b4e28bf0e40eddf632c6 | /parsifal/apps/reviews/migrations/0032_auto_20151006_0619.py | 1bbe293f14044c44f5fc91de120586c344cc84f0 | [
"MIT"
] | permissive | vitorfs/parsifal | 5c5345ff75b48c5596977c8e0a9c4c537ed4726c | 68c3ce3623a210a9c649a27f9d21ae6130541ea9 | refs/heads/dev | 2023-05-24T16:34:31.899776 | 2022-08-14T16:30:06 | 2022-08-14T16:30:06 | 11,648,402 | 410 | 223 | MIT | 2023-05-22T10:47:20 | 2013-07-25T00:27:21 | Python | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):

    dependencies = [
        ('reviews', '0031_article_selection_criteria'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='customarticlestatus',
            name='review',
        ),
        migrations.DeleteModel(
            name='CustomArticleStatus',
        ),
    ]
| [
"[email protected]"
] | |
210584bd6d6c063d2901bfc6bfd97577749d7d89 cd3c9415d279d2545106f559ab3117aa55ed17ef /L02 运算符、if判断、while、for循环、字符串相关函数/课件/09 while循环例子.py e459f1ef6baee3d648ec9554d6743e82ccb0c891 [] no_license yuanchangwang/yuan ad418609e6415a6b186b3d95a48e2bd341f2d07f 22a43c09af559e9f6bdf6e8e3727c1b290bc27d4 refs/heads/master 2020-08-27T02:17:40.544162 2019-11-19T10:25:51 2019-11-19T10:25:51 217,216,439 0 0 null null null null null null UTF-8 Python false false 2,924 py | # (1) Print one row of ten little stars
#**********
i = 0
while i<10:
    print("*",end="")
    i+=1
# help: view the built-in documentation, e.g. help(print)
# help(print)
# (2) Print one row of ten stars using a single variable (pack the ten stars into the variable, then print the variable)
print("<===>")
i = 0
strvar = ''
while i<10:
    strvar += "*"
    i+=1
    # strvar = strvar + "*" + "*" + "*"
print(strvar)
# (3) Print one row of ten stars: odd positions print ★, even positions print ☆
'''
0 % 2 = 0
1 % 2 = 1
2 % 2 = 0
3 % 2 = 1
4 % 2 = 0
any number n mod 2 falls in {0, 1}
0 % 3 = 0
1 % 3 = 1
2 % 3 = 2
3 % 3 = 0
4 % 3 = 1
5 % 3 = 2
any number n mod 3 falls in {0, 1, 2}
any number n mod m falls in the range 0 ~ (m-1)
'''
i = 0
while i<10:
    # code goes here
    # if the remainder is 0, print a solid star
    if i % 2 == 0:
        print("★",end="")
    else:
        # otherwise print a hollow star
        print("☆",end="")
    i+=1
# (4) Print ten rows and ten columns of stars with a single loop
print()
i = 0
while i<100:
    # print one star
    print("*",end="")
    # i runs from 0 to 99
    # n % 10 ranges over 0 ~ 9: 0 marks the first star of a row and 9 the last, exactly ten, so break the line after the tenth
    if i % 10 == 9:
        # print a newline
        print()
    i+=1
"""
**********
**********
**********
**********
**********
**********
**********
**********
**********
**********
"""
# (5)一个循环 打印十行十列隔列变色小星星(一个循环)
i = 0
while i<100:
# 输出小星星
if i % 2 == 0:
print("★",end="")
else:
print("☆",end="")
# 最后换行
if i % 10 == 9:
print()
i+=1
"""
# alternate color by column
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
"""
# (6) One loop: print ten rows and ten columns of stars, alternating color by row
"""
# the floor-division pattern
0 // 10 0
1 // 10 0
2 // 10 0
3 // 10 0
...
9 // 10 0
10 // 10 1
11 // 10 1
12 // 10 1
...
19 // 10 1
20 // 10 2
21 // 10 2
..
29 // 10 2
...
...
90 // 10 9
91 // 10 9
..
99 // 10 9
0 // 3 0
1 // 3 0
2 // 3 0
3 // 3 1
4 // 3 1
5 // 3 1
ten 0s
ten 1s
ten 2s
ten 3s
...
ten 9s
=> floor-dividing successive numbers by n yields runs of n identical results
"""
#★☆
i = 0
while i<100:
    # use the floor-division runs (ten identical quotients) to pick the star style for each whole row
    if i // 10 % 2 == 0:
        print("★",end="")
    else:
        print("☆",end="")
    # control the line break
    if i % 10 == 9:
        print()
    i+=1
'''
★★★★★★★★★★
☆☆☆☆☆☆☆☆☆☆
★★★★★★★★★★
☆☆☆☆☆☆☆☆☆☆
★★★★★★★★★★
☆☆☆☆☆☆☆☆☆☆
★★★★★★★★★★
☆☆☆☆☆☆☆☆☆☆
★★★★★★★★★★
☆☆☆☆☆☆☆☆☆☆
'''
| [
"[email protected]"
] | |
758237f387f0ed09696e3ddefa728eaadd792a79 | 4aa7a4d0525095725eb99843c83827ba4806ceb1 | /tf/tf08_mv2.py | 07920ed1a465784c7cf577b6538dd87eef4b0862 | [] | no_license | seonukim/Study | 65a70f5bdfad68f643abc3086d5c7484bb2439d4 | a5f2538f9ae8b5fc93b5149dd51704e8881f0a80 | refs/heads/master | 2022-12-04T17:04:31.489771 | 2020-08-21T00:35:15 | 2020-08-21T00:35:15 | 260,144,755 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | # mv : Multi Variables
import os
import tensorflow as tf
path = 'D:/Study/data/csv/'
os.chdir(path)
tf.compat.v1.set_random_seed(777)
x_data = [[73., 51., 65.],
[92., 98., 11.],
[89., 31., 33.],
[99., 33., 100.],
[17., 66., 79.]]
y_data = [[152.],
[185.],
[180.],
[205.],
[142.]]
x = tf.compat.v1.placeholder(tf.float32, shape = [None, 3])
y = tf.compat.v1.placeholder(tf.float32, shape = [None, 1])
w = tf.compat.v1.Variable(tf.random.normal([3, 1]), name = 'weight')
b = tf.compat.v1.Variable(tf.random.normal([1]), name = 'bias')
hypothesis = tf.compat.v1.add(tf.compat.v1.matmul(x, w), b) # wx + b
cost = tf.compat.v1.reduce_mean(tf.compat.v1.square(hypothesis - y))  # compare against the y placeholder, not y_data directly
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate = 5e-6)
train = optimizer.minimize(cost)
feed_dict = {x: x_data, y: y_data}
init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
    sess.run(init)
    for step in range(2000 + 1):
        cost_val, hy_val, _ = sess.run([cost, hypothesis, train], feed_dict = feed_dict)
        if step % 50 == 0:
            print(f'{step}, cost : {cost_val}, \n{step} prediction : \n{hy_val}\n')
"[email protected]"
] | |
3ce72250025b94e8864b7f1f307db8f7b8c7ef73 | 3a0430831f3f9fc551ce02f625318754c17a5357 | /app/database/tables.py | e26bf8c2f915d4db3512c9b8a8e20ed0ced8fc7a | [
"Apache-2.0",
"MIT"
] | permissive | victor-iyi/heart-disease | 8589409388495029a2219c08fad57e0941bfbff1 | 06540b582e8752d2bb6a32366077872d32d7c0e4 | refs/heads/master | 2023-08-03T11:18:37.711933 | 2021-09-19T16:30:05 | 2021-09-19T16:30:05 | 363,746,469 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,535 | py | # Copyright 2021 Victor I. Afolabi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum

from passlib.context import CryptContext
from sqlalchemy import Column, DateTime, Enum
from sqlalchemy import Integer, Numeric, String, Text

from app.database import Base


# Category is a Python enum (enum.Enum); SQLAlchemy's Enum is a column type,
# not a value enumeration, so the discriminator column below stores the
# Python enum via Enum(Category).
class Category(enum.Enum):
    patient = 'Patient'
    practitioner = 'Medical Practitioner'


class User(Base):
    __tablename__ = 'user'

    # User ID column.
    id = Column(Integer, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)
    password_hash = Column(String(64), nullable=False)
    first_name = Column(String(32), index=True)
    last_name = Column(String(32), index=True)
    category = Column(Enum(Category), index=True,
                      nullable=False,
                      default=Category.patient)

    __mapper_args__ = {
        'polymorphic_identity': 'user',
        'polymorphic_on': category,
    }

    # Password context.
    pwd_context = CryptContext(schemes=['bcrypt'], deprecated='auto')

    def __repr__(self) -> str:
        return f'User({self.email}, {self.category})'

    @staticmethod
    def hash_password(password: str) -> str:
        return User.pwd_context.hash(password)

    @staticmethod
    def verify_password(password: str, hash_password: str) -> bool:
        return User.pwd_context.verify(password, hash_password)
class Patient(User):
    # Patient info.
    age = Column(Integer)
    contact = Column(String(15), index=True)
    history = Column(Text)
    aliment = Column(Text)
    last_visit_diagnosis = Column(DateTime)
    guardian_fullname = Column(String(64))
    guardian_email = Column(String)
    guardian_phone = Column(String(15))
    occurences_of_illness = Column(Text)
    last_treatment = Column(DateTime)

    __mapper_args__ = {
        'polymorphic_identity': 'patient',
        'inherit_condition': User.category == Category.patient
    }

    def __repr__(self) -> str:
        return f'Patient({self.email})'


class Practitioner(User):
    practitioner_data = Column(String)

    __mapper_args__ = {
        'polymorphic_identity': 'practitioner',
        'inherit_condition': User.category == Category.practitioner
    }

    def __repr__(self) -> str:
        return f'Practitioner({self.email})'
class Feature(Base):
    __tablename__ = 'features'

    # Primary key.
    id = Column(Integer, primary_key=True, index=True)

    # Features.
    age = Column(Integer, nullable=False)
    sex = Column(Integer, nullable=False)
    cp = Column(Integer, nullable=False)
    trestbps = Column(Integer, nullable=False)
    chol = Column(Integer, nullable=False)
    fbs = Column(Integer, nullable=False)
    restecg = Column(Integer, nullable=False)
    thalach = Column(Integer, nullable=False)
    exang = Column(Integer, nullable=False)
    oldpeak = Column(Numeric, nullable=False)
    slope = Column(Integer, nullable=False)
    ca = Column(Integer, nullable=False)
    thal = Column(Integer, nullable=False)

    # Target.
    target = Column(Integer, nullable=True)
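# A minimal usage sketch (an assumption for illustration; `Base` from
# app.database is expected to be a standard SQLAlchemy declarative base):
#
#     from sqlalchemy import create_engine
#     engine = create_engine('sqlite:///:memory:')
#     Base.metadata.create_all(engine)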
| [
"[email protected]"
] | |
7063e3a2f690e52339d69ce4edbc432271d79b30 | 223f8feb7b9ff6334ca7d047636fbbcb598c824c | /src/web/web/settings.py | bfdabc775015d1c9cde45d4c5489e1882893226d | [] | no_license | cluster311/backend.cluster311.com | 169b3c621c7f0231844c4e9b6ad51a9feada4608 | f5ab1ebbd220d3ab4bae253cc61fddbe1153f8d8 | refs/heads/master | 2023-08-06T20:01:00.563492 | 2020-04-10T20:02:22 | 2020-04-10T20:02:22 | 254,720,332 | 0 | 0 | null | 2021-09-22T18:52:07 | 2020-04-10T19:36:39 | Python | UTF-8 | Python | false | false | 3,079 | py | """
Django settings for web project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@+wq2cosila9tbeg0vpul0-0xvlsm)1(+g0llgz7e+-2_m22st'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
80e46d30ab05b2895d952c71dce8447f1266d6a9 | 78d23de227a4c9f2ee6eb422e379b913c06dfcb8 | /LeetCode/41.py | 4121f226d68d1e982fa42e6e49ff768f1509886a | [] | no_license | siddharthcurious/Pythonic3-Feel | df145293a3f1a7627d08c4bedd7e22dfed9892c0 | 898b402b7a65073d58c280589342fc8c156a5cb1 | refs/heads/master | 2020-03-25T05:07:42.372477 | 2019-09-12T06:26:45 | 2019-09-12T06:26:45 | 143,430,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from typing import List
class Solution:
    def firstMissingPositive(self, nums: List[int]) -> int:
        # Place each value v in 1..n at index v - 1 by swapping; afterwards,
        # the first index holding the wrong value reveals the missing positive.
        n = len(nums)
        for i in range(n):
            while 1 <= nums[i] <= n and nums[nums[i] - 1] != nums[i]:
                j = nums[i] - 1
                nums[i], nums[j] = nums[j], nums[i]
        for i in range(n):
            if nums[i] != i + 1:
                return i + 1
        return n + 1
if __name__ == "__main__":
    s = Solution()
    arr = [3, 4, -1, 1]
    r = s.firstMissingPositive(arr)
    print(r)  # -> 2
| [
"[email protected]"
] | |
9815dae1a781a0753475d33dc8c2dfb696bc31a4 | 97e37192d4a695777c538596086c0be826b721e1 | /vedastr/lr_schedulers/base.py | 95d0f2e637e52d41a0e3cddfded57efb211e5a7c | [
"Apache-2.0"
] | permissive | Sayyam-Jain/vedastr | 1b587adc1ff4dc79ab7acc71d7ee08fe600c8933 | 83511a408b68c264561a30daff5154cd0148bebd | refs/heads/master | 2022-12-13T08:06:21.304845 | 2020-09-10T05:05:50 | 2020-09-10T05:05:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,825 | py | import warnings
import weakref
from functools import wraps
import numpy as np
from torch.optim import Optimizer
class _Iter_LRScheduler(object):
def __init__(self, optimizer, niter_per_epoch, last_iter=-1, iter_based=True):
self._iter_based = iter_based
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
self.niter_per_epoch = niter_per_epoch
if last_iter == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
last_iter = 0
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
self.last_epoch = int(last_iter / niter_per_epoch)
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `lr_scheduler.step()` is called after
# `optimizer.step()`
def with_counter(method):
if getattr(method, '_with_counter', False):
# `optimizer.step()` has already been replaced, return.
return method
# Keep a weak reference to the optimizer instance to prevent
# cyclic references.
instance_ref = weakref.ref(method.__self__)
# Get the unbound method for the same purpose.
func = method.__func__
cls = instance_ref().__class__
del method
@wraps(func)
def wrapper(*args, **kwargs):
instance = instance_ref()
instance._step_count += 1
wrapped = func.__get__(instance, cls)
return wrapped(*args, **kwargs)
# Note that the returned function here is no longer a bound method,
# so attributes like `__func__` and `__self__` no longer exist.
wrapper._with_counter = True
return wrapper
self.optimizer.step = with_counter(self.optimizer.step)
self.optimizer._step_count = 0
self._step_count = 0
self.iter_nums(last_iter)
self.step()
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_lr(self):
raise NotImplementedError
def iter_nums(self, iter_=None):
# Raise a warning if old pattern is detected
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.optimizer.step, "_with_counter"):
warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
"initialization. Please, make sure to call `optimizer.step()` before "
"`lr_scheduler.step()`. See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
# Just check if there were two first lr_scheduler.step() calls before optimizer.step()
elif self.optimizer._step_count < 1:
warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
"In PyTorch 1.1.0 and later, you should call them in the opposite order: "
"`optimizer.step()` before `lr_scheduler.step()`. Failure to do this "
"will result in PyTorch skipping the first value of the learning rate schedule."
"See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
self._step_count += 1
if iter_ is None:
iter_ = self.last_iter + 1
self.last_iter = iter_
self.last_epoch = np.ceil(iter_ / self.niter_per_epoch)
def step(self):
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
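# A minimal illustrative subclass (an assumption for demonstration, not part
# of this module): concrete schedulers override get_lr(), e.g. a constant
# schedule that keeps each base learning rate unchanged:
#
# class ConstantLR(_Iter_LRScheduler):
#     def get_lr(self):
#         return [base_lr for base_lr in self.base_lrs]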
| [
"[email protected]"
] | |
30b8677941dafba525a06bf773593a0852b3b768 | f6786f5f51c0a71a09213e2f729766d1a04dffa2 | /두근두근_파이썬/11_File/Labs/328_hangman_game.py | 50ceef354db69f90bce3e63a80c529b8ec789d86 | [] | no_license | SuperstarterJaeeun/Learn-Programming-Book | 4f075fdec386a0449da8d0d08bb8f1b6d6b2f304 | f768acfffcb20b9fc97946ca491f6ffb20671896 | refs/heads/master | 2023-07-24T07:13:24.374240 | 2021-09-06T14:56:02 | 2021-09-06T14:56:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | import random
guesses = ''
turns = 10
infile = open("word.txt", "r", encoding="UTF8")
lines = infile.readlines()
word = random.choice(lines).strip()  # strip the newline so it is not treated as a letter to guess
while turns > 0 :
    failed = 0
    for char in word :
        if char in guesses :
            print(char, end = "")
        else :
            print("_", end = "")
            failed += 1
    if failed == 0:
        print("You win")
        break
    print("")
    guess = input("Guess a letter : ")
    guesses += guess
    if guess not in word :
        turns -= 1
        print("Wrong!")
        print(str(turns) + " chances left!")
        if turns == 0:
            print("You lose. The answer was " + word)
infile.close() | [
"[email protected]"
] | |
c7139cdba6d0bdc5caf556866c895ed914db146f | 6a96d6c5ba06ef175ebeed773fc925fcad7ddbd2 | /MaLongHui_Django/apps/users/urls.py | 0a64e123fc1b9b6346ce505192f5960b68fa429a | [] | no_license | AmirHuang/MaLongHui_Django | 38934c3de34f705a70458ff8c644efce69854435 | 0bcff7f0311d6bddd504d088ad52e8217f5c8c74 | refs/heads/master | 2020-05-04T04:48:10.149549 | 2019-04-02T01:24:01 | 2019-04-02T01:24:01 | 178,974,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | # _*_ coding: utf-8 _*_
# @time : 2019/03/30
# @Author : Amir
# @Site :
# @File : urls.py
# @Software : PyCharm
from django.urls import path
from rest_framework import routers
from . import views
from rest_framework_jwt.views import obtain_jwt_token
routers = routers.DefaultRouter()
routers.register(r'work_experiences', views.UserWorkExperienceViewSet, base_name='work_experiences')
routers.register(r'education_experiences', views.UserEducationExperienceViewSet, base_name='education_experiences')
urlpatterns = [
path(r'user/', views.UserDetailView.as_view()),
path(r'usernames/<str:username>/count/', views.UsernameCountView.as_view()),
path(r'mobiles/<str:mobile>/count/', views.MobileCountView.as_view()),
path(r'users/', views.UserView.as_view()),
path(r'authorizations/', obtain_jwt_token),
]
urlpatterns += routers.urls | [
"[email protected]"
] | |
ff40095f45e1ccca7b27d17b2d0dac4606979175 | 453302ff0c16614eb820c7153835bb549d903ebe | /build/wheeltec_robot_urdf/catkin_generated/pkg.installspace.context.pc.py | 311427dd50139dbb9bc857bdff6fe2d65c145f8e | [] | no_license | SimonSongg/catkin_ws | 591a8bee4864b2e31cc2abb041c082e2d4dc9d83 | afbf5346be2fdfa5aace7ed949d5023b3016363d | refs/heads/main | 2023-07-13T13:28:00.827983 | 2021-08-22T07:18:01 | 2021-08-22T07:18:01 | 380,931,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "wheeltec_robot_urdf"
PROJECT_SPACE_DIR = "/home/adv/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
c8014a758b3a175f52c3012fcc28b6369c99270a | e415e4cdab3d1cd04a4aa587f7ddc59e71977972 | /builtin/comprehension_syntax.py | fa9db9d682556dea663d08cffe6ccd076c54a921 | [] | no_license | nixawk/hello-python3 | 8c3ebba577b39f545d4a67f3da9b8bb6122d12ea | e0680eb49d260c5e3f06f9690c558f95a851f87c | refs/heads/master | 2022-03-31T23:02:30.225702 | 2019-12-02T10:15:55 | 2019-12-02T10:15:55 | 84,066,942 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Comprehension Syntax
# A very common programming task is to produce one series of values based upon
# the processing of another series. Often, this task can be accomplished quite
# simply in Python using what is known as a comprehension syntax.
# [k * k for k in range(1, n+1)] # list comprehension
# {k * k for k in range(1, n+1)} # set comprehension
# (k * k for k in range(1, n+1)) # generator comprehension
# {k: k * k for k in range(1, n+1)} # dictionary comprehension
LIST_A = [1, 2, 3, 4]
print(sum([k * 10 for k in LIST_A])) # output: 100
print(sum({k * 10 for k in LIST_A})) # output: 100
print(sum((k * 10 for k in LIST_A))) # output: 100
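# The dictionary form is listed above but not demonstrated; a small
# illustrative addition:
DICT_A = {k: k * 10 for k in LIST_A}
print(sum(DICT_A.values())) # output: 100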
| [
"[email protected]"
] | |
7bc8151289ec59feecad2dda19ea8f99e5c30580 | 22ffa2a2df4218c12d6324fe602ec20617445dc1 | /draw/nn/__init__.py | 335db6fbd6c550347ba81e70b0e29f943221d53b | [] | no_license | musyoku/convolutional-draw | 367f0269f2df69ac6480d1de7edf2bdf80259ef0 | 3f384feb7b0d1a2138bd758713ed2745c216df3c | refs/heads/master | 2020-03-21T08:55:18.630043 | 2018-10-09T05:19:17 | 2018-10-09T05:19:17 | 138,373,875 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | from . import single_layer
from . import functions | [
"[email protected]"
] | |
7ada2586a0ccebebfa6b784143f3f8ab5e1602f0 | 63bacb52d016cf7a237dacd79ba2861842c49ca9 | /zuora_client/models/get_entities_user_accessible_response_type.py | cd250a178562f5a663b37538120c230860fcc65e | [] | no_license | arundharumar-optimizely/zuora-client-python | ee9667956b32b64b456920ad6246e02528fe6645 | a529a01364e41844c91f39df300c85c8d332912a | refs/heads/master | 2020-07-05T23:09:20.081816 | 2019-07-30T21:46:47 | 2019-07-30T21:46:47 | 202,811,594 | 0 | 0 | null | 2019-08-16T23:26:52 | 2019-08-16T23:26:52 | null | UTF-8 | Python | false | false | 43,810 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. 
This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. 
To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Zuora_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. 
### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Responses_and_Errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. 
Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. | | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExisting DraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | includeExisting DraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. 
| | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. **Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. 
| | creditTaxItems | 238.0 and earlier | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\") | Container for the taxation items of the credit memo item. | | taxItems | 238.0 and earlier | [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the debit memo item. | | taxationItems | 239.0 and later | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\"); [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the memo item. | #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods are changed. The following invoice related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation, Export ZOQL queries, and Data Query. 
Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Feature | `Feature` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | Order Quantity | `OrderQuantity` | | Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Feature | `ProductFeature` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | `RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | 
`RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | `RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Subscription Product Feature | `SubscriptionProductFeature` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2019-07-26
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from zuora_client.models.get_entities_type import GETEntitiesType # noqa: F401,E501
class GETEntitiesUserAccessibleResponseType(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'entities': 'list[GETEntitiesType]',
'success': 'bool'
}
attribute_map = {
'entities': 'entities',
'success': 'success'
}
def __init__(self, entities=None, success=None): # noqa: E501
"""GETEntitiesUserAccessibleResponseType - a model defined in Swagger""" # noqa: E501
self._entities = None
self._success = None
self.discriminator = None
if entities is not None:
self.entities = entities
if success is not None:
self.success = success
@property
def entities(self):
"""Gets the entities of this GETEntitiesUserAccessibleResponseType. # noqa: E501
Container for one or more entities in a multi-entity hierarchy. # noqa: E501
:return: The entities of this GETEntitiesUserAccessibleResponseType. # noqa: E501
:rtype: list[GETEntitiesType]
"""
return self._entities
@entities.setter
def entities(self, entities):
"""Sets the entities of this GETEntitiesUserAccessibleResponseType.
Container for one or more entities in a multi-entity hierarchy. # noqa: E501
:param entities: The entities of this GETEntitiesUserAccessibleResponseType. # noqa: E501
:type: list[GETEntitiesType]
"""
self._entities = entities
@property
def success(self):
"""Gets the success of this GETEntitiesUserAccessibleResponseType. # noqa: E501
Returns `true` if the request is successful. # noqa: E501
:return: The success of this GETEntitiesUserAccessibleResponseType. # noqa: E501
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""Sets the success of this GETEntitiesUserAccessibleResponseType.
Returns `true` if the request is successful. # noqa: E501
:param success: The success of this GETEntitiesUserAccessibleResponseType. # noqa: E501
:type: bool
"""
self._success = success
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GETEntitiesUserAccessibleResponseType, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GETEntitiesUserAccessibleResponseType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
528b14dfcb491864e05ebf063da0f90fb820507c | 4f08d2dd7901ae5dfbb8e1ecb1e58da9a4875f50 | /planilla/models/planilla_gratificacion.py | 3764c91c533d50300a28e9d28ddbc3e1594145a6 | [] | no_license | azvolamza/erp | 65bdc025b6cbe4f3053618d471c998c4abc883cb | 7f9671b98bd0a9e70464d08c1ef53516fdd8e665 | refs/heads/master | 2023-05-11T22:09:24.673095 | 2021-01-28T14:34:55 | 2021-01-28T14:34:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,122 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import ValidationError
from odoo.addons.base.res.res_request import referenceable_models
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta
from odoo.exceptions import UserError
import sys
import io
from xlsxwriter.workbook import Workbook
import base64
class PlanillaGratificacion(models.Model):
_name = "planilla.gratificacion"
_rec_name = 'year'
year = fields.Selection([
('2017', '2017'),
('2018', '2018'),
('2019', '2019'),
('2020', '2020'),
('2021', '2021'),
('2022', '2022'),
('2023', '2023'),
('2024', '2024'),
('2025', '2025'),
('2026', '2026'),
('2027', '2027'),
('2028', '2028'),
('2029', '2029'),
('2030', '2030'),
], string=u"Año")
tipo = fields.Selection([('07', u"Gratificación Fiestas Patrias"),
('12', u"Gratificación Navidad")], "Mes", required=1)
date_start = fields.Date()
date_end = fields.Date()
plus_9 = fields.Boolean("Considerar Bono 9%")
planilla_gratificacion_lines = fields.One2many(
'planilla.gratificacion.line', 'planilla_gratificacion_id', "Lineas")
deposit_date = fields.Date(u'Fecha depósito')
@api.multi
@api.depends('tipo','year')
@api.onchange('tipo','year')
def change_dates(self):
self.ensure_one()
if self.year:
if self.tipo == '07':
self.date_start = date(int(self.year), 6, 1)
self.date_end = date(int(self.year), 6, 30)
else:
self.date_start = date(int(self.year), 12, 1)
self.date_end = date(int(self.year), 12, 31)
@api.multi
def write(self, vals):
print "vals ", vals
if vals and "tipo"in vals:
if vals['tipo'] == '07':
vals['date_start'] = date(int(self.year), 6, 1)
vals['date_end'] = date(int(self.year), 6, 30)
else:
vals['date_start'] = date(int(self.year), 12, 1)
vals['date_end'] = date(int(self.year), 12, 31)
return super(PlanillaGratificacion, self).write(vals)
@api.model
def create(self, vals):
print vals
if len(self.search([('year', '=', vals['year']), ('tipo', '=', vals['tipo'])])) >= 1:
raise UserError(
"Ya existe un registros %s %s" % (vals['year'], vals['tipo']))
else:
print "MIS AÑOS ",self.year
if vals['tipo'] == '07':
vals['date_start'] = date(int(vals['year']), 6, 1)
vals['date_end'] = date(int(vals['year']), 6, 30)
else:
vals['date_start'] = date(int(vals['year']), 12, 1)
vals['date_end'] = date(int(vals['year']), 12, 31)
return super(PlanillaGratificacion, self).create(vals)
@api.model
def get_parametros_gratificacion(self):
parametros_gratificacion = self.env['planilla.parametros.gratificacion'].search([
], limit=1)
# if not parametros_gratificacion.cod_he25.codigo:
# raise UserError(
# 'Debe configurar parametros de gratificacion cod_he25 Nomina->configuracion->parametros gratificacion')
# elif not parametros_gratificacion.cod_he35.codigo:
# raise UserError(
# 'Debe configurar parametros de gratificacion cod_he35 Nomina->configuracion->parametros gratificacion')
# elif not parametros_gratificacion.cod_he100.codigo:
# raise UserError(
# 'Debe configurar parametros de gratificacion cod_he100 Nomina->configuracion->parametros gratificacion')
if not parametros_gratificacion.cod_gratificacion.codigo:
raise UserError(
'Debe configurar parametros de gratificacion cod_gratificacion Nomina->configuracion->parametros gratificacion')
elif not parametros_gratificacion.cod_bonificaciones.code:
raise UserError(
'Debe configurar parametros de gratificacion cod_bonificaciones Nomina->configuracion->parametros gratificacion')
elif not parametros_gratificacion.cod_basico.code:
raise UserError(
'Debe configurar parametros de gratificacion cod_basico Nomina->configuracion->parametros gratificacion')
elif not parametros_gratificacion.cod_asignacion_familiar.code:
raise UserError(
'Debe configurar parametros de gratificacion cod_asignacion_familiar Nomina->configuracion->parametros gratificacion')
elif not parametros_gratificacion.cod_bonificacion_9.code:
raise UserError(
'Debe configurar parametros de gratificacion cod_bonificacion_9 Nomina->configuracion->parametros gratificacion')
elif not parametros_gratificacion.cod_dias_faltas.codigo:
raise UserError(
'Debe configurar parametros de gratificacion cod_dias_faltas Nomina->configuracion->parametros gratificacion')
elif not parametros_gratificacion.cod_comisiones.code:
raise UserError(
'Debe configurar parametros de gratificacion cod_comisiones Nomina->configuracion->parametros gratificacion')
elif not parametros_gratificacion.cod_sobretiempos.code:
raise UserError(
'Debe configurar parametros de gratificacion cod_sobretiempos Nomina->configuracion->parametros gratificacion')
else:
return parametros_gratificacion
@api.multi
def calcular_gratificacion(self):
print "========0CALCULANDO GRATIFICACION =================="
self.ensure_one()
helper_liquidacion = self.env['planilla.helpers']
self.planilla_gratificacion_lines.unlink()
tabla_montos_primera_mitad = False
parametros_gratificacion = self.get_parametros_gratificacion()
parametros_eps = self.env['planilla.parametros.essalud.eps'].get_parametros_essalud_eps(
)
# print parametros_gratificacion.cod_he100.codigo
# import pudb;pudb.set_trace()
if self.tipo == '07':
            # the end of the range should be the 30th of month 6,
            # but to make sure there is at least one month
            # for which the gratification is paid,
            # I extend the end date to be past the 31st of month 7,
            # since if it is earlier than that the employee either stopped or will stop in month 7,
            # and would then be owed a settlement (liquidacion) instead of a gratification
            #
            # for the start of the range, to make sure there is at least one month,
            # the start of resumen_periodo should be at most the 1st of month 6
rango_inicio_contrato = date(int(self.year), 6, 1)
rango_fin_contrato = date(int(self.year), 6, 30)
rango_inicio_planilla = date(int(self.year), 1, 1)
rango_fin_planilla = date(int(self.year), 6, 30)
tabla_montos_primera_mitad = True
else:
rango_inicio_contrato = date(int(self.year), 11, 1)
# rango_fin_contrato = date(int(self.year), 12, 31) #30 nov
rango_fin_contrato = date(int(self.year), 11, 30) # 30 nov
rango_inicio_planilla = date(int(self.year), 7, 1)
# rango_fin_planilla = date(int(self.year), 12, 31)
rango_fin_planilla = date(int(self.year), 11, 30)
query_gratificacion = """
select T.tipo_empresa,T.id,T.employee_id,T.identification_id, T.a_paterno,T.a_materno,T.nombres,T.date_start,T.date_end,sum(faltas) as faltas,max(T.basico) as basico,max(T.afam) as afam,max(T.bo9) as bo9
from (
select hc.id,hc.employee_id, hc.date_start,hc.date_end,
he.identification_id,
he.tipo_empresa,
he.a_paterno,
he.a_materno,
he.nombres,
(case when hp.date_from>='%s' and hp.date_to<='%s' then hp.basico else 0 end) as basico ,
(case when ( date_from>='%s' and date_to<='%s' ) then hp.asignacion_familiar else 0 end) as afam,
(case when ( date_from>='%s' and date_to<='%s' ) then hp.bonificacion_9 else 0 end) as bo9,
hp.dias_faltas as faltas
from hr_payslip hp
inner join hr_contract hc
on hc.id = hp.contract_id
inner join hr_employee he
on he.id = hp.employee_id
where ( date_start <= '%s' ) and (date_end is null or date_end>'%s')
and( date_from>='%s' and date_to<='%s' )
) as T
group by T.id,T.employee_id,T.identification_id, T.a_paterno,T.a_materno,T.nombres,T.date_start,T.date_end,T.tipo_empresa
order by T.id
""" % (rango_inicio_contrato, rango_fin_planilla,
rango_inicio_contrato, rango_fin_planilla,
rango_inicio_contrato, rango_fin_planilla,
rango_inicio_contrato, rango_fin_contrato,
rango_inicio_planilla, rango_fin_planilla)
print query_gratificacion
self.env.cr.execute(query_gratificacion)
contratos = self.env.cr.dictfetchall()
        # iterate over the date ranges of each resumen_periodo;
        # the goal is to find the longest range of
        # contiguous dates
fechas = list()
for i in range(len(contratos)):
resumen_periodo = contratos[i]
print "mi resumen_periodo actual ", resumen_periodo
contratos_empleado = self.env['hr.contract'].search(
[('employee_id', '=', resumen_periodo['employee_id']), ('date_end', '<=', resumen_periodo['date_end'])], order='date_end desc')
fecha_ini = fields.Date.from_string(
contratos_empleado[0].date_start)
fecha_fin_contrato = fields.Date.from_string(
contratos_empleado[0].date_end)
            # 2. look for earlier contracts that are contiguous (no more than one day's gap between contracts)
for i in range(1, len(contratos_empleado)):
c_empleado = contratos_empleado[i]
fecha_fin = fields.Date.from_string(c_empleado.date_end)
if abs(((fecha_fin)-(fecha_ini)).days) == 1:
fecha_ini = fields.Date.from_string(c_empleado.date_start)
fecha_fin = fecha_fin_contrato
# datetime.combine(d, datetime.min.time()) fecha_ini#datetime(int(fecha_ini[:4]), int(fecha_ini[5:7]), int(fecha_ini[8:10]))
# if fecha_ini < rango_inicio_planilla:
# meses = 6
# elif fecha_ini.day > 1:
# # el mes que esta no cuenta por eso se deja como esta la resta
# meses = helper_liquidacion.diferencia_meses_gratificacion(fecha_ini,rango_fin_planilla)
# else:
# # mas un mes por que el mes que esta iniciando cuenta
meses = helper_liquidacion.diferencia_meses_gratificacion(
fecha_ini, rango_fin_planilla)
if fecha_ini < rango_inicio_planilla:
fecha_computable = rango_inicio_planilla
else:
fecha_computable = fecha_ini
fecha_inicio_nominas = date(
fecha_computable.year, fecha_computable.month, 1)
conceptos = self.env['hr.payslip'].search([('date_from', '>=', fecha_inicio_nominas), (
'date_to', '<=', rango_fin_planilla), ('employee_id', '=', resumen_periodo['employee_id'])], order='date_to desc')
verificar_meses, _ = helper_liquidacion.diferencia_meses_dias(
fecha_inicio_nominas, rango_fin_planilla)
print "VERIFICAR MESES ", verificar_meses
# if dias==0:
# verificar_meses-=1
if len(conceptos) != verificar_meses:
fecha_encontradas = ' '.join(
['\t-'+x.name+'\n' for x in conceptos])
if not fecha_encontradas:
fecha_encontradas = '"No tiene nominas"'
raise UserError(
'Error en GRATIFICACION: El empleado %s debe tener nominas desde:\n %s hasta %s pero solo tiene nominas en las siguientes fechas:\n %s \nfaltan %d nominas, subsanelas por favor ' % (
contratos_empleado[0].employee_id.name_related, fecha_inicio_nominas, rango_fin_planilla, fecha_encontradas, abs(len(
conceptos) - (verificar_meses))
))
basico = helper_liquidacion.getBasicoByDate(date(rango_fin_planilla.year, rango_fin_planilla.month, 1),rango_fin_planilla,resumen_periodo['employee_id'],parametros_gratificacion.cod_basico.code ) # conceptos[0].basico if conceptos else 0.0
faltas = helper_liquidacion.getSumFaltas(date(rango_fin_planilla.year, rango_fin_planilla.month, 1),rango_fin_planilla,resumen_periodo['employee_id'],parametros_gratificacion.cod_dias_faltas.codigo ) #sum([x.dias_faltas for x in conceptos])
afam = helper_liquidacion.getAsignacionFamiliarByDate(fecha_inicio_nominas,rango_fin_planilla,resumen_periodo['employee_id'],parametros_gratificacion.cod_asignacion_familiar.code ) #conceptos[0].asignacion_familiar if conceptos else 0.0
comisiones_periodo, promedio_bonificaciones, promedio_horas_trabajo_extra = helper_liquidacion.calcula_comision_gratificacion_hrs_extras(
contratos_empleado[0], fecha_computable, rango_fin_planilla, meses, rango_fin_planilla)
bonificacion_9 = 0
bonificacion = promedio_bonificaciones
comision = comisiones_periodo
dias = 0
# faltas = float(resumen_periodo['faltas']
# ) if resumen_periodo['faltas'] else 0.0
rem_computable = basico + \
bonificacion+comision + \
afam+promedio_horas_trabajo_extra
monto_x_mes = round(rem_computable/6.0, 2)
monto_x_dia = round(monto_x_mes/30.0, 2)
monto_x_meses = round(
monto_x_mes*meses, 2) if meses != 6 else rem_computable
monto_x_dias = round(monto_x_dia*dias, 2)
total_faltas = round(monto_x_dia*faltas, 2)
total_gratificacion = (monto_x_meses+monto_x_dias)-total_faltas
if resumen_periodo['tipo_empresa']=='microempresa':
total_gratificacion=0
elif resumen_periodo['tipo_empresa']=='pequenhaempresa':
total_gratificacion/=2.0
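            # Worked example (hypothetical figures): with rem_computable = 1800.00,
            # meses = 4, faltas = 2 and dias = 0:
            #   monto_x_mes   = 1800/6 = 300.00
            #   monto_x_dia   = 300/30 = 10.00
            #   monto_x_meses = 300*4  = 1200.00
            #   total_faltas  = 10*2   = 20.00
            #   total_gratificacion = (1200+0)-20 = 1180.00,
            # then halved for a 'pequenhaempresa' (590.00) or set to 0 for a
            # 'microempresa'.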
print "mi resumen_periodo ", resumen_periodo
print "mi plus9 ", self.plus_9
if self.plus_9:
print contratos_empleado[0].tipo_seguro
if contratos_empleado[0].tipo_seguro == 'essalud':
bonificacion_9 = parametros_eps.ratio_essalud / \
100.0*float(total_gratificacion)
else:
bonificacion_9 = parametros_eps.ratio_eps / \
100.0*float(total_gratificacion)
vals = {
'planilla_gratificacion_id': self.id,
'employee_id': resumen_periodo['employee_id'],
'identification_number': resumen_periodo['identification_id'],
'last_name_father': resumen_periodo['a_paterno'],
'last_name_mother': resumen_periodo['a_materno'],
'names': resumen_periodo['nombres'],
'fecha_ingreso': fecha_ini,
'meses': meses,
'faltas': faltas,
'basico': basico,
'a_familiar': afam,
'comision': comisiones_periodo,
'bonificacion': bonificacion,
'horas_extras_mean': promedio_horas_trabajo_extra,
'remuneracion_computable': rem_computable,
'monto_x_mes': monto_x_mes,
'monto_x_dia': monto_x_dia,
'monto_x_meses': monto_x_meses,
'monto_x_dias': monto_x_dias,
'total_faltas': total_faltas,
'total_gratificacion': total_gratificacion,
'plus_9': bonificacion_9,
'total': total_gratificacion+bonificacion_9
}
print "datos finales para ", resumen_periodo['nombres']
print "datos finales para ", vals
self.planilla_gratificacion_lines.create(vals)
return True
# abs(datetime(2017,2,1).month-datetime(2017,6,30).month)
# # return date(self._default_fecha_fin().year, 1, 1)
# todayDate = date.today()
# # if todayDate.day > 25:
# # todayDate += timedelta(7)
# return todayDate.replace(day=1)
@api.multi
def get_excel(self):
        # -------------------------------------------Data---------------------------------------------------
reload(sys)
sys.setdefaultencoding('iso-8859-1')
output = io.BytesIO()
# direccion = self.env['main.parameter'].search([])[0].dir_create_file
workbook = Workbook('CTS%s-%s.xlsx' % (self.year, self.tipo))
worksheet = workbook.add_worksheet(
'CTS%s-%s.xlsx' % (self.year, self.tipo))
lines = self.env['planilla.gratificacion.line'].search(
[('planilla_gratificacion_id', "=", self.id)])
self.getSheetGratificacion(workbook,worksheet,lines)
workbook.close()
f = open('CTS%s-%s.xlsx' % (self.year, self.tipo), 'rb')
vals = {
'output_name': 'CTS%s-%s.xlsx' % (self.year, dict(self._fields['tipo'].selection).get(self.tipo)),
'output_file': base64.encodestring(''.join(f.readlines())),
}
sfs_id = self.env['planilla.export.file'].create(vals)
# mod_obj = self.env['ir.model.data']
# act_obj = self.env['ir.actions.act_window']
# sfs_id = self.env['export.file.save'].create(vals)
return {
"type": "ir.actions.act_window",
"res_model": "planilla.export.file",
"views": [[False, "form"]],
"res_id": sfs_id.id,
"target": "new",
}
@api.multi
def getSheetGratificacion(self,workbook,worksheet,lines):
        # ----------------Formats------------------
basic = {
'align' : 'left',
'valign' : 'vcenter',
'text_wrap' : 1,
'font_size' : 9,
'font_name' : 'Calibri'
}
basic_center = basic.copy()
basic_center['align'] = 'center'
numeric = basic.copy()
numeric['align'] = 'right'
numeric['num_format'] = '0.00'
numeric_bold_format = numeric.copy()
numeric_bold_format['bold'] = 1
bold = basic.copy()
bold['bold'] = 1
header = bold.copy()
header['bg_color'] = '#A9D0F5'
header['border'] = 1
header['align'] = 'center'
title = bold.copy()
title['font_size'] = 15
basic_format = workbook.add_format(basic)
basic_center_format = workbook.add_format(basic_center)
numeric_format = workbook.add_format(numeric)
bold_format = workbook.add_format(bold)
numeric_bold_format = workbook.add_format(numeric_bold_format)
header_format = workbook.add_format(header)
title_format = workbook.add_format(title)
nro_columnas = 17
tam_col = [0]*nro_columnas
        # ----------------------------------------------Title--------------------------------------------------
rc = self.env['res.company'].search([])[0]
cabecera = rc.name
worksheet.merge_range('A1:B1', cabecera, title_format)
        # ---------------------------------------------Header------------------------------------------------
worksheet.merge_range('A2:D2', "CTS", bold_format)
worksheet.write('A3', u"Año :", bold_format)
worksheet.write('B3', self.year, bold_format)
columnas = ["Orden",
"Nro Documento",
"Apellido\nPaterno",
"Apellido\nMaterno",
"Nombres",
"Fecha\nIngreso",
"Meses",
"Faltas",
u"Básico",
"A.\nFamiliar",
u"Comision",
u"Bonificación",
"PROM. HRS\n EXTRAS",
"Rem.\nCom.",
"M. por\nMes",
u"M. por\nDía",
"Grat. Por\nlos Meses",
u"Grat. Por\nlos Días",
"Total\nFaltas",
u"Total\nGratificación",
"Bonif.\n9%", "Total\nPagar"]
fil = 4
for col in range(len(columnas)):
worksheet.write(fil, col, columnas[col], header_format)
worksheet.set_row(fil, 22)
        # ------------------------------------------Inserting data----------------------------------------------
fil = 5
totals = [0]*14
for line in lines:
col = 0
worksheet.write(fil, col, line.id, basic_format)
col += 1
worksheet.write(fil, col, line.identification_number, basic_format)
col += 1
worksheet.write(fil, col, line.last_name_father, basic_format)
col += 1
worksheet.write(fil, col, line.last_name_mother, basic_format)
col += 1
worksheet.write(fil, col, line.names, basic_format)
col += 1
worksheet.write(fil, col, line.fecha_ingreso, basic_center_format)
col += 1
worksheet.write(fil, col, line.meses, basic_center_format)
col += 1
worksheet.write(fil, col, line.faltas, basic_center_format)
col += 1
worksheet.write(fil, col, line.basico, numeric_format)
totals[col-8] += line.basico
col += 1
worksheet.write(fil, col, line.a_familiar, numeric_format)
totals[col-8] += line.a_familiar
col += 1
worksheet.write(fil, col, line.comision, numeric_format)
totals[col-8] += line.comision
col += 1
worksheet.write(fil, col, line.bonificacion, numeric_format)
totals[col-8] += line.bonificacion
col += 1
worksheet.write(fil, col, line.horas_extras_mean, numeric_format)
totals[col-8] += line.horas_extras_mean
col += 1
worksheet.write(
fil, col, line.remuneracion_computable, numeric_format)
totals[col-8] += line.remuneracion_computable
col += 1
worksheet.write(fil, col, line.monto_x_mes, numeric_format)
totals[col-8] += line.monto_x_mes
col += 1
worksheet.write(fil, col, line.monto_x_dia, numeric_format)
totals[col-8] += line.monto_x_dia
col += 1
worksheet.write(fil, col, line.monto_x_meses, numeric_format)
totals[col-8] += line.monto_x_meses
col += 1
worksheet.write(fil, col, line.monto_x_dias, numeric_format)
totals[col-8] += line.monto_x_dias
col += 1
worksheet.write(fil, col, line.total_faltas, numeric_format)
totals[col-8] += line.total_faltas
col += 1
worksheet.write(fil, col, line.total_gratificacion, numeric_format)
totals[col-8] += line.total_gratificacion
col += 1
worksheet.write(fil, col, line.plus_9, numeric_format)
totals[col-8] += line.plus_9
col += 1
worksheet.write(fil, col, line.total, numeric_format)
totals[col-8] += line.total
fil += 1
col = 8
for i in range(len(totals)):
worksheet.write(fil, col, totals[i], numeric_bold_format)
col += 1
col_size = [5, 12, 20]
worksheet.set_column('A:A', col_size[0])
worksheet.set_column('B:E', col_size[1])
worksheet.set_column('F:F', col_size[2])
worksheet.set_column('G:U', col_size[1])
class PlanillaGratificacionLine(models.Model):
_name = 'planilla.gratificacion.line'
planilla_gratificacion_id = fields.Many2one(
'planilla.gratificacion', "Planilla Gratificacion")
# fields.Many2one('hr.employee', "Empleado")
employee_id = fields.Integer(index=True)
# order = fields.Integer("Orden", compute='get_order')
identification_number = fields.Char("Nro Documento", size=9)
# code = fields.Char("Código", size=4)
last_name_father = fields.Char("Apellido Paterno")
last_name_mother = fields.Char("Apellido Materno")
names = fields.Char("Nombres")
fecha_ingreso = fields.Date("Fecha Ingreso")
meses = fields.Integer("Meses")
faltas = fields.Integer("Faltas")
basico = fields.Float(u"Básico", digits=(10, 2))
a_familiar = fields.Float("A. Familiar", digits=(10, 2))
comision = fields.Float(u"Comision", digits=(10, 2))
bonificacion = fields.Float(u"Bonificación", digits=(10, 2))
horas_extras_mean = fields.Float("Prom. Hras extras", digits=(10, 2))
remuneracion_computable = fields.Float("Rem. Com.", digits=(10, 2))
monto_x_mes = fields.Float("M. por Mes", digits=(10, 2))
monto_x_dia = fields.Float(u"M. por Día", digits=(10, 2))
monto_x_meses = fields.Float("Grat. Por los\nMeses", digits=(10, 2))
monto_x_dias = fields.Float(u"Grat. Por los\nDías", digits=(10, 2))
total_faltas = fields.Float(u"Total Faltas", digits=(10, 2))
total_gratificacion = fields.Float(u"Total\nGratificación", digits=(10, 2))
plus_9 = fields.Float(u"Bonif. 9%", digits=(10, 2))
# adelanto = fields.Float(u'Adelanto', digits=(10, 2))
# total_to_pay = fields.Float(u'Gratificación a pagar', digits=(10, 2))
total = fields.Float(u"Total Pagar", digits=(10, 2))
| [
"[email protected]"
] | |
338f19efb58b55b42cd987e0e1ddec5ce0c6c3ca | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/cdn/v20200415/get_profile.py | 1eded7cd80fac2a555ea3ca6f8ca513c8be3e5e2 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 4,875 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetProfileResult',
'AwaitableGetProfileResult',
'get_profile',
]
@pulumi.output_type
class GetProfileResult:
"""
CDN profile is a logical grouping of endpoints that share the same settings, such as CDN provider and pricing tier.
"""
def __init__(__self__, location=None, name=None, provisioning_state=None, resource_state=None, sku=None, tags=None, type=None):
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_state and not isinstance(resource_state, str):
raise TypeError("Expected argument 'resource_state' to be a str")
pulumi.set(__self__, "resource_state", resource_state)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning status of the profile.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceState")
def resource_state(self) -> str:
"""
Resource status of the profile.
"""
return pulumi.get(self, "resource_state")
@property
@pulumi.getter
def sku(self) -> 'outputs.SkuResponse':
"""
The pricing tier (defines a CDN provider, feature list and rate) of the CDN profile.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetProfileResult(GetProfileResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetProfileResult(
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_state=self.resource_state,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_profile(profile_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProfileResult:
"""
Use this data source to access information about an existing resource.
:param str profile_name: Name of the CDN profile which is unique within the resource group.
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['profileName'] = profile_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:cdn/v20200415:getProfile', __args__, opts=opts, typ=GetProfileResult).value
return AwaitableGetProfileResult(
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_state=__ret__.resource_state,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
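
# Example usage (a sketch -- the profile and resource group names below are
# hypothetical):
#
#   import pulumi
#
#   profile = get_profile(profile_name="my-cdn-profile",
#                         resource_group_name="my-resource-group")
#   pulumi.export("cdnProfileSku", profile.sku)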
| [
"[email protected]"
] | |
8a70830e256d8eeef43740b7dfed605cc776df39 | 09c39de5aad7b283cfac2f09a2b93e43086846d2 | /Unit 04 Functions/01 Functions/Built In Functions/15-abs.py | 52e6ea899312736b86311883bd835e64c64742ab | [
"MIT"
] | permissive | lpython2006e/python-samples | b4e84080259faf75b41fb2fd4fb9d2fbc9f857aa | b94ba67ce0d7798ecf796dadae206aa75da58301 | refs/heads/master | 2023-01-21T13:16:13.295163 | 2020-11-29T11:01:50 | 2020-11-29T11:01:50 | 278,653,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py |
absolute = abs(-42)
print(absolute)
| [
"[email protected]"
] | |
d3c27c23bfc320eceb72571839e9427a8cc1be52 | 07ecbc9bd09637defb25011f16816a5f4b9b7358 | /FOLTranslation.py | 940a8d0a12e2ed0cfc38c348daac963a12642262 | [] | no_license | pritomrajkhowa/webapp_loop_bound | c55d3621644dec28ec58a2db18a28288d62dd2b0 | 2f6ca4addf806f80d18ef7fe492b31bdfbf0d9e1 | refs/heads/master | 2022-12-07T22:19:34.790707 | 2020-09-03T00:17:25 | 2020-09-03T00:17:25 | 291,570,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101,212 | py | import sys
import os
currentdirectory = os.path.dirname(os.path.realpath(__file__))
sys.path.append(currentdirectory+"/packages/ply/")
sys.path.append(currentdirectory+"/packages/plyj/")
sys.path.append(currentdirectory+"/packages/pyparsing/")
sys.path.append(currentdirectory+"/packages/pycparser1/")
sys.path.append(currentdirectory+"/packages/pycparserext/")
#sys.path.append(currentdirectory+"/packages/regex/")
sys.path.append(currentdirectory+"/packages/mpmath/")
sys.path.append(currentdirectory+"/packages/sympy/")
sys.path.append(currentdirectory+"/packages/z3/python/")
from pyparsing import *
import copy
import time      # used by current_milli_time below
import datetime  # used by getTimeStamp below
import regex
from sympy import *
from sympy.core.relational import Relational
from pycparser1 import parse_file,c_parser, c_ast, c_generator
from pycparserext.ext_c_parser import GnuCParser
from pycparserext.ext_c_generator import GnuCGenerator
from itertools import permutations
#import execnet
ParserElement.enablePackrat()
def is_empty(any_structure):
if any_structure:
return False
else:
return True
def is_number(s):
if s=='j':
return False
try:
float(s) # for int, long and float
except ValueError:
try:
complex(s) # for complex
except ValueError:
return False
except TypeError:
return False
return True
def is_hex(input_string):
flag=True
if input_string is None:
return None
try:
flag=int(input_string, 16)
except ValueError:
return None
if flag:
if '0x' in input_string:
return str(int(input_string, 16))
else:
return None
else:
return None
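# Examples: is_hex('0x1A') returns '26'; is_hex('26') returns None (no '0x'
# prefix); is_hex('zz') returns None. Note that '0x0' also returns None,
# because int('0x0', 16) is 0 and therefore falsy.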
#Find Intersection of Two lists
def intersect3(c1,c2,c3):
return list(set(list(set(c1) & set(c2)))-set(c3))
def intersect2(c1,c2):
return list(set(c1) & set(c2))
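# Example: intersect3([1, 2, 3], [2, 3, 4], [3]) == [2] (the elements common
# to c1 and c2, minus those in c3); intersect2([1, 2], [2, 3]) == [2].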
def merge_two_dicts(x,y):
z=x.copy()
z.update(y)
return z
# base language (non dynamic, not changed by the program)
# do not use name with number in the end
# these names are not supposed to be used as prorgam variables
_base = ['=','==','!=','<','<=','>','>=','*','**','!','+','-','/', '%', 'ite', 'and', 'or', 'not', 'implies', 'all', 'some', 'null','>>','<<','&','|','^']
_infix_op = ['=','==','!=','<','<=','>','>=','*','**','+','-','/', '%', 'implies','<<','>>','&','|','^']
_relation_op = ['==','!=','<','<=','>','>=']
# variables introduced in the translation
def isvariable(x):
if x.startswith('_x') or x.startswith('_y') or x.startswith('_n') or x.startswith('_s'):
return True
else:
return False
# program variables and temporary program variables and big N
def is_program_var(x,v):
if x.startswith('_N'):
return True
for y in v:
if x==y or x.startswith(y+OUT) or (x.startswith(y+TEMP) and
x[len(y+TEMP):].isdigit()) or x.startswith(y+LABEL):
return True
return False
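# Example: with v = ['x'], is_program_var('x1', v), is_program_var('x_2', v)
# and is_program_var('_N3', v) are all True (output, labelled and loop-count
# functions), while is_program_var('z', v) is False.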
current_milli_time = lambda: int(round(time.time() * 1000))
"""
#Get timestap
"""
def getTimeStamp():
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
return "\n***********************\n"+str(st)+"\n***********************\n"
"""
RET for return value of a function
Temporary function names are constructed as:
variable-name + TEMP + TC
Output function names are:
variable-name + LABEL
for those with label, or
variable-name + OUT
for those without label.
TC: TempCount, a global counter for naming temporary variables
LC: LoopCount, a global counter for naming loop constants and variables
"""
RET='RET'
#OUT='Z' #so don't name variables xZ, yZ...
OUT='1' #so don't name variables x1, y1...
#TEMP = 'T' #so if x is a variable, then don't name variables xT,
TEMP = '' #so if x is a variable, then don't name variables x2,x3,... (temp starts at 2)
LABEL = '_' #so if x is a variable, then don't name variables x_1,x_2,
TC = 1 # for generating temporary functions to yield xt1,xt2,...
LC = 0 # for generating smallest macro constants in a loop _N1, _N2,... as well as
# natural number variables _n1,_n2,...
"""
Expressions: (f e1 ... ek) is represented as [f,e1,...,ek].
Example: a+1 is ['+', ['a'],['1']]; constant a is ['a'];
sum(i+1,j) is ['sum', ['+', ['i'], ['1']], ['j']]
"""
#constructor: functor - a string like '+', '*', 'and',
# or constants like '1', 'x'; args - a list of exprs
def expres(functor,args=[]):
return [functor]+args
#accessor
def expr_op(e):
return e[0]
def expr_args(e):
return e[1:]
#prefix printing
def expr2string(e):
if len(e)==1:
return e[0]
else:
return '(' + e[0] +' '+ ' '.join(list(expr2string(x) for x in e[1:]))+ ')'
#normal infix printing
def expr2string1(e):
args=expr_args(e)
op=expr_op(e)
if len(args)==0:
return op
else:
if op=='and' or op=='or':
if len(args)==1:
return expr2string1(args[0])
else:
return '('+(' '+op+' ').join(list(expr2string1(x) for x in args))+')'
elif op=='not' and len(args)==1:
return 'not '+expr2string1(args[0])
elif op=='implies' and len(args)==2:
return expr2string1(args[0])+ ' -> '+expr2string1(args[1])
elif op in _infix_op and len(args)==2:
return '(' + expr2string1(args[0])+ op+expr2string1(args[1])+')'
else:
return op +'('+ ','.join(list(expr2string1(x) for x in args))+ ')'
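#example round trip for the representation above:
#  e = expres('+', [expres('a'), expres('1')])   # the expression a+1
#  expr2string(e)  returns '(+ a 1)'
#  expr2string1(e) returns '(a+1)'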
#return the list of program variables in an expression
def expr_func(e,v): #e - expr
ret = []
f = expr_op(e)
if is_program_var(f,v) or '__VERIFIER_nondet' in f:
ret.append(f)
for e1 in expr_args(e):
ret = ret + expr_func(e1,v)
return ret
#substitution of functors: in e, replace functor n1 by n2
def expr_sub(e,n1,n2): # e - expr; n1,n2 - strings
e1=list(expr_sub(x,n1,n2) for x in e[1:])
if e[0]==n1:
return [n2]+e1
else:
return e[:1]+e1
#substitution of functors in a set: in e, for all x in v1 but not in v2, replace x+n1 by x+n2
def expr_sub_set(e,n1,n2,v1,v2): #e - expr; n1,n2 - strings, v1, v2 - sets of strings
e1 = list(expr_sub_set(e2,n1,n2,v1,v2) for e2 in e[1:])
if e[0].endswith(n1):
x = e[0][:len(e[0])-len(n1)]
if (x in v1) and (not x in v2):
return [x+n2]+e1
else:
return e[:1]+e1
else:
return e[:1]+e1
# expr_replace(e,e1,e2): replace all subterm e1 in e by e2
def expr_replace(e,e1,e2): #e,e1,e2: expr
if e==e1:
return e2
else:
return e[:1]+list(expr_replace(x,e1,e2) for x in expr_args(e))
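#example: expr_replace(['+', ['a'], ['1']], ['a'], ['b']) returns
#['+', ['b'], ['1']]: every occurrence of the subterm a is replaced by b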
# expr_sub_dict(e,d): d is a dictionary of substitutions: functor 'f' to e1=d['f'] so that in e, each f term f(t1,...,tk) is replaced by e1(_x1/t1,...,_xk/tk)
def expr_sub_dict(e,d):
args = expr_args(e)
args1 = list(expr_sub_dict(x,d) for x in args)
if expr_op(e) in d:
return expr_sub_var_list(d[expr_op(e)],list(expres('_x'+str(i+1)) for i in range(len(args))),args1)
else:
return expres(expr_op(e),args1)
# expr_sub_var_list(e,l1,l2): in e, replace all terms in l1 by the corresponding terms in l2
def expr_sub_var_list(e,l1,l2): #e: expr, l1,l2: lists of the same length of exprs
for i,x in enumerate(l1):
if e==x:
return l2[i]
return e[:1]+list(expr_sub_var_list(y,l1,l2) for y in expr_args(e))
# compute E[n] extend(e,n,excl,v). n is an expr like ['_n1'], excl is a container of strings that are not to be extended
def extend(e,n,excl,v):
op = expr_op(e)
x = [n] if (is_program_var(op,v) and not (op in excl)) or '__VERIFIER_nondet' in op else []
return expres(op, list(extend(e1,n,excl,v) for e1 in expr_args(e)) + x)
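#example: with v = ['x'], extend(['+', ['x'], ['1']], ['_n1'], [], v) returns
#['+', ['x', ['_n1']], ['1']]: each program function gets the stage term
#appended as an extra argument, which is what E[n] denotes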
#A dictionary of dependencies para is such that, if x is an input variable, then para[x] is a list whose first element is 1 and the second element is the variable's parameter name; otherwise, para[x] is the list of input variables that x is depended on.
#example: para = { 'X':[1,['_y1']], 'X11':[0,['_y1','_y2'], ['X','Y']],...} meaning 'X' is an input variable parameterized as '_y1' and 'X11' is a function depending on X and Y whose corresponding parameters are '_y1' and '_y2', respectively.
#So after parameterization, X11(a,X) will become X11(a,_y1,_y1,_y2)
def parameterize_expres(e,para):
if e[0] in para:
if para[e[0]][0] == 1:
return para[e[0]][1]+list(parameterize_expres(x,para) for x in e[1:])
else:
return e[:1]+list(parameterize_expres(x,para) for x in e[1:])+para[e[0]][1]
else:
return e[:1]+list(parameterize_expres(x,para) for x in e[1:])
#parameterize non-input functions, then restore the input variables to their names
#given the above para, X11(a,X) will become X11(a,X,X,Y), assuming that _y2 corresponds to Y
def parameterize_expr_sub(e,para):
if e[0] in para:
if para[e[0]][0] == 1:
return [e[0]]+list(parameterize_expr_sub(x,para) for x in e[1:])
else:
return e[:1]+list(parameterize_expr_sub(x,para) for x in e[1:])+para[e[0]][2]
else:
return e[:1]+list(parameterize_expr_sub(x,para) for x in e[1:])
"""
Formulas:
1. equations f(x) = e: ['e',e1,e2],
where e1 is expression for f(x) and e2 for e
2. inductive definition:
- base case f(x1,...,xk,0,...,xm)=e: ['i0',k,e1,e2]
where e1 is Expr for f(x1,...,xk,0,...,xm) and e2 the Expr for e
- inductive case f(x1,...,xk,n+1,...,xm)=e: ['i1',k,'n',e1,e2]
where e1 is Expr for f(x1,...,xk,n+1,...,xm) and e2 the Expr for e
3. inductive definition for functions that return natural numbers
(like N in smallest macro):
- base case f(x) = 0 iff C: ['d0',e,c]
where e is the Expr for f(x) and c an expression for condition C
- inductive case f(x) = n+1 iff C(n): ['d1','n',e,c]
where e is the Expr for f(x) and c an Expr for condition C
4. any other axioms: A: ['a',e], where e is the Expr for A
5. constraints from smallest macro smallest(N,n,e):
['s0', e(N)]
['s1', forall n<N -> not e]
Examples: a' = a+1: ['e', ['a\''], ['+',['a'],['1']]]
N(x) = 0 if x<I else N(x-1)+1 is divided into two axioms:
N(x) = 0 iff x<I:
['d0', ['N',['x']], ['<', ['x'],['I']]] and
N(x) = n+1 iff n=N(x-1):
['d1','n', ['N',['x']], ['=',['n'], ['N', ['-', ['x'],['1']]]]]
"""
# constructors
def wff_e(e1,e2): #e1,e2: expr
return ['e',e1,e2]
def wff_i0(k,e1,e2): #k: int; e1,e2: expr
return ['i0',k,e1,e2]
def wff_i1(k,v,e1,e2): #k: int; v: string; e1,e2: expr
return ['i1',k,v,e1,e2]
def wff_d0(e,c): #e: expr; c: expr
return ['d0',e,c]
def wff_d1(v,e,c): #v: string, e and c: expr
return ['d1',v,e,c]
def wff_a(e): #e: expr
return ['a',e]
def wff_s0(e):
return ['s0',e]
def wff_s1(e):
return ['s1',e]
def wff_c1(e):
return ['c1',e]
#print in prefix notation
def wff2string(w):
if w[0] == 'e' or w[0] == 'i0' or w[0] == 'i1' or w[0] == 'R':
return '(= '+expr2string(w[-2])+' '+expr2string(w[-1]) +')'
elif w[0] == 'd0':
return '(iff (= '+expr2string(w[1])+' 0) '+ expr2string(w[2])+')'
elif w[0] == 'd1':
return '(iff (= '+expr2string(w[2])+' (+ '+w[1]+' 1)) '+expr2string(w[3])+')'
elif w[0]=='a' or w[0]=='s0' or w[0]=='s1' or w[0]=='c1' or w[0] == 'R':
return expr2string(w[1])
#print in normal infix notation
def wff2string1(w):
if w[0] == 'e' or w[0] == 'i0' or w[0] == 'i1' or w[0] == 'i2' or w[0] == 'R':
return expr2string1(w[-2])+' = '+ expr2string1(w[-1])
elif w[0] == 'd0':
return expr2string1(w[1])+'=0 <=> '+ expr2string1(w[2])
elif w[0] == 'd1':
return expr2string1(w[2])+'='+w[1]+'+1 <=> '+expr2string1(w[3])
elif w[0]=='a' or w[0]=='s0' or w[0]=='s1' or w[0]=='c1':
return expr2string1(w[1])
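#example: the equation a' = a+1 from the notes above is built as
#  w = wff_e(expres("a'"), expres('+', [expres('a'), expres('1')]))
#wff2string1(w) returns "a' = (a+1)" and wff2string(w) returns "(= a' (+ a 1))"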
#strip '(' at the beginning and the matching ')' at the end of a string
def trim_p(s):
if s.startswith('(') and s.endswith(')'):
return trim_p(s[1:-1])
else:
return s
#return the (lhs, rhs) string pair of an equation wff, for use as a substitution
def wff2subslist(w):
if w[0] == 'e':
return expr2string1(w[-2]),expr2string1(w[-1])
#for a formula w, compute w[n]
def wff_extend(w,n,excl,v): #w: wff, n: expr, excl: container of strings
if w[0]=='e': #['e',e1,e2]
return ['e',extend(w[1],n,excl,v),extend(w[2],n,excl,v)]
elif w[0]=='i0': #['i0',k,e1,e2]
return ['i0',w[1],extend(w[2],n,excl,v),extend(w[3],n,excl,v)]
elif w[0]=='i1': #['i1',k,v,e1,e2]
return ['i1',w[1],w[2],extend(w[3],n,excl,v),extend(w[4],n,excl,v)]
elif w[0]=='d0': #['d0',e,c]
return ['d0',extend(w[1],n,excl,v),extend(w[2],n,excl,v)]
elif w[0]=='d1': #['d1',v,e,c]
return ['d1',w[1],extend(w[2],n,excl,v),extend(w[3],n,excl,v)]
elif w[0]=='a' or w[0]=='s0' or w[0]=='s1':
return [w[0], extend(w[1],n,excl,v)]
else:
print('Not a wff')
return
#for a formula w, replace functor old by new
def wff_sub(w,old,new): #w - wff; old, new - string
if w[0]=='e': #['e',e1,e2]
return ['e',expr_sub(w[1],old,new),expr_sub(w[2],old,new)]
elif w[0]=='i0': #['i0',k,e1,e2]
return ['i0',w[1],expr_sub(w[2],old,new),expr_sub(w[3],old,new)]
elif w[0]=='i1': #['i1',k,v,e1,e2]
return ['i1',w[1],w[2],expr_sub(w[3],old,new),expr_sub(w[4],old,new)]
elif w[0]=='d0': #['d0',e,c]
return ['d0',expr_sub(w[1],old,new),expr_sub(w[2],old,new)]
elif w[0]=='d1': #['d1',v,e,c]
return ['d1',w[1],expr_sub(w[2],old,new),expr_sub(w[3],old,new)]
elif w[0]=='a' or w[0]=='s0' or w[0]=='s1': #['a',e]
return [w[0],expr_sub(w[1],old,new)]
else:
print('Not a wff')
return
#for a formula w, replace functor x+old by x+new for those in v1 but not in v2
def wff_sub_set(w,old,new,v1,v2): #w - wff; old, new - string; v1,v2: sets
if w[0]=='e': #['e',e1,e2]
return ['e',expr_sub_set(w[1],old,new,v1,v2),expr_sub_set(w[2],old,new,v1,v2)]
elif w[0]=='i0': #['i0',k,e1,e2]
return ['i0',w[1],expr_sub_set(w[2],old,new,v1,v2),expr_sub_set(w[3],old,new,v1,v2)]
elif w[0]=='i1': #['i1',k,v,e1,e2]
return ['i1',w[1],w[2],expr_sub_set(w[3],old,new,v1,v2),expr_sub_set(w[4],old,new,v1,v2)]
elif w[0]=='d0': #['d0',e,c]
return ['d0',expr_sub_set(w[1],old,new,v1,v2),expr_sub_set(w[2],old,new,v1,v2)]
elif w[0]=='d1': #['d1',v,e,c]
return ['d1',w[1],expr_sub_set(w[2],old,new,v1,v2),expr_sub_set(w[3],old,new,v1,v2)]
elif w[0]=='a' or w[0]=='s0' or w[0]=='s1': #['a',e]
return [w[0],expr_sub_set(w[1],old,new,v1,v2)]
else:
print('Not a wff')
return
#like expr_sub_dict(e,d) but on wffs
def wff_sub_dict(w,d): #w - wff; d - a dictionary as in expr_sub_dict(e,d)
if w[0]=='e': #['e',e1,e2]
return w[:2]+[expr_sub_dict(w[2],d)]
elif w[0]=='i0': #['i0',k,e1,e2]
return w[:3]+[expr_sub_dict(w[3],d)]
elif w[0]=='i1': #['i1',k,v,e1,e2]
return w[:4]+[expr_sub_dict(w[4],d)]
elif w[0]=='d0': #['d0',e,c]
return w[:2]+[expr_sub_dict(w[2],d)]
elif w[0]=='d1': #['d1',v,e,c]
return w[:3]+[expr_sub_dict(w[3],d)]
elif w[0]=='a' or w[0]=='s0' or w[0]=='s1': #['a',e]
return [w[0],expr_sub_dict(w[1],d)]
else:
print('Not a wff')
return
#parameterize a set of axioms by making program functions as input variables
#para = { 'X':[1,['_y1']], 'X11':[0,['_y1','_y2'],['X','Y']],...} meaning 'X' is an input variable parameterized as '_y1' and 'X11' is a function taking two new parameters '_y1','_y2'
#X11(a,X)=X11(a+b,1) will become X11(a,_y1,_y1,_y2)=X11(a+b,1,_y1,_y2)
def parameterize_wff(ax,para):
if not (ax[0] == 'a' or ax[0]=='s0' or ax[0]=='s1'):
e1 = parameterize_expres(ax[-2],para)
e2 = parameterize_expres(ax[-1],para)
return ax[:-2]+[e1,e2]
else:
e2 = parameterize_expres(ax[-1],para)
return [ax[0],e2]
#for all x in dep_set, add dep_set[x] as arguments, except when x is RET+OUT,
#replace it by foo()
def parameterize_axioms_fun(axioms,dep_set):
for x in axioms:
parameterize_wff_fun(x,dep_set)
def parameterize_wff_fun(ax,dep_set):
if not (ax[0] == 'a' or ax[0]=='s0' or ax[0]=='s1'):
e1 = parameterize_expres_fun(ax[-2],dep_set)
e2 = parameterize_expres_fun(ax[-1],dep_set)
return ax[:-2]+[e1,e2]
else:
e2 = parameterize_expres_fun(ax[-1],dep_set)
return [ax[0],e2]
def parameterize_expres_fun(e,dep_set):
if e[0]==RET+OUT:
if len(e) != 1:
print('Something is wrong '+RET+OUT+' should not have arguments')
return
else:
return dep_set[RET+OUT]
elif e[0] in dep_set:
return expres(e[0],list(parameterize_expres_fun(x,dep_set) for x in e[1:])+dep_set[e[0]])
else:
return expres(e[0],list(parameterize_expres_fun(x,dep_set) for x in e[1:]))
def eqset2string(d):
for x in d:
print(wff2string(d[x]))
def eqset2string1(d):
for x in d:
print(wff2string1(d[x]))
def eqset2string1Print(d):
output_val=''
for x in d:
output_val+=wff2string1(d[x])+"\n"
return output_val
# translate0(program, set of program variables) returns frame axioms (dict), output equations (dict), a list of other axioms, and a label
def translate0(p,v,flag):
if p[1]=='while':
return translateWhile(p,v,flag)
if p[1]=='seq':
return translateSeq(p,v,flag)
if p[1]=='if1':
return translateIf1(p,v,flag)
if p[1]=='if2':
return translateIf2(p,v,flag)
if p[1]=='=':
return translateAssign(p,v,flag)
if p[1]=='fun':
return translateFun(p,v,flag)
if p[1]=='prog':
return translateProgram(p,v,flag)
# function definition
def translateFun(p,v,flag): #p=['-1','fun',['foo','x',..,'y'], b]
#global TC
#global LC
#TC=0
#LC=0
f,o,a,l = translate0(p[-1],v,flag)
axioms=a
for x in f:
axioms=axioms+[f[x]]
for x in o:
axioms=axioms+[o[x]]
g = graph(axioms,v) #construct dependency graph
param = list(expres(a) for a in p[-2][1:]) #parameters of the function
dep_set = {} #dependency set for each variables in the axiom
dep_set[RET+OUT]=expres(p[-2][0],param) #initialize it to the return function
for (x,y) in g:
if (not x in dep_set) and (not expres(x) in param):
dep = []
for x1 in reach_set([x],g):
if (expres(x1) in param) and not (expres(x1) in dep):
dep.append(expres(x1))
dep_set[x] = dep
for x in f:
f[x]=parameterize_wff_fun(f[x],dep_set)
for x in o:
o[x]=parameterize_wff_fun(o[x],dep_set)
for i,ax in enumerate(a):
a[i]=parameterize_wff_fun(ax,dep_set)
return [dep_set[RET+OUT],f,o,a,l]
# program: a set of functions
#p=['-1','prog',[f1,...,fk]]
#for each fi, v[fi] is the list of variables used in the function fi
def translateProgram(p,v,flag):
result = {}
for x in p[-1]:
funcName = x[2][0]
result[funcName] = translate0(x,v[funcName],flag)
return result
# assignment translation: p a program and v a set of program variables
map___VERIFIER_nondet={}
def translateAssign(p,v,flag): #p=[l,'=',left,right]
global map___VERIFIER_nondet
if p[1] != '=':
print('Not an assignment')
return
left = p[2] #left side of the assigment
op = left[0] #the functor in left
arity = len(left)-1 #arity of op
right = p[3] #right side of the assignment
right = update__VERIFIER_nondet_stmt(right,map___VERIFIER_nondet)
out=OUT if p[0] == '-1' else LABEL+p[0]
out_axioms = {}
frame_axioms = {}
for x in v:
if x == op:
args = list(expres('_x'+str(i+1)) for i in range(arity))
cond = expres('=',[expres('_x1'),left[1]]) if arity==1 else \
expres('and', list(expres('=', [expres('_x'+str(i2+1)),y]) for \
i2,y in zip(range(arity),left[1:])))
if arity == 0:
out_axioms[x]=wff_e(expres(op+out),right)
else:
out_axioms[x]=wff_e(expres(op+out,args), expres('ite',[cond,right,expres(op,args)]))
else:
args = list(expres('_x'+str(i+1)) for i in range(len(v[x])-2))
frame_axioms[x]=wff_e(expres(x+out,args), expres(x,args))
return frame_axioms, out_axioms, [], p[0]
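# For example (sketch, with OUT-suffixed names standing for post-state
# functions): translating  y = y + 1  over v = {'x': ..., 'y': ...} yields
# the output equation  y_out = y + 1  and the frame axiom  x_out = x.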
def translateIf1(p,v,flag): # p=[l,'if1',c,e]
global map___VERIFIER_nondet
if p[1] != 'if1':
print('Not an if-then')
return
global TC
frame_axioms,out_axioms,axioms,llabel = translate0(p[3],v,flag)
old_out = OUT if llabel=='-1' else LABEL+llabel
out=OUT if p[0] == '-1' else LABEL+p[0]
if llabel=='-1': # body has no final label
TC += 1
body_out = TEMP+str(TC) if llabel=='-1' else LABEL+llabel
p[2] = update__VERIFIER_nondet_stmt(p[2],map___VERIFIER_nondet)
for x in v:
if x in frame_axioms:
ax = frame_axioms[x] #ax = ['e',e1,e2]
if llabel != '-1': #body has label: keep axioms about it
axioms.append(ax)
#generate the new frame axiom
frame_axioms[x] = wff_e(expr_sub(ax[1],x+old_out,x+out), ax[2])
else:
ax = out_axioms[x] #ax = ['e',e1,e2]
if llabel != '-1': #body has label: keep axioms about it
axioms.append(ax)
out_axioms[x] = wff_e(expres(x+out, ax[1][1:]),
expres('ite', [p[2], ax[2], expres(x,ax[1][1:])]))
return frame_axioms, out_axioms, axioms, p[0]
def translateIf2(p,v,flag): # p=[l,'if2',c,e1,e2]
global map___VERIFIER_nondet
if p[1] != 'if2':
print('Not an if-then-else')
return
global TC
frame_axioms0,out_axioms0,axioms0,llabel0 = translate0(p[3],v,flag)
frame_axioms1,out_axioms1,axioms1,llabel1 = translate0(p[4],v,flag)
axioms = axioms0+axioms1
old_out0 = OUT if llabel0=='-1' else LABEL+llabel0
old_out1 = OUT if llabel1=='-1' else LABEL+llabel1
out=OUT if p[0] == '-1' else LABEL+p[0]
if llabel0=='-1': # if body has no final label
TC += 1
body_out0 = TEMP+str(TC) if llabel0=='-1' else LABEL+llabel0 # if body new out
if llabel1=='-1': # else body has no final label
TC += 1
body_out1 = TEMP+str(TC) if llabel1=='-1' else LABEL+llabel1 # else body new out
frame_axioms = {}
out_axioms = {}
p[2] = update__VERIFIER_nondet_stmt(p[2],map___VERIFIER_nondet)
for x in v:
if x in frame_axioms0 and x in frame_axioms1:
ax0 = frame_axioms0[x] #ax0 = ['e',e1,e2]
ax1 = frame_axioms1[x] #ax1 = ['e',e1,e2]
if llabel0 != '-1': #if body has label: keep axioms about it
axioms.append(ax0)
if llabel1 != '-1': #else body has label: keep axioms about it
axioms.append(ax1)
#generate the new frame axiom
frame_axioms[x] = wff_e(expr_sub(ax0[1],x+old_out0,x+out), ax0[2])
else:
if x in frame_axioms0:
ax0=frame_axioms0[x]
else:
ax0=out_axioms0[x]
if x in frame_axioms1:
ax1=frame_axioms1[x]
else:
ax1=out_axioms1[x]
if llabel0 != '-1': #if body has label: keep axioms about it
axioms.append(ax0)
if llabel1 != '-1': #else body has label: keep axioms about it
axioms.append(ax1)
out_axioms[x] = wff_e(expres(x+out, ax0[1][1:]),
expres('ite', [p[2], ax0[2], ax1[2]]))
return frame_axioms, out_axioms, axioms, p[0]
def translateSeq(p,v,flag): # p=['-1','seq',p1,p2]
if p[1] != 'seq':
print('Not a sequence')
return
global TC
frame_axioms0,out_axioms0,axioms0,llabel0 = translate0(p[2],v,flag)
frame_axioms1,out_axioms1,axioms1,llabel1 = translate0(p[3],v,flag)
old_out0 = OUT if llabel0=='-1' else LABEL+llabel0
if llabel0=='-1': # if p1 has no final label
TC += 1
new_out0 = TEMP+str(TC) if llabel0=='-1' else LABEL+llabel0 # p1 new out
frame_axioms = {}
out_axioms = {}
    para = {} #a dictionary of substitutions: para[x] is the expression to replace x(t) in p2's axioms
for x in v:
if x in frame_axioms0 and x in frame_axioms1:
if llabel0 !='-1': #p1 has label, keep its axioms
axioms0.append(frame_axioms0[x])
frame_axioms[x]=frame_axioms1[x]
else:
if x in frame_axioms0:
ax0=frame_axioms0[x] #ax0=['e',e1,e2]
else:
ax0=out_axioms0[x]
if llabel0 != '-1': #p1 has label: keep equations about it
axioms0.append(ax0)
para[x]=ax0[2]
for i,ax in enumerate(axioms1): #substituting p1's output into p2's input in p2's axioms
axioms1[i] = wff_sub_dict(ax,para)
for x in v: #do the same for the p2's output equations and frame axioms
if not x in frame_axioms:
if x in frame_axioms1:
out_axioms[x] = frame_axioms1[x][:2]+[expr_sub_dict(frame_axioms1[x][2],para)]
else:
out_axioms[x] = out_axioms1[x][:2]+[expr_sub_dict(out_axioms1[x][2],para)]
for x in out_axioms:
out_axioms[x][-1] = simplification_expr(out_axioms[x][-1])
for x in axioms0:
x[-1] = simplification_expr(x[-1])
for x in axioms1:
x[-1] = simplification_expr(x[-1])
return frame_axioms, out_axioms, axioms0+axioms1, llabel1
def translateWhile(p,v,flag): #p=[l, 'while', c, b]
global map___VERIFIER_nondet
if p[1] != 'while':
print('Not a while statement')
return
global LC
global TC
frame_axioms, out_axioms0, axioms,llabel = translate0(p[3],v,flag) # axioms and output labels for the body of the loop
LC += 1
if llabel=='-1': # if body has no final label
if TC==0:
TC += 2
else:
TC += 1
loop_var = expres('_n'+str(LC)) #a new natural number variable for the loop
smallest = expres('_N'+str(LC)) #a new natural number variable for the loop
init=TEMP+str(TC) if llabel=='-1' else LABEL+llabel #iterating functions
old_out=OUT if llabel=='-1' else LABEL+llabel #original output functions in body
out=OUT if p[0]=='-1' else LABEL+p[0] #new output functions for the loop
for i0, ax0 in enumerate(axioms): #extend the axioms with [n]
ax0 = wff_sub_set(ax0,'',init,v,frame_axioms)
axioms[i0]=wff_extend(ax0, loop_var, frame_axioms,v)
for x in frame_axioms:
ax = frame_axioms[x] #ax = ['e',e1,e2]
if llabel != '-1': #body has label: keep axioms about it
axioms.append(ax)
#generate the new frame axiom
frame_axioms[x] = wff_e(expr_sub(ax[1],x+old_out,x+out), ax[2])
out_axioms00={}
for x in out_axioms0:
ax = out_axioms0[x] #ax = ['e',e1,e2]
#change output and input variable names to loop and extend e2[loop_var]
ax = wff_sub_set(ax,old_out,init,v,frame_axioms)
ax = wff_sub_set(ax,'',init,v,frame_axioms)
out_axioms00[x]=ax[:2]+[extend(ax[2],loop_var,frame_axioms,v)]
# using Pritom's solve_rec() to try to get closed-form solution
found_solution=True
variable=None
temp_out_axioms00 = copy.deepcopy(out_axioms00)
while found_solution:
found1=False
for x in temp_out_axioms00.keys():
if x in out_axioms00.keys():
ax=out_axioms00[x]
if expr_func(ax[2],v)==[]:
found1=True
e=extend(ax[1],loop_var,frame_axioms,v)
axioms.append(wff_e(e,ax[2]))
del out_axioms00[x]
for y in out_axioms00:
ax1= out_axioms00[y]
out_axioms00[y]=ax1[:2]+[expr_sub_dict(ax1[2],{expr_op(ax[1]):ax[2]})]
else:
e1=wff_i1(0,expr_op(loop_var),extend(ax[1],expres('+',[loop_var,['1']]),frame_axioms,v),ax[2])
e2=wff_i0(0,extend(ax[1],expres('0'),frame_axioms,v),expres(x,expr_args(ax[1])))
res=solve_rec(e1,e2)
if res != None: #res = ['i2',k,n,e1,e2]
found1=True
variable=res[2] # Variable add by Pritom Rajkhowa
axioms.append(wff_e(res[3],res[4]))
del out_axioms00[x]
for y in out_axioms00:
ax1= out_axioms00[y]
out_axioms00[y]=ax1[:2]+[expr_sub_dict(ax1[2],{expr_op(res[3]):res[4]})]
if not found1:
found_solution=False
for x in out_axioms00:
ax = out_axioms00[x] #ax = ['e',e1,e2]
e1=extend(ax[1],expres('+',[loop_var,['1']]),frame_axioms,v)
e2=ax[2]
axioms.append(wff_i1(len(expr_args(e1))-1,expr_op(loop_var),e1,e2))
#base case
for x in out_axioms00:
arity = len(v[x])-2
args = list(expres('_x'+str(i+1)) for i in range(arity))
axioms.append(wff_i0(arity,expres(x+init,args+[expres('0')]), expres(x,args)))
c=p[2] #loop condition
c = update__VERIFIER_nondet_stmt(c,map___VERIFIER_nondet)
c=expr_sub_set(c,'',init,v,frame_axioms)
c = extend(c,loop_var,frame_axioms,v) #add the smallest macro
    #Added by Pritom Rajkhowa
cc = copy.deepcopy(c)
axioms.append(wff_s0(expr_sub(expr_complement(cc),expr_op(loop_var),expr_op(smallest))))
#axioms.append(wff_s0(expres('not',[expr_sub(c,expr_op(loop_var),expr_op(smallest))])))
axioms.append(wff_s1(expres('implies',
[expres('<', [loop_var, smallest]),c])))
out_axioms = {}
for x in v: # generate out_axioms
if not x in frame_axioms:
args = list(expres('_x'+str(i+1)) for i in range(len(v[x])-2))
e1=expres(x+out,args)
args.append(smallest)
e2=expres(x+init,args)
out_axioms[x]=wff_e(e1,e2)
    #substitution of closed-form solution, by Pritom Rajkhowa
constant='_N'+str(LC)
variable='_n'+str(LC)
update_axioms=[]
equations=[]
for ax in axioms:
if ax[0]=='e':
equations.append(ax)
else:
update_axioms.append(ax)
for equation in equations:
equation1=copy.deepcopy(equation)
update_axioms=solnsubstitution(update_axioms,equation[1],equation[2])
equation1[1]=expr_replace_const(equation1[1],variable,constant)
equation1[2]=expr_replace_const(equation1[2],variable,constant)
update_axioms=solnsubstitution(update_axioms,equation1[1],equation1[2])
for x in out_axioms:
stmt=out_axioms[x]
stmt[2]=expr_replace(stmt[2],equation1[1],equation1[2])
axioms=update_axioms
updated_axioms=[]
for ax in axioms:
if ax[0]=='s0':
expression=expr2string1(ax[1])
if '->' not in expression and constant in expression:
if '**' not in expression:
#expression=normal_form_constant(expression, constant)
if '**' not in str(expression):
parser = c_parser.CParser()
ast = parser.parse("void test(){"+str(expression)+";}")
statement_temp=ast.ext[0].body.block_items[0]
axupdate = construct_expression_normalC(eval(expressionCreator_C(statement_temp)))
#axupdate=construct_expression_normal(tree)
if axupdate is not None:
updated_axioms.append(axupdate)
else:
updated_axioms.append(ax)
else:
updated_axioms.append(ax)
else:
updated_axioms.append(ax)
elif '<=' in expression and 'and' not in expression and 'or' not in expression:
if '**' not in expression:
#expression=normal_form_constant(expression, constant)
#expression = normal_form_constant(expression, constant)
#pp = getParser()
if '**' not in str(expression):
if '**' not in str(expression):
parser = c_parser.CParser()
ast = parser.parse("void test(){"+str(expression)+";}")
statement_temp=ast.ext[0].body.block_items[0]
#tree = pp.parse_expression(str(expression))
#axupdate=construct_expression_normal(tree)
axupdate = construct_expression_normalC(eval(expressionCreator_C(statement_temp)))
if axupdate is not None:
updated_axioms.append(axupdate)
else:
updated_axioms.append(ax)
else:
updated_axioms.append(ax)
else:
updated_axioms.append(ax)
else:
updated_axioms.append(ax)
else:
updated_axioms.append(ax)
#------------------------------------------
axioms=[]
for ax in updated_axioms:
axioms.append(ax)
    #substitution of closed-form solution, by Pritom Rajkhowa
if flag==2:
g = graph(axioms,v) #construct dependency graph
for x in expr_func(p[2],v):
if not ['_N'+str(LC), x] in g:
g.append(['_N'+str(LC), x])
g.append(['_N'+str(LC), x+init])
for x in out_axioms00:
if not [x+init,x] in g:
g.append([x+init,x])
if not [x,x+init] in g:
g.append([x,x+init])
for y in expr_func(out_axioms00[x][2],v):
if not [x,y] in g:
g.append([x,y])
        #build a dictionary para = { 'X':[1,['_y1']], 'X11':[0,['_y1','_y2'],['X','Y']],...}
#meaning 'X' is an input variable parameterized as '_y1' and
#'X11' is a function taking two new parameters '_y1' and '_y2' which correspond
# to 'X' and 'Y', respectively
para={}
for [x,x1] in g: #compute the dependency sets
if x in v and not x in frame_axioms:
para[x] = [1,[v[x][0]]]
else:
if not x in para and not x in frame_axioms:
t=[]
t1=[]
for y in reach_set([x],g):
if y in v and (not expres(y) in t1) and (not y in frame_axioms):
t.append(expres(v[y][0]))
t1.append(expres(y))
if t != []:
para[x] = [0,t,t1]
#parameterize input variables that N depends on and all associated functions
for i,ax in enumerate(axioms):
axioms[i] = parameterize_wff(ax,para)
#construct inductive definition for N
s_args = para['_N'+str(LC)][1]
smallest1=expres('_N'+str(LC), s_args)
next_args=[]
for i,y in enumerate(s_args):
x=expr_op(para['_N'+str(LC)][2][i])
next_args.append(parameterize_expres(out_axioms0[x][2],para))
axioms.append(['d0',smallest1, parameterize_expres(expres('not',[p[2]]),para)])
axioms.append(['d1','_n'+str(LC), smallest1,
expres('=',[loop_var,expres('_N'+str(LC),next_args)])])
#parameterize output axioms
for x in out_axioms:
out_axioms[x]=out_axioms[x][:2]+[parameterize_expr_sub(out_axioms[x][2],para)]
new_axioms = [] #for creating new inductive definitions
for ax in axioms:
if ax[0]=='i1':
x=expr_op(ax[3])
if x.endswith(init) and x[:len(x)-len(init)] in v:
next_args=[]
for k,arg in enumerate(expr_args(ax[3])):
if k==ax[1]:
next_args.append(expres(ax[2]))
else:
a=expr_op(arg)
if a.startswith('_y'):
for b in v:
if v[b][0]==a:
next_args.append(parameterize_expres(out_axioms0[b][2],para))
else:
next_args.append(arg)
new_axioms.append(ax[0:4]+[expres(x,next_args)])
axioms=axioms+new_axioms
return frame_axioms, out_axioms, axioms, p[0]
#construct a graph of dependency relation in a set of equations axioms
def graph(axioms,v):
ret = []
for ax in axioms:
if ax[0]=='e' or ax[0]=='i0' or ax[0]=='i1' or ax[0]=='d0' or ax[0]=='d1':
op=expr_op(ax[-2])
for x in expr_func(ax[-1],v):
if not [op,x] in ret:
ret.append([op,x])
elif ax[0]=='s1':
op=expr_op(expr_args(expr_args(ax[1])[0])[1])
for x in expr_func(expr_args(ax[1])[1],v):
if not [op,x] in ret:
ret.append([op,x])
return ret
#given a list s of nodes, return the list of nodes that are reachable from the nodes in s
def reach_set(s,g):
s1=[]
for [n1,n2] in g:
if (n1 in s) and not (n2 in s):
s1.append(n2)
if s1==[]:
return s
else:
return reach_set(s+s1,g)
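# A minimal, hedged sketch (demo only; the node names are made up): reach_set
# collects every node reachable from the seed set, seeds included.
def _demo_reach_set():
    g = [['x', 'y'], ['y', 'z'], ['a', 'b']]
    print(reach_set(['x'], g))  # expected: ['x', 'y', 'z']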
# testing examples.
x=expres('x')
y=expres('y')
ex1 = ['-1','=',x, expres('+',[y,['1']])] #x=y+1
ex2 = ['-1','=',y, ['+',y,['1']]] #y=y+1
ex21 = ['1','=',y, ['+',y,['1']]] #1: y=y+1
ex22 = ['-1','if1',['>', y,['1']],ex2] # if y>1 then y=y+1
ex23 = ['-1','if1',['>', y,['1']],ex21] # if y>1 then l: y=y+1
ex24 = ['-1','if2',['>', y,['1']],ex21,ex1] # if y>1 then l: y=y+1 else x=y+1
ex3 = ['-1','seq',ex1,ex2] #x=y+1; y=y+1
v1 = {'x':['_y1','int'], 'y':['_y2','int']}
ex4 = ['-1', '=', ['t',x], ['+', ['+', ['z', x, ['t', x]], ['1']], x]]
ex42 = ['-1', '=', ['z',x,y], ['+', ['+', ['z', x, ['t', x]], ['1']], x]]
v2 = {'x':['_y1','int'], 'y':['_y2','int'], 't':['_y3','int','int'], 'z':['_y4','int','int','int']}
ex41 = ['-1','if1',['>', y,['1']],ex4] # if y>1 then ex4
ex25 = ['-1','if2',['>', y,['1']],ex1,ex4]
ex5 = ['-1','if2',expres('and', [expres('=', [expres('x'),expres('t',[expres('1')])]), expres('<', [expres('y'), expres('z',[expres('x'),expres('y')])])]), ex1, ex4]
ex6 = ['-1','while',expres('<',[expres('x'),expres('y')]),ex4]
#translate1(ex3,v1,1)
#translate1(ex4,v2,1)
#translate1(ex5,v2,1)
# factorial function
"""
i=1;
F=1;
while(i <= X) {
F=F*i;
i=i+1;
}
"""
i=expres('i')
F=expres('F')
X=expres('X')
fact0 = ['-1','seq',['-1','=',i,['1']],['-1','=',F,['1']]]
fact1 = ['-1','seq',['-1','=',F,['*',F,i]],['-1','=',i,['+',i,['1']]]]
fact2 = ['-1','while', ['<=',i,X], fact1]
fact = ['-1','seq',fact0,fact2]
vfact = {'i':['_y1','int'], 'X':['_y2','int'], 'F':['_y3','int'],RET:['_y0','int']}
#translate1(fact,vfact)
#factorial as a function: return F
fact3 = ['-1','=',expres(RET),F]
funfact = ['-1','fun',['factorial', 'X'],['-1','seq',fact,fact3]]
#a main() that uses factorial
# main1() { X=factorial(2) }
main1 = ['-1','fun',['main1'],['-1','=',X,expres('factorial',[expres('2')])]]
# variable list for main1()
man1v = {'X':['_y1','int']}
# variable lists for main1p, one for each function
main1pv = {'main1':man1v,'factorial':vfact}
main1p = ['-1','prog',[funfact,main1]]
# translate1(main1p, main1pv,1)
# in-place list reversing - see Lin and Yang 2015
"""
J = null;
while I != null do {
K = next(I);
next(I) = J;
J=I;
I=K;
}
I=J;
"""
lr6 = ['-1','=',['I'],['K']]
lr5 = ['-1','seq',['-1','=',['J'],['I']], lr6]
lr4 = ['-1','seq',['-1','=', ['next', ['I']],['J']], lr5]
lr3 = ['-1','seq',['-1','=',['K'],['next',['I']]], lr4]
lr2 = ['-1','while',['!=', ['I'], ['null']], lr3]
lr1 = ['-1','seq',lr2,['-1','=',['I'],['J']]]
lr = ['-1','seq',['-1','=',['J'],['null']], lr1]
vlr = {'J':['_y1','list'],'I':['_y2','list'],'K':['_y3','list'],'next':['_y4','list','list']}
#Cohen's division algorithm
"""
//X and Y are two input integers; Y > 0
Q=0; // quotient
R=X; // remainder
while (R >= Y) do {
A=1; // A and B are such that at any time, for
B=Y; // some n, A=2^n and B=2^n*Y
while (R >= 2*B) do {
A = 2*A;
B = 2*B; }
R = R-B;
Q = Q+A }
//
return Q; // on exit Q = X/Y (integer division)
"""
A=expres('A')
B=expres('B')
R=expres('R')
Q=expres('Q')
Y=expres('Y')
A2=expres('*',[expres('2'),A]) #2*A
B2=expres('*',[expres('2'),B]) #2*B
RB=expres('-',[R,B]) #R-B
QA=expres('+',[Q,A]) #Q+A
c1=expres('>=',[R,B2]) #R>=2*B
c2=expres('>=',[R,Y]) #R >= Y
cohen9=['-1','seq',['-1','=',A,A2],['-1','=',B,B2]]
cohen8=['-1','seq',['-1','=',R,RB],['-1','=',Q,QA]]
cohen7=['-1','while',c1, cohen9]
cohen6=['-1','seq',cohen7,cohen8]
cohen1=['-1','=',Q,['0']]
cohen5=['-1','seq',['-1','=',B,Y],cohen6]
cohen4=['-1','seq',['-1','=',A,['1']],cohen5]
cohen3=['-1','while',c2, cohen4]
cohen2=['-1','seq',['-1','=',R,X],cohen3]
cohen = ['-1', 'seq', cohen1,cohen2]
vcohen={'X':['_y1','int'],'Y':['_y2','int'],'Q':['_y3','int'],'R':['_y4','int'],'A':['_y5','int'],'B':['_y6','int']}
#product of two integers
"""
Z = 0;
while( Y!=0 ) {
if ( Y % 2 ==1 ) {
Z = Z+X;
Y =(Y-1);
}
X = 2*X;
Y = Y/2;
}
"""
Z=expres('Z')
prod1=['-1','seq',['-1','=',Z,expres('+',[Z,X])],['-1','=',Y,expres('-',[Y,['1']])]]
prod2=['-1','seq',['-1','=',X,expres('*',[['2'],X])],['-1','=',Y,expres('/',[Y,['2']])]]
prod3=['-1', 'if1', expres('=',[expres('%',[Y,['2']]), ['1']]), prod1]
prod4=['-1','seq',prod3,prod2]
prod5=['-1','while',expres('!=',[Y,['0']]),prod4]
prod = ['-1','seq',['-1','=',Z,['0']],prod5]
vprod = {'X':['_y1','int'],'Y':['_y2','int'],'Z':['_y3','int']}
#array sum: the array is represented by a reference, and elements are accessed via the 'at' predicate
"""
i=0;
sum=0;
while (i<size(A)) {
sum=at(A,i)+sum
i=i+1
}
"""
sum3=['-1','while',expres('<',[i,expres('size',[A])]),['-1','seq',['-1','=',['sum'],expres('+',[expres('at',[A,i]),['sum']])],['-1','=',i,expres('+',[i,['1']])]]]
sum2=['-1','seq',['-1','=',['sum'],['0']],sum3]
sum1=['-1','seq',['-1','=',i,['0']],sum2]
vsum = {'i':['_y1','int'],'sum':['_y2','int'],'size':['_y3','array','int'],'A':['_y4','array'],'at':['_y5','array','int','int']}
#Dijkstra's LCM algorithm
"""
X=A; Y=B; U=B; V=A;
while (X!=Y) {
if (X>Y) {X=X-Y; V=V+U;}
else {Y=Y-X; U=U+V;}
}
"""
A=expres('A')
B=expres('B')
X=expres('X')
V=expres('V')
U=expres('U')
XY=expres('-',[X,Y])
YX=expres('-',[Y,X])
UV=expres('+',[U,V])
lcm1=['-1','seq',['-1','=',X,A],['-1','=',Y,B]]
lcm2=['-1','seq',lcm1,['-1','=',U,B]]
lcm3=['-1','seq',lcm2,['-1','=',V,A]]
lcm4=['-1','seq',['-1','=',X,XY],['-1','=',V,UV]]
lcm5=['-1','seq',['-1','=',Y,YX],['-1','=',U,UV]]
c1=expres('>',[X,Y])
lcm6=['-1', 'if2', c1, lcm4,lcm5]
c2=expres('!=',[X,Y])
lcm7=['-1','while',c2,lcm6]
lcm = ['-1','seq',lcm3,lcm7]
vlcm={'A':['_y1','int'],'B':['_y2','int'],'X':['_y3','int'],'Y':['_y4','int'],'U':['_y5','int'],'V':['_y6','int']}
"""
matrix multiplication from verifythis-2016 competition
int[][] matrixMultiply(int[][] A, int[][] B) {
int n = A.length;
// initialise C
int[][] C = new int[n][n];
for (int i = 0; i < n; i++) {
for (int k = 0; k < n; k++) {
for (int j = 0; j < n; j++) {
C[i][j] += A[i][k] * B[k][j];
}
}
}
return C;
}
"""
def less(x,y):
return expres('<',[x,y])
def passign(l,le,ri):
return [l,'=',le,ri]
def initialize_array2(x,i,j,m,n):
a=passign('-1',expres('d2array',[x,i,j]),expres('0')) #d2array(x,i,j)=0
a1=passign('-1',i,expres('+',[i,expres('1')])) #i++
a2=passign('-1',j,expres('+',[j,expres('1')])) #j++
while1 = ['-1','while', less(j,n), ['-1','seq',a,a2]]
body1 = ['-1','seq',while1,a1]
body2 = ['-1','seq',passign('-1',j,expres('0')),body1]
while2 = ['-1','while', less(i,m), body2]
return ['-1','seq',passign('-1',i,expres('0')),while2]
mM1 = ['-1','seq',passign('-1',expres('n'),expres('length',[expres('a')])),
initialize_array2(expres('c'),expres('i'),expres('j'),expres('n'),expres('n'))]
mM = ['-1','seq',mM1,passign('-1',expres(RET),expres('c'))]
# for now matrixMultiply only initializes C
matrixMultipy = ['-1','fun', ['matrixMultiply','a','b'],mM]
mMv = {'a':['_y1','array'],'b':['_y2','array'],'c':['_y3','array'],RET:['_y0','array'],'i':['_y4','int'],'j':['_y5','int'],'k':['_y6','int'],'n':['_y7','int'],'d2array':['_y7','array','int','int','int']}
# translate1(matrixMultipy,mMv,1)
"""
A program variable has the attributes: its name, its type,
and its corresponding logical variable when parameterized.
A set of program variables is represented by a dictionary
with variable names as keys.
examples: { 'X':['_y1','init','int','char'], 'I':['_y2','int'] }
This set contains two program variables:
constant I of int value and function X from int*int to char
Notice that the arity of a variable x in a set d is len(d[x])-2
We assume no local function definitions, so the p in 'if b then p else p'
'while b do p', 'foo(x,...,y) {b}' is a normal body of program statements.
Program representations:
1. an assignment (l is the label, l='-1' means no label)
l: left = right
by [l,'=',e1,e2],
where e1,e2 are expressions representing left and right, respectively
2. a sequence
p1; p2
by ['-1','seq',p1,p2]
where p1 and p2 are programs
3. if-then:
l: if C then P
by [l,'if1', c,p], where c is the expression for C and p a program for P
4. if-then-else
l: if c then p1 else p2
by [l,'if2', c,p1,p2],
where c is Expr, p1 and p2 are Prog
5. while loop
l: while c {b} by
[l,'while', c,b],
where c is Expr, b is Prog
6. function definition
foo(x,...,y) { B }
['-1','fun',['foo','x',..,'y'], b]
where 'foo' is the name of the function, 'x',...,'y' parameters, and
b the Prog representing B. We assume that B has no local function, i.e.
a normal body of statements.
We assume a unique string 'RET' representing return
value because we do not have a special return statement.
Instead, a return statement
l: return E
is represented as a normal assignment
l: RET = e
We expect this will be the last statement of the body b
7. sequence of functions
foo1(...) {B1}, ..., fook(...) {Bk}
['-1', 'prog', [f1,...,fk]]
where fi is the program representation of fooi(...) {Bi}. For this, the list
of variables v needs to be a dictionary indexed by the function names
'foo1',...,'fook' whose value v['foo'] is the list of variables used in the function
"""
# for testing flag=1 (original translation), flag=2 (inductive definition for smallest N)
def translate1(p,v,flag):
global TC
global LC
TC=0
LC=0
if p[1]=='prog':
f_map={}
a_map={}
o_map={}
cm_map={}
assert_list_map={}
assume_list_map={}
assert_key_map={}
res = translate0(p,v,flag)
for fn in res:
x,f,o,a,l = res[fn]
#print f
#print o
#print('Output for '+fn+':')
simply_output_axioms_fn(f,o,a)
f,o,a,cm = rec_solver(f,o,a)
#f,o,a,cm = rec_solver_moment(f,o,a)
#cm=[]
#print f
#print o
#print a
organizeFreeVariable(f,o,a,v)
#f,o,a,cm = getDummyFunction(f,o,a,cm)
#f,o,a,cm = update__VERIFIER_nondet(f,o,a,cm)
f,o,a,assert_list,assume_list,assert_key=getAssertAssume(f,o,a,cm)
#f,o,a,assert_list,assume_list,assert_key=getAssertAssume(f,o,a,cm)
#assert_list=[]
#assume_list=[]
#assert_key=[]
assert_key_map={}
f_map[fn]=f
o_map[fn]=o
a_map[fn]=a
cm_map[fn]=cm
assert_list_map[fn]=assert_list
assume_list_map[fn]=assume_list
assert_key_map[fn]=assert_key
f,o,a=organizeOutput(f,o,a,v)
f_map[fn]=f
o_map[fn]=o
a_map[fn]=a
cm_map[fn]=cm
output_val=''
#output_axioms_fn(f,o,a)
output_val+=output_axioms_fn_print(f,o,a)
output_val+='\n4. Assumption :\n'
for x in assume_list:
if x[0]=='i1':
output_val+='\nForAll '+x[2]+' ( '+ expr2string1(x[-1])+' ) \n'
else:
if x[0]=='e':
output_val+='\n'+expr2string1(x[-1])+'\n'
elif x[0]!='i0':
output_val+='\n'+wff2string1(x)+'\n'
output_val+='\n5. Assertion :\n'
for x in assert_list:
if x[0]=='i1':
output_val+='\nForAll '+x[2]+' ( '+ expr2string1(x[-1])+' ) \n'
else:
if x[0]=='e':
output_val+='\n'+expr2string1(x[-1])+'\n'
elif x[0]!='i0':
output_val+='\n'+wff2string1(x)+'\n'
return f_map,o_map,a_map,cm_map,assert_list_map,assume_list_map,assert_key_map,output_val
elif p[1]=='fun':
fn,f,o,a,l = translate0(p,v,flag)
print('Output for ')
f,o,a,cm = rec_solver(f,o,a)
#f,o,a,cm = getDummyFunction(f,o,a,cm)
f,o,a,assert_list,assume_list,assert_key=getAssertAssume(f,o,a,cm)
f,o,a=organizeOutput(f,o,a,v)
output_val=''
output_val+=output_axioms_fn_print(f,o,a)
output_val+='\n4. Assumption :\n'
for x in assume_list:
if x[0]=='i1':
output_val+='ForAll '+x[2]+' ( '+ expr2string1(x[4])+' ) \n'
else:
if x[0]!='i0':
output_val+=wff2string1(x)+"\n"
output_val+='\n5. Assertion :\n'
for x in assert_list:
if x[0]=='i1':
output_val+='ForAll '+x[2]+' ( '+ expr2string1(x[4]) +' ) \n'
else:
if x[0]!='i0':
output_val+=wff2string1(x)+"\n"
return f,o,a,cm,assert_list,assume_list,assert_key,output_val
else:
f,o,a,l = translate0(p,v,flag)
        #Added by Pritom Rajkhowa, 10 June 2016
f,o,a,cm = rec_solver(f,o,a)
#f,o,a,cm = getDummyFunction(f,o,a,cm)
f,o,a,assert_list,assume_list,assert_key=getAssertAssume(f,o,a,cm)
f,o,a=organizeOutput(f,o,a,v)
output_axioms_fn(f,o,a)
print('\n4. Assumption :')
for x in assume_list:
if x[0]=='i1':
print('ForAll '+x[2]+' ( '+ expr2string1(x[4])+' ) ')
else:
if x[0]!='i0':
print(wff2string1(x))
print('\n5. Assertion :')
for x in assert_list:
if x[0]=='i1':
print('ForAll '+x[2]+' ( '+ expr2string1(x[4])+' ) ')
else:
if x[0]!='i0':
print(wff2string1(x))
return f,o,a,cm,assert_list,assume_list,assert_key
def simplification_expr(e):
args=expr_args(e)
op=expr_op(e)
list_expr=[]
list_expr.append(op)
if len(args)==0:
return e
else:
for arg in args:
arg = simplification_axioms(arg)
arg = simplification_expr(arg)
list_expr.append(arg)
if list_expr[0]=='ite':
list_expr = simplification_axioms(list_expr)
return list_expr
def getAssertAssume(f,o,a,cm):
new_o={}
new_a=[]
new_f={}
assert_list = []
assume_list = []
assert_key = []
for x in f:
if x.find('_assertion')<0 and x.find('_assumption')<0:
new_f[x]=f[x]
for x in o:
if x.find('_assertion')>0 and o[x][-1][0].find('_assertion')<0:
assert_list.append(o[x])
elif x.find('_assumption')>0 and o[x][-1][0].find('_assumption')<0:
assume_list.append(o[x])
else:
new_o[x]=o[x]
for x in a:
if x[0]=='i1':
if x[3][0].find('_assertion')>0:
assert_list.append(x)
elif x[3][0].find('_assumption')>0:
                assume_list.append(x)
else:
new_a.append(x)
elif x[0]=='i0':
if x[-2][0].find('_assertion')<0 and x[-2][0].find('_assumption')<0:
new_a.append(x)
else:
new_a.append(x)
return new_f, new_o, new_a, assert_list, assume_list, assert_key
def simplification_axioms(e):
flag = None
Cond =None
Case1 = None
Case2 = None
if e[0]=='ite':
arg_list=expr_args(e)
if arg_list[0][0]=='ite':
Cond = simplification_axioms(arg_list[0])
else:
if arg_list[0][0]=='=':
leftSide = expr2string1(arg_list[0][1])
rightSide = expr2string1(arg_list[0][2])
result=simplify(leftSide+'-'+rightSide)
if is_number(result):
if int(result)==0:
flag = True
else:
flag = False
else:
Cond = arg_list[0]
else:
Cond = arg_list[0]
if arg_list[1][0]=='ite':
Case1 = simplification_axioms(arg_list[1])
else:
Case1 = arg_list[1]
if arg_list[2][0]=='ite':
Case2 = simplification_axioms(arg_list[2])
else:
Case2 = arg_list[2]
if flag==True:
return Case1
elif flag==False:
return Case2
else:
return ['ite']+[Cond]+[Case1]+[Case2]
else:
return e
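# Sketch of the effect above: guards that are syntactically decidable are
# folded away, e.g. ite(t = t, a, b) simplifies to a once
# simplify(left - right) reduces to 0.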
def simply_output_axioms_fn(f,o,a):
for x in o:
o[x][-1] = simplification_axioms(o[x][-1])
#for x in a:
# x[-1] = simplification_axioms(x[-1])
def checkCondition(e):
if e[0]=='=':
arg1=e[1]
arg2=e[2]
if arg1==arg2:
return True
else:
return None
elif e[0]=='!=':
arg1=e[1]
arg2=e[2]
if arg1==arg2:
return False
else:
return None
return None
def simplifyAxioms(e):
if e[0]=='ite':
arg_list=expr_args(e)
if checkCondition(arg_list[0]) == True:
return e,arg_list[1]
elif checkCondition(arg_list[0]) == False:
return e,arg_list[2]
else:
return None, None
return None, None
def output_axioms_fn(f,o,a):
#print('Output in prefix notation:')
#print('1. Frame axioms:')
#eqset2string(f)
#print('\n2. Output equations:')
#eqset2string(o)
#print('\n3. Other axioms:')
#for x in a:
# print wff2string(x)
print('\nOutput in normal notation:')
print('1. Frame axioms:')
eqset2string1(f)
print('\n2. Output equations:')
eqset2string1(o)
print('\n3. Other axioms:')
for x in a:
print(wff2string1(x))
def output_axioms_fn_print(f,o,a):
Output_val=''
Output_val+='\nOutput in normal notation:\n'
Output_val+='\n1. Frame axioms:\n'
Output_val+=eqset2string1Print(f)+"\n"
Output_val+='\n2. Output equations:\n'
Output_val+=eqset2string1Print(o)+"\n"
Output_val+='\n3. Other axioms:\n'
for x in a:
Output_val+=wff2string1(x)+"\n"
return Output_val
def organizeOutput(f,o,a,vfacts):
array_list=[]
new_f={}
duplicate_map={}
new_f={}
new_o={}
new_a=[]
for vfact in vfacts.keys():
info_list=vfacts[vfact]
if type(info_list) is dict:
for info in info_list:
element_list=info_list[info]
if type(element_list) is list and len(element_list)>0:
if element_list[1]=='array' and '_PROVE' not in info and '_ASSUME' not in info and len(element_list)==2:
array_list.append(info)
else:
            if info_list[1]=='array' and '_PROVE' not in vfact and '_ASSUME' not in vfact and len(info_list)==2:
array_list.append(vfact)
for e in f:
if isArrayFunction(e)==True:
if len(array_list)>0:
new_f[e]=f[e]
else:
new_f[e]=f[e]
for e in o:
if isArrayFunction(e)==True:
if len(array_list)>0:
new_o[e]=o[e]
else:
new_o[e]=o[e]
for e in a:
if e[0]=='i1':
if isArrayFunction(e[3][0])==True:
if len(array_list)>0:
new_a.append(e)
else:
new_a.append(e)
elif e[0]=='i0':
if isArrayFunction(e[2][0])==True:
if len(array_list)>0:
new_a.append(e)
else:
new_a.append(e)
else:
new_a.append(e)
return new_f,new_o,new_a
def update__VERIFIER_nondet_stmt(e,var_map):
args=expr_args(e)
if '__VERIFIER_nondet' in e[0] and len(args)==0:
if e[0] in var_map.keys():
VC=var_map[e[0]]
VC=VC+1
key=e[0]
var_map[key]=VC
e[0] = e[0]+str(VC)
return e
else:
key=e[0]
var_map[key]=2
e[0] = e[0]+str(2)
return e
else:
return e[:1]+list(update__VERIFIER_nondet_stmt(x,var_map) for x in expr_args(e))
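# Sketch of the renaming above: each syntactic occurrence of a
# __VERIFIER_nondet* call gets a fresh numeric suffix (first occurrence -> 2,
# then 3, 4, ...) so distinct call sites denote distinct unknown values.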
"""
Recurrence Solving Module
Added by Pritom Rajkhowa
June 8
Test cases
Test Case 1
#e1=['i1', 2, '_n1', ['a3', ['+', ['_n1'], ['1']]], ['+', ['a3', ['_n1']], ['1']]]
#e2=['i0', 0, ['a3', ['0']], ['0']]
Test Case 2
#e1=['i1', 2, '_n1', ['a3', ['+', ['_n1'], ['1']]], ['*', ['a3', ['_n1']], ['+', ['_n1'], ['1']]]]
#e2=['i0', 0, ['a3', ['0']], ['1']]
Test Case 3
#e1=['i1', 2, '_n1', ['t3', ['+', ['_n1'], ['1']]], ['+', ['t3', ['_n1']], ['2']]]
#e2=['i0', 0, ['a3', ['0']], ['1']]
Test Case 4
#e1=['i1', 2, '_n1', ['a3', ['+', ['_n1'], ['1']]], ['*', ['a3', ['_n1']], ['2']]]
#e2=['i0', 0, ['a3', ['0']], ['1']]
"""
def solve_rec(e1,e2):
global fun_call_map
lefthandstmt=None
righthandstmt=None
righthandstmt_d=None
lefthandstmt_base=None
righthandstmt_base=None
righthandstmt_base_d=None
variable=None
closed_form_soln=None
if e1[0]=='i1':
lefthandstmt=expr2string1(e1[3])
righthandstmt=expr2string1(e1[4])
lefthandstmt=lefthandstmt.strip()
righthandstmt=righthandstmt.strip()
variable=e1[2]
if lefthandstmt.find('_PROVE')>0:
return None
elif lefthandstmt.find('_ASSUME')>0:
return None
if 'ite' not in righthandstmt and '>' not in righthandstmt and '<' not in righthandstmt and '==' not in righthandstmt and '!=' not in righthandstmt and '|' not in righthandstmt and '&' not in righthandstmt:
lefthandstmt=simplify(lefthandstmt)
righthandstmt=simplify(righthandstmt)
variable=simplify(variable)
else:
if '|' not in righthandstmt and '&' not in righthandstmt and '<<' not in righthandstmt and '>>' not in righthandstmt:
righthandstmt=expr2stringSimplify(e1[4])
righthandstmt=righthandstmt.strip()
                if 'ite' not in righthandstmt and '>' not in righthandstmt and '<' not in righthandstmt and '==' not in righthandstmt and '!=' not in righthandstmt and '|' not in righthandstmt and '&' not in righthandstmt:
lefthandstmt=simplify(lefthandstmt)
righthandstmt=simplify(righthandstmt)
variable=simplify(variable)
else:
lefthandstmt=None
righthandstmt=None
variable=None
if e2[0]=='i0':
lefthandstmt_base=expr2string1(e2[2])
righthandstmt_base=expr2string1(e2[3])
variable_list=[]
expr2varlist(e2[3],variable_list)
lefthandstmt_base=lefthandstmt_base.strip()
righthandstmt_base=righthandstmt_base.strip()
if 'ite' in righthandstmt_base or '|' in righthandstmt_base or '&' in righthandstmt_base or '<<' in righthandstmt_base or '>>' in righthandstmt_base:
return None
lefthandstmt_base=simplify(lefthandstmt_base)
righthandstmt_base=simplify(righthandstmt_base)
if variable is not None and lefthandstmt is not None and righthandstmt is not None and lefthandstmt_base is not None and righthandstmt_base is not None:
righthandstmt_d=righthandstmt
righthandstmt_base_d=righthandstmt_base
term1=lefthandstmt.subs(simplify(str(variable)+"+1"),0)
term2=lefthandstmt.subs(simplify(str(variable)+"+1"),simplify(variable))
if term1==lefthandstmt_base and str(term2) in str(righthandstmt):
righthandstmt=simplify(righthandstmt).subs({simplify(term2):simplify('T(n)'),simplify(variable):simplify('n')})
result=None
#Try to solve recurrences
try:
result = getSympyCache(righthandstmt,righthandstmt_base)
#result = None
if result is None:
#result=recurreSolver_wolframalpha(righthandstmt,righthandstmt_base,variable_list)
result=recurreSolver_sympy(righthandstmt,righthandstmt_base)
#result = None
#if result is None:
#result=recurreSolver_sympy(righthandstmt,righthandstmt_base)
#result=recurreSolver_wolframalpha(righthandstmt,righthandstmt_base,variable_list)
except ValueError:
result=None
if result is not None:
result=substituteValue(simplify_sympy(result),simplify('n'),simplify(variable))
if "**" in str(result):
result=translatepowerToFun(str(result))
expression=str(str(term2)+"="+str(result))
fun_call_map={}
parser = c_parser.CParser()
ast = parser.parse("void test(){"+expression+";}")
statement_temp=ast.ext[0].body.block_items[0]
closed_form_soln = construct_expressionC(e1[1],e1[2],expr_replace_power(eval(expressionCreator_C(statement_temp.lvalue))),expr_replace_power(eval(expressionCreator_C(statement_temp.rvalue))))
#tree = p.parse_expression(expression)
#closed_form_soln=construct_expression(tree,e1[1],e1[2])
#return None
return closed_form_soln
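# Hedged usage sketch for Test Case 1 from the docstring above, i.e.
# a3(n+1) = a3(n) + 1 with a3(0) = 0. If sympy finds the closed form, the
# result is an ['i2', k, v, lhs, rhs] wff encoding a3(_n1) = _n1.
def _demo_solve_rec():
    e1 = ['i1', 2, '_n1', ['a3', ['+', ['_n1'], ['1']]], ['+', ['a3', ['_n1']], ['1']]]
    e2 = ['i0', 0, ['a3', ['0']], ['0']]
    print(solve_rec(e1, e2))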
# expr_replace(e,e1,e2): replace all subterm e1 in e by e2
def expr_replace_power(e): #e,e1,e2: expr
args=expr_args(e)
op=expr_op(e)
if len(args)>0:
if op=='power' or 'power_' in op :
return eval("['**']")+list(expr_replace_power(x) for x in expr_args(e))
else:
return e[:1]+list(expr_replace_power(x) for x in expr_args(e))
else:
return e
#get variable
def expr2varlist(e,variable_list):
args=expr_args(e)
op=expr_op(e)
if len(args)==0:
if '_n' not in op and is_number(op)==False:
variable_list.append(op)
else:
if op=='and' or op=='or':
if len(args)==1:
expr2varlist(args[0],variable_list)
else:
for x in args:
expr2varlist(x,variable_list)
elif op=='not' and len(args)==1:
expr2varlist(args[0],variable_list)
elif op=='implies' and len(args)==2:
expr2varlist(args[0],variable_list)
expr2varlist(args[1],variable_list)
elif op in _infix_op and len(args)==2:
expr2varlist(args[0],variable_list)
expr2varlist(args[1],variable_list)
else:
for x in args:
expr2varlist(x,variable_list)
def getSympyCache(expression,base_expression):
#print '~~~~~~~~~~~~~~'
#print expression
#print base_expression
#print '~~~~~~~~~~~~~~'
#cache_map={'(n + 1)**3 + T(n)':['0','(n**2*(n + 1)**2)/2'],'(i + n + 1)**3 + T(n)':['N_1','N_1 + (n*(n + (1 + 2*i) )*(- (2 - 2*i) + n*(n + (1 + 2*i) )))/4']}
cache_map={'(n + 1)**3 + T(n)':['0','(n**2*(n + 1)**2)/4'],'T(n) - 1':['N_1','N_1 - n'],'4000 + T(n)':['0','4000*n'],'2000 + T(n)':['0','2000*n']}
for element in cache_map.keys():
if simplify(element)==simplify(expression):
try:
return simplify(cache_map[element][1]).subs(simplify(cache_map[element][0]),simplify(base_expression))
except ValueError:
return None
return None
"""
Solving recurrences using sympy
"""
def recurreSolver_sympy(righthandstmt,righthandstmt_base):
expression="T(n+1)-("+str(righthandstmt)+")"
#print expression
f=simplify(expression)
#Register n as Symbol
n=Symbol('n')
#Register T as Function
T=Function('T')
result=None
#Converting String to Sympy Expression
terminationList={sympify("T(0)"):righthandstmt_base}
#Try to solve recurrences
try:
result=rsolve(f, T(n), terminationList)
flag=False
flag=isConstInResult( str(result) )
if flag==False and result is not None and 'RisingFactorial' not in str(result) and 'binomial' not in str(result) and 'gamma' not in str(result) and 'rgamma' not in str(result) and 'gammaprod' not in str(result) and 'loggamma' not in str(result) and 'beta' not in str(result) and 'superfac' not in str(result) and 'barnesg' not in str(result):
result=simplify(result)
else:
result=None
#writeLogFile( "j2llogs.logs" , "\nFailed to find close form solution\n" )
except ValueError:
result=None
#writeLogFile( "j2llogs.logs" , "\nFailed to find close form solution\n" )
return result
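# Standalone sketch of the rsolve pattern used above, here for the recurrence
# T(n+1) = 2*T(n) with T(0) = 1; the expected closed form is 2**n.
def _demo_rsolve():
    n = Symbol('n')
    T = Function('T')
    f = simplify("T(n+1)-(2*T(n))")
    print(rsolve(f, T(n), {sympify("T(0)"): sympify("1")}))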
#Parsing Method Starts
# define some basic operand expressions
number = Regex(r'\d+(\.\d*)?([Ee][+-]?\d+)?')
ident = Word(alphas+'_', alphanums+'_')
#fn_call = ident + '(' + Optional(delimited_list(expr)) + ')'
# forward declare our overall expression, since a slice could
# contain an arithmetic expression
expr = Forward()
#slice_ref = '[' + expr + ']'
slice_ref = '[' + expr + ZeroOrMore("," + expr) + ']'
# define our arithmetic operand
operand = number | Combine(ident + Optional(slice_ref))
#operand = number | fn_call | Combine(ident + Optional(slice_ref))
inequalities = oneOf("< > >= <= = == !=")
# parse actions to convert parsed items
def convert_to_pow(tokens):
tmp = tokens[0][:]
ret = tmp.pop(-1)
tmp.pop(-1)
while tmp:
base = tmp.pop(-1)
# hack to handle '**' precedence ahead of '-'
if base.startswith('-'):
ret = '-power(%s,%s)' % (base[1:], ret)
else:
ret = 'power(%s,%s)' % (base, ret)
if tmp:
tmp.pop(-1)
return ret
def unary_as_is(tokens):
return '(%s)' % ''.join(tokens[0])
def as_is(tokens):
return '%s' % ''.join(tokens[0])
# simplest infixNotation - may need to add a few more operators, but start with this for now
arith_expr = infixNotation( operand,
[
('-', 1, opAssoc.RIGHT, as_is),
('**', 2, opAssoc.LEFT, convert_to_pow),
('-', 1, opAssoc.RIGHT, unary_as_is),
((inequalities,inequalities), 3, opAssoc.LEFT, as_is),
(inequalities, 2, opAssoc.LEFT, as_is),
(oneOf("* /"), 2, opAssoc.LEFT, as_is),
(oneOf("+ -"), 2, opAssoc.LEFT, as_is),
(oneOf('and or'), 2, opAssoc.LEFT, as_is),
])
#('-', 1, opAssoc.RIGHT, as_is),
# now assign into forward-declared expr
expr <<= arith_expr.setParseAction(lambda t: '(%s)' % ''.join(t))
"""
#expression="2**3"
#expression="2**-3"
#expression="2**3**x5"
#expression="2**-3**x6[-1]"
#expression="2**-3**x5+1"
#expression="(a+1)**2"
#expression="((a+b)*c)**2"
#expression="B**2"
#expression="-B**2"
#expression"(-B)**2"
#expression="B**-2"
#expression="B**(-2)"
#expression="((Z**(_N1+1)-1)/(Z-1))*(Z-1))"
#expression="((_N1+1)**2)<=X"
#expression="_n2*_n3*_N1(_n2, _n3)**2/2"
#translatepowerToFun(expression)
#expression="_n2*_n3*_N1(_n2, X(_n3))**2/2"
#expression="(((2.00000000000000)+_n2*_n3*_N1(_n2, X(_n3))**2/2))"
"""
def translatepowerToFun(expression):
if "**" in expression:
try:
backup_expression=expression
if ("<" in expression or ">" in expression) and '/' not in expression :
expression=simplify(expression)
expression=transferToFunctionSyntax(str(expression))
xform = expr.transformString(expression)[1:-1]
#xform = expr.transformString(expression)
xform=xform.replace('[','(')
expression=xform.replace(']',')')
except Exception as e:
expression=backup_expression
#print expression
return expression
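# Quick sketch of the rewrite performed above: '**' becomes a power(...) call
# so the C parser downstream can digest the expression.
def _demo_translatepowerToFun():
    print(translatepowerToFun("(a+1)**2"))  # expected: something like power((a+1),2)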
"""
Example 1:
>>> expression="x(n)+(y(n)+1)*n"
>>> transferToFunctionSyntax(expression)
'x[n]+(y[n]+1)*n'
Example 2:
>>> expression="x(n(a,b),a,b)+2^(y(_N1(a,b),a,b)+1)"
>>> transferToFunctionSyntax(expression)
'x[n[a,b],a,b]+2^(y[_N1[a,b],a,b]+1)'
Example 3:
>>> expression="x(n)+(y(n)/(_N1(n)))"
>>> transferToFunctionSyntax(expression)
'x[n]+(y[n]/(_N1(n)))'
"""
#Changing a function of the form f(n) to f[n]; this assists parsing
def transferToFunctionSyntax(expression):
if "(" in expression and ")" in expression:
p = regex.compile(r'\b[a-zA-Z_]\w*(\((?>[^()]|(?1))*\))')
result=(p.sub(lambda m: m.group().replace("(", "[").replace(")", "]"), expression))
else:
result=expression
return result
#Parsing Method Ends
def isConstInResult( variable ):
status=False
find=regex.compile(r'C\d')
group = find.search(variable)
if group is not None:
status=True
return status
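# (The C\d pattern above flags unresolved rsolve constants: e.g.
# isConstInResult('C0 + 2**n') is True while isConstInResult('n**2') is False,
# so solutions still containing free constants are discarded.)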
def simplify_sympy(expression):
#if '/' in str(expression) and '>' not in str(expression) and '<' not in str(expression) and '=' not in str(expression):
if '<<' in str(expression) or '>>' in str(expression) or 'ite' in str(expression) or 'and' in str(expression) or '&' in str(expression) or '|' in str(expression) or '^' in str(expression):
return expression
try:
sympify(expression)
except Exception as e:
return expression
if sympify(expression)==True or sympify(expression)==False:
return expression
if '/' in str(expression):
expression,flag=expressionChecking(expression)
if flag==True:
expression_mod=expression
else:
expression_mod=powsimp(expression)
else:
if 'array' not in str(expression):
expression_mod=powsimp(expression)
else:
expression_mod=expression
if '/' not in str(expression_mod) and 'E' not in str(expression_mod) and 'e' not in str(expression_mod):
expression=expression_mod
if '/' in str(expression):
no,deno=fraction(together(expression))
no=sympify(no).expand(basic=True)
deno=sympify(deno).expand(basic=True)
if deno==1:
expression,flag=expressionChecking(expression)
if flag==True:
return expression
#return pow_to_mul(powsimp(expression))
else:
return pow_to_mul(powsimp(expression))
#return pow_to_mul(powsimp(no))
else:
return Mul(pow_to_mul(powsimp(no)), Pow(pow_to_mul(powsimp(deno)), -1), evaluate=False)
else:
#return str(sympify(expression).expand(basic=True))
if type(expression) is str:
return expression
else:
expressiontemp=sympify(expression).expand(basic=True)
if '/' in str(expressiontemp):
return pow_to_mul(powsimp(sympify(expression)))
else:
return pow_to_mul(powsimp(sympify(expression).expand(basic=True)))
def substituteValue(expression,key,value):
if '/' in str(expression):
#no,deno=fraction(together(expression))
no,deno=fraction(expression)
no=sympify(no).expand(basic=True)
no=no.subs(simplify(key),simplify(value))
deno=deno.subs(simplify(key),simplify(value))
if deno==1:
return powsimp(no)
else:
return Mul(powsimp(no), Pow(powsimp(deno), -1), evaluate=False)
else:
return simplify(expression).subs(simplify(key),simplify(value))
"""
Expanding algebraic powers
"""
def pow_to_mul(expression):
"""
Convert integer powers in an expression to Muls, like a**2 => a*a(Only for Squre).
"""
#expression=simplify(expression).expand(basic=True)
#expression=simplify(expression)
pows=list(expression.atoms(Pow))
if any(not e.is_Integer for b, e in (i.as_base_exp() for i in pows)):
#A power contains a non-integer exponent
return expression
repl=None
for b,e in (i.as_base_exp() for i in pows):
if e==2:
repl = zip(pows,((Mul(*[b]*e,evaluate=False)) for b,e in (i.as_base_exp() for i in pows)))
if repl is not None:
return expression.subs(repl)
else:
return expression
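# Hedged sketch: only squares are unrolled (a**2 -> a*a); other powers are
# left untouched, so downstream consumers without '**' support can cope.
def _demo_pow_to_mul():
    x = Symbol('x')
    print(pow_to_mul(x**2 + 1))  # expected: roughly x*x + 1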
def translatepowerToFunCheck(expression):
if "**" in expression:
expression=transferToFunctionSyntax(str(expression))
xform = expr.transformString(expression)[1:-1]
xform=xform.replace('[','(')
expression=xform.replace(']',')')
#print expression
return expression
#expression="(A+B+((Z**(K)-1)/(Z-1))*(Z-1))"
expression="((Z**(K)-1)/(Z-1))*(Z-1)"
expression="(Z/2)*6<=Z"
expression="r6(_n2)>=(((2**-(_n2))*((2**_N1)*B))/2)"
#expressionChecking(expression)
def expressionChecking(expression):
if '(((((((' not in str(expression):
if "**" in str(expression):
expression=translatepowerToFunCheck(str(expression))
#p = getParser()
parser = c_parser.CParser()
#tree = p.parse_expression(expression)
ast = parser.parse("void test(){"+str(expression)+";}")
statement_temp=ast.ext[0].body.block_items[0]
#expr_wff=eval(expressionCreator(tree))
expr_wff = eval(expressionCreator_C(statement_temp))
flag=False
return expr2simplified(expr_wff,flag)
else:
return str(expression),False
def construct_expressionC(postion,variable,e1,e2):
expression=[]
expression.append('i2')
expression.append(postion)
expression.append(variable)
expression.append(e1)
expression.append(e2)
return expression
def construct_expression(tree,postion,variable):
expression=""
if type(tree) is m.Assignment:
expression="['i2',"+str(postion)+",'"+variable+"',"+expressionCreator(tree.lhs)+","+expressionCreator(tree.rhs)+"]"
return eval(expression)
def construct_expression_normalC(e):
if e is not None:
expression=[]
expression.append('s0')
expression.append(e)
return expression
else:
return None
def construct_expression_normal(tree):
if tree is not None:
expression=""
if type(tree) is m.Relational:
expression="['s0',"+expressionCreator(tree)+"]"
return eval(expression)
else:
return None
"""
Convert a program expression (C AST node) into the list-based expression form used by the translator
"""
fun_call_map={}
current_fun_call=None
defineMap={}
def expressionCreator_C(statement):
expression=""
global defineMap
global defineDetaillist
global fun_call_map
global current_fun_call
if type(statement) is c_ast.ID:
if statement.name in defineMap.keys():
value = defineMap[statement.name]
return str(eval("expres('"+value+"')"))
else:
return str(eval("expres('"+statement.name+"')"))
elif type(statement) is c_ast.Constant:
if statement.type=='char':
if str(statement.value)==str("'\\0'"):
return str(eval("expres('0')"))
else:
return "['char',expres("+statement.value+")]"
elif statement.type=='float':
if statement.value[-1]=='f':
#return "expres('"+str(round(float(statement.value[:-1]), 7))+"')"
return str(eval("expres('"+str(statement.value[:-1])+"')"))
#return "expres('"+str(float(statement.value))+"')"
return str(eval("expres('"+str(statement.value)+"')"))
elif statement.type=='double':
#return "expres('"+str(float(statement.value))+"')"
return str(eval("expres('"+str(statement.value)+"')"))
else:
if is_hex(statement.value) is not None:
return str(eval("expres('"+is_hex(statement.value)+"')"))
else:
return str(eval("expres('"+statement.value+"')"))
elif type(statement) is c_ast.FuncCall:
parameter=''
parameter_list=[]
defineDetaillist=[]
defineDetailtemp=[]
parameter_list.append('int')
if statement.args is not None:
for param in statement.args.exprs:
if type(param) is c_ast.ID:
parameter_list.append('int')
if param.name in defineMap.keys():
param.name = defineMap[param.name]
if parameter=='':
parameter = str(eval("expres('"+param.name+"')"))
else:
parameter += ","+str(eval("expres('"+param.name+"')"))
elif type(param) is c_ast.Constant:
parameter_list.append('int')
if parameter=='':
if is_hex(param.value) is not None:
parameter = str(eval("expres('"+is_hex(param.value)+"')"))
else:
parameter = str(eval("expres('"+param.value+"')"))
else:
if is_hex(param.value) is not None:
parameter += ","+str(eval("expres('"+is_hex(param.value)+"')"))
else:
parameter += ","+str(eval("expres('"+param.value+"')"))
elif type(param) is c_ast.UnaryOp:
if parameter=='':
parameter = str(eval("expres('"+param.op+"',["+expressionCreator_C(param.expr)+"])"))
else:
parameter +=','+str(eval("expres('"+param.op+"',["+expressionCreator_C(param.expr)+"])"))
elif type(param) is c_ast.BinaryOp:
if parameter=='':
parameter =expressionCreator_C(param)
else:
parameter +=','+expressionCreator_C(param)
elif type(param) is c_ast.FuncCall:
if parameter=='':
#param.show()
parameter =expressionCreator_C(param)
else:
#param.show()
parameter +=','+expressionCreator_C(param)
else:
if type(param) is c_ast.ArrayRef:
parameter_list.append('int')
degree=0
stmt,degree=createArrayList_C(param,degree)
if parameter=='':
parameter = str(eval("expres('d"+str(degree)+'array'+"',["+stmt+"])"))
else:
parameter += ","+str(eval("expres('d"+str(degree)+'array'+"',["+stmt+"])"))
#print '@@@@@@@@@@@RamRam'
#print param.show()
#print '@@@@@@@@@@@'
if 'uniform' in statement.name.name:
parameter="['"+statement.name.name+"'],"+parameter
return "['RV',"+parameter+"]"
elif 'gauss' in statement.name.name:
parameter="['"+statement.name.name+"'],"+parameter
return "['RV',"+parameter+"]"
else:
defineDetailtemp.append(statement.name.name)
defineDetailtemp.append(len(parameter_list)-1)
defineDetailtemp.append(parameter_list)
defineDetaillist.append(defineDetailtemp)
#if statement.name.name in fun_call_map.keys() and statement.name.name != current_fun_call and '__VERIFIER_nondet_' not in statement.name.name:
# fc_count=fun_call_map[statement.name.name]
# fc_count+=1
# fun_call_map[statement.name.name]=fc_count
# return "['"+statement.name.name+"_"+str(fc_count)+"',"+parameter+"]"
#else:
# fun_call_map[statement.name.name]=0
return "['"+statement.name.name+"',"+parameter+"]"
else:
if '__VERIFIER_nondet_' not in statement.name.name:
defineDetailtemp.append(statement.name.name)
defineDetailtemp.append(len(parameter_list)-1)
defineDetailtemp.append(parameter_list)
defineDetaillist.append(defineDetailtemp)
#if statement.name.name in fun_call_map.keys() and statement.name.name != current_fun_call and '__VERIFIER_nondet_' not in statement.name.name:
# fc_count=fun_call_map[statement.name.name]
# fc_count+=1
# fun_call_map[statement.name.name]=fc_count
# return str(eval("expres('"+statement.name.name+"_"+str(fc_count)+"'"+")"))
#else:
# fun_call_map[statement.name.name]=0
return str(eval("expres('"+statement.name.name+"'"+")"))
elif type(statement) is c_ast.ArrayRef:
degree=0
stmt,degree=createArrayList_C(statement,degree)
return str(eval("expres('d"+str(degree)+'array'+"',["+stmt+"])"))
else:
if type(statement) is c_ast.Cast:
if statement.to_type.type.type.names[0]=='float':
return "['"+"_ToReal"+"',"+expressionCreator_C(statement.expr)+"]"
elif statement.to_type.type.type.names[0]=='double':
return "['"+"_ToReal"+"',"+expressionCreator_C(statement.expr)+"]"
elif statement.to_type.type.type.names[0]=='int':
return "['"+"_ToInt"+"',"+expressionCreator_C(statement.expr)+"]"
else:
if statement.op in ['+','-','*','/','%']:
expression="expres('"
expression+=statement.op
if type(statement) is c_ast.BinaryOp:
expression+="',["+expressionCreator_C(statement.left)
expression+=','+expressionCreator_C(statement.right)
else:
expression+="',["+expressionCreator_C(statement.expr)
expression+='])'
expression=str(eval(expression))
return expression
else:
#if statement.op == '!=':
# statement=c_ast.UnaryOp(op='!', expr=c_ast.BinaryOp(op='==',left=statement.left, right=statement.right))
expression="['"
if statement.op == '&&':
expression+='and'
elif statement.op == '||':
expression+='or'
elif statement.op == '!':
expression+='not'
else:
expression+=statement.op
if type(statement) is c_ast.BinaryOp:
expression+="',"+expressionCreator_C(statement.left)
expression+=','+expressionCreator_C(statement.right)
expression+=']'
else:
expression="expres('"
if statement.op == '!':
expression+='not'
else:
expression+=statement.op
statement.show()
print('=================')
expression+="',["+expressionCreator_C(statement.expr)+"]"
expression+=')'
expression=str(eval(expression))
return expression
"""
Construct Array List
"""
def createArrayList_C(statement,degree):
if type(statement) is c_ast.ArrayRef:
degree=degree+1
stmt=''
if type(statement.name) is c_ast.ArrayRef:
stmt,degree=createArrayList_C(statement.name,degree)
if type(statement.subscript) is c_ast.ID:
stmt+=",expres('"+statement.subscript.name+"')"
elif type(statement.subscript) is c_ast.BinaryOp:
stmt+=","+expressionCreator_C(statement.subscript)
else:
stmt+=",expres('"+statement.subscript.value+"')"
return stmt,degree
else:
if type(statement.name) is c_ast.ID:
if type(statement.subscript) is c_ast.ID:
stmt+="expres('"+statement.name.name+"')"+",expres('"+statement.subscript.name+"')"
return stmt,degree
elif type(statement.subscript) is c_ast.BinaryOp:
stmt+="expres('"+statement.name.name+"')"+","+expressionCreator_C(statement.subscript)
return stmt,degree
else:
if type(statement.subscript) is c_ast.ArrayRef:
temp_degree=0
temp_stmt,temp_degree=createArrayList_C(statement.subscript,temp_degree)
stmt+="expres('"+statement.name.name+"')"+","+"expres('d"+str(temp_degree)+'array'+"',["+temp_stmt+"])"
return stmt,degree
else:
stmt+="expres('"+statement.name.name+"')"+",expres('"+statement.subscript.value+"')"
return stmt,degree
else:
if type(statement.name) is c_ast.FuncCall:
if type(statement.subscript) is c_ast.FuncCall:
stmt+=expressionCreator_C(statement.name)+","+expressionCreator_C(statement.subscript)
elif type(statement.subscript) is c_ast.BinaryOp:
stmt+=expressionCreator_C(statement.name)+","+expressionCreator_C(statement.subscript)
else:
stmt+=expressionCreator_C(statement.name)+",expres('"+statement.subscript.value+"')"
else:
stmt+="expres('"+statement.name.value+"')"+",expres('"+statement.subscript.value+"')"
return stmt,degree
else:
return "expres('"+statement.name+"')",degree
"""
Recurrence Solver After Translation
"""
def rec_solver(f,o,a):
global fun_call_map
constant_fun_map={}
equation_map={}
base_map={}
for axiom in a:
if axiom[0]=='i1':
lefthandstmt=expr2string1(axiom[3])
lefthandstmt=lefthandstmt.strip()
equation_map[str(simplify(lefthandstmt))]=axiom
if axiom[0]=='i0':
lefthandstmt=expr2string1(axiom[2])
lefthandstmt=lefthandstmt.strip()
base_map[str(simplify(lefthandstmt))]=axiom
if axiom[0]=='s1':
equ=expr2string1(axiom[1])
if '->' in equ:
axiomes=equ.split('->')
axiomes[0]=simplify(str(axiomes[0]))
if '<' in str(axiomes[0]):
variables=str(axiomes[0]).split('<')
elif '>' in str(axiomes[0]):
variables=str(axiomes[0]).split('>')
variables[0]=variables[0].strip()
variables[1]=variables[1].strip()
constant_fun_map[variables[0]]=variables[1]
while True:
solution_map={}
for equation in equation_map:
e1=equation_map[equation]
equation_base=str(simplify(equation).subs(simplify(str(e1[2])+"+1"),0))
if equation_base in base_map.keys():
e2=base_map[equation_base]
result=solve_rec(e1,e2)
#result = None
if result is not None:
a.remove(base_map[equation_base])
del base_map[equation_base]
solution_map[equation]=result
for equation in solution_map:
a.remove(equation_map[equation])
del equation_map[equation]
e=solution_map[equation]
e1=copy.deepcopy(e)
variable=e[2]
a=solnsubstitution(a,e[3],e[4])
constant=constant_fun_map[variable]
#p = getParser()
#tree = p.parse_expression(constant)
#constant=eval(expressionCreator(tree))
fun_call_map={}
parser = c_parser.CParser()
ast = parser.parse("void test(){"+str(constant)+";}")
statement_temp=ast.ext[0].body.block_items[0]
constant=eval(expressionCreator_C(statement_temp))
variable_list=eval("expres('"+variable+"')")
e1[3]=expr_replace(e1[3],variable_list,constant)
e1[4]=expr_replace(e1[4],variable_list,constant)
a=solnsubstitution(a,e1[3],e1[4])
for x in o:
stmt=o[x]
stmt[2]=expr_replace(stmt[2],e1[3],e1[4])
if len(equation_map)==0 or len(solution_map)==0:
break
return f,o,a,constant_fun_map
"""
#Function to replace variable by constant
#Test Case
#e=['a', ['<', ['x2', ['_n1']], ['y2', ['_n1']]]]
#variable='_n1'
#constant='_N1'
#expr_replace_const(e,variable,constant)
"""
def expr_replace_const(e,variable,constant):
if e[:1]==expres(variable):
e[:1]=expres(constant)
return e[:1]+list(expr_replace_const(x,variable,constant) for x in expr_args(e))
def get_All_Var(e,var_map):
args=expr_args(e)
op=expr_op(e)
if len(args)==0:
        if not is_number(op) and is_hex(op) is None and op not in _base:
var_map.append(op)
else:
if op=='and' or op=='or':
if len(args)==1:
get_All_Var(args[0],var_map)
else:
for x in args:
get_All_Var(x,var_map)
elif op=='not' and len(args)==1:
get_All_Var(args[0],var_map)
elif op=='implies' and len(args)==2:
get_All_Var(args[0],var_map)
get_All_Var(args[1],var_map)
elif op in _infix_op and len(args)==2:
get_All_Var(args[0],var_map)
get_All_Var(args[1],var_map)
else:
            if not is_number(op) and is_hex(op) is None and op not in _base:
var_map.append(op)
for x in args:
get_All_Var(x,var_map)
#substituting close form solution in rest of the axiomes
def solnsubstitution(axioms,key,substituter):
update_axioms=[]
for axiom in axioms:
if axiom[0]!='i0' and axiom[0]!='i1':
update_axioms.append(expr_replace(axiom,key,substituter))
else:
if axiom[0]=='i1':
axiom[4]=expr_replace(axiom[4],key,substituter)
update_axioms.append(axiom)
elif axiom[0]=='i0':
axiom[3]=expr_replace(axiom[3],key,substituter)
update_axioms.append(axiom)
else:
update_axioms.append(axiom)
return update_axioms
def solnsubstitution_Array(axioms,key,substituter):
update_axioms=[]
for axiom in axioms:
if axiom[0]!='i0' and axiom[0]!='i1':
update_axioms.append(expr_array_replace(axiom,key,substituter))
else:
if axiom[0]=='i1':
axiom[4]=expr_array_replace(axiom[4],key,substituter)
update_axioms.append(axiom)
elif axiom[0]=='i0':
axiom[3]=expr_array_replace(axiom[3],key,substituter)
update_axioms.append(axiom)
else:
update_axioms.append(axiom)
return update_axioms
#normal infix printing
def expr2stringSimplify(e):
args=expr_args(e)
op=expr_op(e)
if len(args)==0:
return op
else:
if op=='and' or op=='or':
if len(args)==1:
return expr2stringSimplify(args[0])
else:
return '('+(' '+op+' ').join(list(expr2stringSimplify(x) for x in args))+')'
elif op=='not' and len(args)==1:
return 'not '+expr2stringSimplify(args[0])
elif op=='implies' and len(args)==2:
return expr2stringSimplify(args[0])+ ' -> '+expr2stringSimplify(args[1])
elif op in _infix_op and len(args)==2:
return '(' + expr2stringSimplify(args[0])+ op+expr2stringSimplify(args[1])+')'
else:
            if op == 'ite':
expresion1 = expr2stringSimplify(args[1])
expresion2 = expr2stringSimplify(args[2])
if ('and' not in expresion1 and 'or' not in expresion1 and 'ite' not in expresion1) and ('and' not in expresion2 and 'or' not in expresion2 and 'ite' not in expresion2) and simplify(expresion1+'=='+expresion2)==True:
return expresion1
else:
return op +'('+ ','.join(list(expr2stringSimplify(x) for x in args))+ ')'
else:
return op +'('+ ','.join(list(expr2stringSimplify(x) for x in args))+ ')'
# expr_replace(e,e1,e2): replace all subterm e1 in e by e2
#e=['a', ['implies', ['<', ['_n1'], ['_N1']], ['<', ['x2', ['_n1']], ['y2', ['_n1']]]]]
#e=['a', ['<', ['x2', ['_N1']], ['y2', ['_N1']]]]
def expr_complement(e): #e,e1,e2: expres
if e[:1]==['<']:
e[:1]=['>=']
return e[:1]+list(expr_complement(x) for x in expr_args(e))
elif e[:1]==['>']:
e[:1]=['<=']
return e[:1]+list(expr_complement(x) for x in expr_args(e))
elif e[:1]==['>=']:
e[:1]=['<']
return e[:1]+list(expr_complement(x) for x in expr_args(e))
elif e[:1]==['<=']:
e[:1]=['>']
return e[:1]+list(expr_complement(x) for x in expr_args(e))
elif e[:1]==['==']:
e[:1]=['!=']
return e[:1]+list(expr_complement(x) for x in expr_args(e))
elif e[:1]==['!=']:
e[:1]=['==']
return e[:1]+list(expr_complement(x) for x in expr_args(e))
elif e[:1]==['&&']:
e[:1]=['||']
return e[:1]+list(expr_complement(x) for x in expr_args(e))
elif e[:1]==['||']:
e[:1]=['&&']
return e[:1]+list(expr_complement(x) for x in expr_args(e))
elif e[:1]==['and']:
e[:1]=['or']
return e[:1]+list(expr_complement(x) for x in expr_args(e))
elif e[:1]==['or']:
e[:1]=['and']
return e[:1]+list(expr_complement(x) for x in expr_args(e))
else:
return e[:1]+list(expr_complement(x) for x in expr_args(e))
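# Worked example (derived from the cases above): complementing flips each
# comparison/connective recursively, e.g.
#   expr_complement(['<', ['x'], ['y']])  ->  ['>=', ['x'], ['y']]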
"""
Convert Inequality to Normal Form
"""
def normal_form_constant(expression, constant):
#print "*************"
#print expression
#print "*************"
mult_by_minus_one_map = {
None: '==',
'>=': '<=',
'<=': '>=',
'>': '<',
'<': '>',
}
ineq=simplify(expression)
l = ineq.lhs
r = ineq.rhs
op = ineq.rel_op
all_on_left = l - r
coeff_dict = all_on_left.as_coefficients_dict()
var_types = coeff_dict.keys()
new_rhs = sympify(0)
for s in var_types:
if s != simplify(constant):
factor=s.coeff(simplify(constant))
if factor==0:
all_on_left = (all_on_left - (coeff_dict[s]*s))
new_rhs = (new_rhs - (coeff_dict[s]*s))
all_on_left=all_on_left.expand(basic=True)
coeff_dict = all_on_left.as_coefficients_dict()
var_types = coeff_dict.keys()
if len(var_types)==1:
for s in var_types:
if coeff_dict[s]<0:
all_on_left = all_on_left * -1
new_rhs = new_rhs * -1
op = mult_by_minus_one_map[op]
factor=all_on_left.coeff(simplify(constant))
if factor!=0:
all_on_left=all_on_left/factor
new_rhs=new_rhs/factor
else:
all_on_left=simplify(all_on_left)
new_rhs=simplify(new_rhs)
coeff_dict = all_on_left.as_coefficients_dict()
var_types = coeff_dict.keys()
if len(var_types)==1:
for s in var_types:
if coeff_dict[s]<0:
all_on_left = all_on_left * -1
new_rhs = new_rhs * -1
op = mult_by_minus_one_map[op]
#print "*************"
#print all_on_left
#print new_rhs
#print "*************"
return Relational(all_on_left,new_rhs,op)
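# Illustrative behaviour (a sketch, assuming sympy's simplify/Relational as
# imported elsewhere): for expression "2*_N1 - x < _N1 + 3" with constant
# "_N1", the non-constant terms move to the right-hand side and the _N1
# coefficient is normalised, yielding a relation of the form _N1 < x + 3.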
#wff to Simplified Expression
def expr2simplified(e,flag):
args=expr_args(e)
op=expr_op(e)
if len(args)==0:
return op,flag
else:
if op in _infix_op and len(args)==2:
expr1,flag=expr2simplified(args[0],flag)
if flag==True:
expr2,flag=expr2simplified(args[1],flag)
flag=True
else:
expr2,flag=expr2simplified(args[1],flag)
if op=='*' and expr_op(args[0])=='/':
n,d=fraction(expr1)
if gcd(d,expr2)!=1:
flag=True
elif op=='/' and expr_op(args[0])=='*':
n,d=fraction(expr2)
if gcd(expr1,d)!=1:
flag=True
if flag==True:
expression= '(' + expr1+ op + expr2 +')'
else:
expression= '((' + str(pow_to_mul(powsimp(expr1)))+ ')'+ op + '('+ str(pow_to_mul(powsimp(expr2)))+'))'
return expression,flag
else:
return op +'('+ ','.join(list(trim_p(expr2string1(x)) for x in args))+ ')',flag
def organizeFreeVariable(f,o,a,vfacts):
struct_type_list=[]
for vfact in vfacts.keys():
info_list=vfacts[vfact]
for info in info_list:
if len(info_list[info])>0 and info_list[info][1] not in ['int','short','unsigned','long','char','float','double','array']:
struct_type_list.append(info)
for x in o:
e=o[x]
if e[0]=='e':
if is_Stuct(e[-2][0],struct_type_list):
e[-1] = expr_replace(e[-1],eval("['_x1']"),eval("['_s1']"))
e[-2] = expr_replace(e[-2],eval("['_x1']"),eval("['_s1']"))
for e in a:
if e[0]=='i1' or e[0]=='i0':
if is_Stuct(e[-2][0],struct_type_list):
e[-1] = expr_replace(e[-1],eval("['_x1']"),eval("['_s1']"))
e[-2] = expr_replace(e[-2],eval("['_x1']"),eval("['_s1']"))
def is_Stuct(var,struct_type_list):
status=False
for x in struct_type_list:
temp=var.replace(x,'').strip()
if is_number(temp)==True:
status=True
return status
#Test Case 1
#variable="d1array4"
#Test Case 2
#variable="d1ar4"
def isArrayFunction( variable ):
status=False
find=regex.compile(r'([d]\d[a][r][r][a][y]\d|[d]\d[a][r][r][a][y])')
group = find.search(variable)
if group is not None:
status=True
return status
| [
"[email protected]"
] | |
6698cb9625f61a12ebcd1b81b10dff98494f516a | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/run_locally.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/ops/linalg/linear_operator_addition.py | 209e3ef4b16d0e34459d5fb1ef484c36c7eda18c | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/ops/linalg/linear_operator_addition.py | [
"[email protected]"
] | |
a74ff399c94d7abbb0737bb769e59ed0db02c535 | bd93fa910151c278be8249055bc084e5a5c35a6a | /Python/itcast/01-Python进阶1/4异常/4抛出自定义异常.py | 45e5cd62b39f425811af7f9113a9ac4885ab6479 | [] | no_license | ahojcn/practice-code | bd81595b80239cd2550183093566bd536a83ed3f | b65f4e76271479269463e92fd3fd41585c2ac792 | refs/heads/master | 2021-07-10T14:15:08.036592 | 2020-07-09T11:32:16 | 2020-07-09T11:32:16 | 153,059,349 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | class ShortInputException(Exception):
    '''Custom exception class.'''
    def __init__(self, length, at_least):
        self.length = length
        self.at_least = at_least
def main():
try:
s = input("请输入 -> ")
if len(s) < 3:
# raise 引发一个自定义异常
raise ShortInputException(len(s), 3)
except ShortInputException as e:
print("ShortInputException, 输入长度(%d), 最小长度(%d)" % (e.length, e.atlieast))
if __name__ == "__main__":
main() | [
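# Example session (illustrative): entering "ab" raises the exception and prints
#   ShortInputException: input length (2), minimum length (3)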
"[email protected]"
] | |
ce0c9e95fea923683045ab70e0cd201076d5ba46 | d28a65d23c204a9736b597ae510d9dd54d2ffd0f | /tests/testUtils/testSctidGenerator.py | 7f1015fad1169ccb000f226bcefdd7bb6b7e9825 | [
"BSD-3-Clause"
] | permissive | cts2/rf2db | 99ba327611e620fc5533245064afcc1daff7c164 | 985cd7ad84c8907306a0d7d309d4a1c0fb422ba4 | refs/heads/master | 2020-05-17T22:37:25.476553 | 2015-08-24T22:18:19 | 2015-08-24T22:18:19 | 15,264,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,482 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from rf2db.utils.sctid_generator import *
from rf2db.utils.sctid import sctid
class GeneratorTestCase(unittest.TestCase):
def test_gen(self):
generator = sctid_generator(MAYO_Namespace, sctid_generator.RELATIONSHIP, 1000)
self.assertEqual(10001000134127, generator.next())
self.assertEqual([(1, 10011000134125), (2, 10021000134121), (3, 10031000134123),
(4, 10041000134129), (5, 10051000134126), (6, 10061000134128),
(7, 10071000134120), (8, 10081000134122), (9, 10091000134124)],
list(zip(range(1, 10), generator)))
self.assertEqual(171000160104, (sctid_generator(CIMI_Namespace, sctid_generator.CONCEPT, 17)).next())
self.assertEqual(911431000160119, (sctid_generator(CIMI_Namespace, sctid_generator.DESCRIPTION, 91143)).next())
self.assertEqual(10101000134126, sctid(generator.next()))
self.assertEqual([(1, 10111000134129), (2, 10121000134120), (3, 10131000134122),
(4, 10141000134128), (5, 10151000134125), (6, 10161000134127),
(7, 10171000134124), (8, 10181000134121), (9, 10191000134123)],
[(a, sctid(generator.next())) for a in range(1, 10)])
self.assertEqual(171000160104, sctid((sctid_generator(CIMI_Namespace, sctid_generator.CONCEPT, 17)).next()))
self.assertEqual(911431000160119, sctid((sctid_generator(CIMI_Namespace, sctid_generator.DESCRIPTION, 91143)).next()))
def test_zero_partition(self):
self.assertEqual(123456001, sctid_generator(0, sctid_generator.CONCEPT, 123456).next())
self.assertEqual(654321026, sctid_generator(0, sctid_generator.RELATIONSHIP, 654321).next())
self.assertEqual(5349010, sctid_generator(0, sctid_generator.DESCRIPTION, 5349).next())
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0486fbd83ea6d8e49d1a8483cd10867e4bd01827 | 518d911a66485947c5d336e96a842f162ef9caf1 | /res/scripts/client/messenger/proto/bw/clanlistener.py | 39c44a4a5bbb7958948df803005d0fa3f6e79198 | [] | no_license | wotmods/WOTDecompiled | 84b8e5d32ee73e1356b4d57318eb76dfac6b5220 | 45fd599666c55cb871f6b84b0ec977b9d4baf469 | refs/heads/master | 2020-12-25T21:34:26.096544 | 2014-11-05T13:58:39 | 2014-11-05T13:58:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,968 | py | # 2014.10.18 14:44:16 Central European Daylight Time
#Embedded file name: scripts/client/messenger/proto/bw/ClanListener.py
import BigWorld
from PlayerEvents import g_playerEvents
from debug_utils import LOG_DEBUG, LOG_ERROR
from messenger.ext.player_helpers import getPlayerDatabaseID
from messenger.proto.bw.entities import BWUserEntity
from messenger.proto.bw.find_criteria import BWClanChannelFindCriteria
from messenger.proto.entities import CurrentUserEntity
from messenger.proto.events import g_messengerEvents
from messenger.storage import storage_getter
class _INIT_STEPS(object):
CLAN_INFO_RECEIVED = 1
MEMBERS_LIST_RECEIVED = 2
LIST_INITED = CLAN_INFO_RECEIVED | MEMBERS_LIST_RECEIVED
class ClanListener(object):
def __init__(self):
super(ClanListener, self).__init__()
self.__initSteps = 0
self.__clanChannel = None
self.__channelCriteria = BWClanChannelFindCriteria()
@storage_getter('users')
def usersStorage(self):
return None
@storage_getter('playerCtx')
def playerCtx(self):
return None
def start(self):
self.__findClanChannel()
cEvents = g_messengerEvents.channels
cEvents.onChannelInited += self.__ce_onChannelInited
cEvents.onChannelDestroyed += self.__ce_onChannelDestroyed
g_playerEvents.onClanMembersListChanged += self.__pe_onClanMembersListChanged
self.playerCtx.onClanInfoChanged += self.__pc_onClanInfoChanged
def stop(self):
cEvents = g_messengerEvents.channels
cEvents.onChannelInited -= self.__ce_onChannelInited
cEvents.onChannelDestroyed -= self.__ce_onChannelDestroyed
self.__clearClanChannel()
g_playerEvents.onClanMembersListChanged -= self.__pe_onClanMembersListChanged
self.playerCtx.onClanInfoChanged -= self.__pc_onClanInfoChanged
def __findClanChannel(self):
channel = storage_getter('channels')().getChannelByCriteria(self.__channelCriteria)
if channel is not None:
self.__initClanChannel(channel)
def __initClanChannel(self, channel):
if self.__clanChannel is not None:
LOG_ERROR('Clan channel is defined', self.__clanChannel, channel)
return
self.__clanChannel = channel
self.__clanChannel.onMembersListChanged += self.__ce_onMembersListChanged
self.__refreshClanMembers()
def __clearClanChannel(self):
if self.__clanChannel is not None:
self.__clanChannel.onMembersListChanged -= self.__ce_onMembersListChanged
self.__clanChannel = None
for user in self.usersStorage.getClanMembersIterator():
user.update(isOnline=False)
g_messengerEvents.users.onClanMembersListChanged()
def __refreshClanMembers(self):
getter = self.__clanChannel.getMember
changed = False
for user in self.usersStorage.getClanMembersIterator():
dbID = user.getID()
isOnline = user.isOnline()
member = getter(dbID)
if member is not None:
if not isOnline:
user.update(isOnline=True)
changed = True
elif isOnline:
user.update(isOnline=False)
changed = True
if changed:
g_messengerEvents.users.onClanMembersListChanged()
def __pe_onClanMembersListChanged(self):
clanMembers = getattr(BigWorld.player(), 'clanMembers', {})
LOG_DEBUG('setClanMembersList', clanMembers)
if not self.__initSteps & _INIT_STEPS.MEMBERS_LIST_RECEIVED:
self.__initSteps |= _INIT_STEPS.MEMBERS_LIST_RECEIVED
clanAbbrev = self.playerCtx.getClanAbbrev()
members = []
if self.__clanChannel is not None:
getter = self.__clanChannel.getMember
else:
getter = lambda dbID: None
playerID = getPlayerDatabaseID()
for dbID, (name, roleFlags) in clanMembers.iteritems():
isOnline = False if getter(dbID) is None else True
if playerID == dbID:
user = CurrentUserEntity(dbID, name=name, clanAbbrev=clanAbbrev, clanRole=roleFlags)
else:
user = BWUserEntity(dbID, name=name, clanAbbrev=clanAbbrev, clanRole=roleFlags, isOnline=isOnline)
members.append(user)
self.usersStorage._setClanMembersList(members)
if self.__initSteps & _INIT_STEPS.LIST_INITED != 0:
g_messengerEvents.users.onClanMembersListChanged()
def __pc_onClanInfoChanged(self):
clanInfo = self.playerCtx.clanInfo
hasClanInfo = clanInfo is not None and len(clanInfo) > 0
if not self.__initSteps & _INIT_STEPS.CLAN_INFO_RECEIVED and hasClanInfo:
self.__initSteps |= _INIT_STEPS.CLAN_INFO_RECEIVED
user = self.usersStorage.getUser(getPlayerDatabaseID())
if user:
user.update(clanRole=self.playerCtx.getClanRole())
clanAbbrev = self.playerCtx.getClanAbbrev()
for user in self.usersStorage.getClanMembersIterator():
user.update(clanAbbrev=clanAbbrev)
if self.__initSteps & _INIT_STEPS.LIST_INITED != 0:
g_messengerEvents.users.onClanMembersListChanged()
def __ce_onChannelInited(self, channel):
if self.__channelCriteria.filter(channel):
self.__initClanChannel(channel)
def __ce_onChannelDestroyed(self, channel):
if self.__channelCriteria.filter(channel):
self.__clearClanChannel()
def __ce_onMembersListChanged(self):
self.__refreshClanMembers()
| [
"[email protected]"
] | |
199294e0b423494211f9880690d3ca663d5aae1e | 8d402df39c18eba7e1c86c762f205c944357c5df | /www/src/Lib/site-packages/ui/menu.py | 78483584c32e8801d0303aea19d88121aed69b3a | [
"BSD-3-Clause"
] | permissive | brython-dev/brython | 87cc023e25550dec9ce459ba68774189f33712b6 | b33958bff0e8c7a280babc30232dc389a2500a7a | refs/heads/master | 2023-09-04T04:49:29.156209 | 2023-09-01T06:36:08 | 2023-09-01T06:36:08 | 24,046,239 | 6,569 | 625 | BSD-3-Clause | 2023-07-05T06:13:32 | 2014-09-15T06:58:21 | Python | UTF-8 | Python | false | false | 2,270 | py | from . import widget
from browser import html, document
class Menu(html.UL, widget.Widget):
def __init__(self, id=None, style={}):
default_style = dict(position= 'relative', height='auto',
width='auto')
default_style.update(style)
html.UL.__init__(self, Class="ui-widget ui-menu", style=default_style)
widget.Widget.__init__(self, self, 'menu', id)
document.bind('click', self.leave)
self.active = False
def add(self, title):
item = MenuItem(title, Class="ui-widget ui-menu-item")
item.bind('click', item.activate)
item.bind('mouseenter', item.enter)
self <= item
return item
def leave(self, ev):
for child in self.children:
if child.state == 'show':
document.remove(child.div)
child.state = 'hide'
self.active = False
class MenuItem(html.LI):
def __init__(self, *args, **kw):
html.LI.__init__(self, *args, **kw)
self.items = []
self.state = "hide"
def activate(self, ev):
self.parent.active = True
self.show(ev)
ev.stopPropagation()
def enter(self, ev):
if self.parent.active:
self.show(ev)
def show(self, ev):
for item in self.parent.children:
if item.state == "show":
if item == self:
return
document.remove(item.div)
item.state = "hide"
if self.state == "hide":
left = ev.target.left
top = ev.target.top+ev.target.height
self.div = html.DIV(Class="ui-widget ui-menu-sublist",
style=dict(position='absolute', left=left, top=top, zIndex=99))
for item in self.items:
line = html.DIV(item[0], Class="ui-menu-subitem")
if item[1] is not None:
line.bind('click', item[1])
self.div <= line
self.state = "show"
self.div.style.borderWidth = "1px"
document <= self.div
else:
document.remove(self.div)
self.state = "hide"
def add(self, label, callback = None):
self.items.append((label, callback))
| [
"[email protected]"
] | |
03427bc747dcd4f72ae0ca4206d202efd9bd40fe | f55f3cd5f44982260fd1bcf711207e3d952499a3 | /platform_crawler/spiders/pylib/login_qq_with_cli.py | b1dd5448a9cec103c4a23412ffb2782281cd30f0 | [] | no_license | prynix/save_code | c065c79d79cc6a5b9181081fa06deaea32af0d78 | 4556d1ad01ed192f91ae210983010ad45bf4635c | refs/heads/master | 2022-02-20T18:32:44.055007 | 2019-08-19T09:51:18 | 2019-08-19T09:51:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,661 | py | import os
import time
import win32gui
import psutil
import logging
from ctypes import windll
from platform_crawler.settings import join, IMG_PATH, GlobalVal
from platform_crawler.configs.excute_paths import ExecutePaths
ACC, u, pag, logger = None, None, None, None
TIM_IMG_PATH = join(IMG_PATH, 'tim_img')
NEW_ERROR_PATH = join(TIM_IMG_PATH, 'new_error_img')
if not os.path.exists(TIM_IMG_PATH):
os.makedirs(TIM_IMG_PATH)
if not os.path.exists(NEW_ERROR_PATH):
os.makedirs(NEW_ERROR_PATH)
img_path = join(TIM_IMG_PATH, 'qq_cli_vc_cf.png')
err_account_img = join(TIM_IMG_PATH, 'err_account.png')
death_acc_img = join(TIM_IMG_PATH, 'death_acc.png')
find_password_img = join(TIM_IMG_PATH, 'find_password.png')
login_success = join(TIM_IMG_PATH, 'login_succ.png')
after_enter_login_btn = join(NEW_ERROR_PATH, 'after_enter.png')
authentication_img = join(TIM_IMG_PATH, 'need_auth.png')
VERIFY_TIMES = 1
def kill_qq():
for e in psutil.process_iter():
a = e.name()
if 'TIM' in a:
e.kill()
def btn_location(img_name_path, loop_time=2, dur=0):
    # Locate the image on screen
s = time.time()
for e in range(loop_time):
try:
x, y, w, h = pag.locateOnScreen(img_name_path)
logger.info('Find once cost time: %s' % int(time.time() - s))
return x, y
except TypeError:
if dur != 0:
time.sleep(dur)
continue
else:
return False
def handle_login_res(loginid):
result = btn_location(img_path) # vc page
if result:
logger.info('Verify Code Appeared')
return deal_vc(loginid)
elif btn_location(err_account_img): # account error page
kill_qq()
logger.info('Wrong account or password!')
res = False
elif btn_location(death_acc_img):
kill_qq()
logger.info('Frozen account')
res = False
elif btn_location(find_password_img):
kill_qq()
logger.info('Wrong password! Find and recheck')
res = False
elif btn_location(authentication_img):
kill_qq()
logger.info('Need to authentication!')
res = False
elif btn_location(login_success):
logger.info('Tim client login success')
return True
else:
logger.info('Unknown situation with account: %s' % ACC)
res = False
if not res:
pic_name = join(NEW_ERROR_PATH, 'error_%s.png' % (int(time.time())))
pag.screenshot(pic_name)
return res
def deal_vc(loginid):
global VERIFY_TIMES
# cut and deal vc img
img1_path = join(TIM_IMG_PATH, 'qq_cli_vc.png')
pag.screenshot(img1_path, region=(loginid[4][0] + 120, loginid[4][1] + 202, 132, 56))
with open(img1_path, 'br') as f:
im = f.read()
res = u.rc.rk_create(im, '2040')
windll.user32.SetCursorPos(loginid[4][0] + 100, loginid[4][1] + 110)
pag.typewrite(res.get('Result').lower())
pag.hotkey('enter')
time.sleep(0.8)
if VERIFY_TIMES != 1:
u.rc.rk_report_error(res.get('Id'))
VERIFY_TIMES += 1
return handle_login_res(loginid)
def QQ(qq, pwd):
# a = win32gui.FindWindow(None, "QQ")
    # Launch the TIM client
os.system('"%s"' % ExecutePaths.TimPath)
time.sleep(5)
    a = win32gui.FindWindow(None, "TIM")  # get the window handle; arg 1: class name, arg 2: window title
loginid = win32gui.GetWindowPlacement(a)
windll.user32.SetCursorPos(loginid[4][0] + 300, loginid[4][1] + 273)
pag.click()
time.sleep(0.2)
    # Type the account number
pag.typewrite(qq)
time.sleep(0.2)
    # Tab to the password field
pag.hotkey('tab')
pag.typewrite(pwd)
    # Press Enter to log in
pag.hotkey('enter')
time.sleep(3)
pag.screenshot(after_enter_login_btn)
    # Check whether a verification code appeared (90,135)
res = handle_login_res(loginid)
if not res:
return False
pag.hotkey('enter')
time.sleep(4)
    a = win32gui.FindWindow(None, "TIM")  # get the window handle; arg 1: class name, arg 2: window title
loginid = win32gui.GetWindowPlacement(a)
pag.click(loginid[4][2]-68, loginid[4][1]+29)
# print(68, 29)
return True
def login_cli(acc, pwd, util):
global u, pag, logger, ACC
u = util
ACC = acc
pag = util.pag
logger = logging.getLogger('%s.login_with_tim' % GlobalVal.CUR_MAIN_LOG_NAME)
kill_qq()
return QQ(acc, pwd)
if __name__ == '__main__':
from platform_crawler.utils.utils import Util
login_cli('2823259680', 'Hhmt123456', Util())
| [
"[email protected]"
] | |
eef2258601df458529923175682ced7135be04c4 | e93690e8ac06fd6aa2f7fe7d3ea56978e787e496 | /optimizeDLM/OldFrench/optimizeDependencyLength_FuncHead_Other.py | ef1077c3609c1ffc9a962382e7e2edd717cb1942 | [] | no_license | m-hahn/optimization-landscapes | 8446fbb0ae783f7aa76278e8a5f4cf5e6f4b2cd8 | b16f640dd855a912f52844882b3de701e5b9eca6 | refs/heads/master | 2023-08-12T01:44:18.434912 | 2021-10-03T14:37:11 | 2021-10-03T14:37:11 | 273,661,277 | 0 | 0 | null | 2021-04-15T04:39:52 | 2020-06-20T07:36:12 | TeX | UTF-8 | Python | false | false | 16,951 | py | # Optimizing a grammar for dependency length minimization
import random
import sys
objectiveName = "DepL"
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--language', type=str, default="Old_French_2.4")
parser.add_argument('--entropy_weight', type=float, default=0.001)
parser.add_argument('--lr_policy', type=float, default=0.1)
parser.add_argument('--momentum', type=float, default=0.9)
args = parser.parse_args()
myID = random.randint(0,10000000)
posUni = set()
posFine = set()
deps = ["acl", "acl:relcl", "advcl", "advmod", "amod", "appos", "aux", "auxpass", "case", "cc", "ccomp", "compound", "compound:prt", "conj", "conj:preconj", "cop", "csubj", "csubjpass", "dep", "det", "det:predet", "discourse", "dobj", "expl", "foreign", "goeswith", "iobj", "list", "mark", "mwe", "neg", "nmod", "nmod:npmod", "nmod:poss", "nmod:tmod", "nsubj", "nsubjpass", "nummod", "parataxis", "punct", "remnant", "reparandum", "root", "vocative", "xcomp"]
from math import log, exp
from random import random, shuffle
from corpusIterator_FuncHead import CorpusIteratorFuncHead as CorpusIterator_V
originalDistanceWeights = {}
def makeCoarse(x):
if ":" in x:
return x[:x.index(":")]
return x
from collections import defaultdict
docs = defaultdict(int)
def initializeOrderTable():
orderTable = {}
keys = set()
vocab = {}
distanceSum = {}
distanceCounts = {}
depsVocab = set()
for partition in ["train", "dev"]:
for sentence, metadata in CorpusIterator_V(args.language,partition).iterator():
docs[metadata["newdoc id"]] += 1
for line in sentence:
vocab[line["word"]] = vocab.get(line["word"], 0) + 1
line["coarse_dep"] = makeCoarse(line["dep"])
depsVocab.add(line["coarse_dep"])
posFine.add(line["posFine"])
posUni.add(line["posUni"])
if line["coarse_dep"] == "root":
continue
posHere = line["posUni"]
posHead = sentence[line["head"]-1]["posUni"]
dep = line["coarse_dep"]
direction = "HD" if line["head"] < line["index"] else "DH"
key = dep
keyWithDir = (dep, direction)
orderTable[keyWithDir] = orderTable.get(keyWithDir, 0) + 1
keys.add(key)
distanceCounts[key] = distanceCounts.get(key,0.0) + 1.0
distanceSum[key] = distanceSum.get(key,0.0) + abs(line["index"] - line["head"])
dhLogits = {}
for key in keys:
hd = orderTable.get((key, "HD"), 0) + 1.0
dh = orderTable.get((key, "DH"), 0) + 1.0
dhLogit = log(dh) - log(hd)
dhLogits[key] = dhLogit
return dhLogits, vocab, keys, depsVocab
import torch.nn as nn
import torch
from torch.autograd import Variable
# "linearization_logprobability"
def recursivelyLinearize(sentence, position, result, gradients_from_the_left_sum):
line = sentence[position-1]
# Loop Invariant: these are the gradients relevant at everything starting at the left end of the domain of the current element
allGradients = gradients_from_the_left_sum + sum(line.get("children_decisions_logprobs",[]))
if "linearization_logprobability" in line:
allGradients += line["linearization_logprobability"] # the linearization of this element relative to its siblings affects everything starting at the start of the constituent, but nothing to the left of it
else:
assert line["coarse_dep"] == "root"
# there are the gradients of its children
if "children_DH" in line:
for child in line["children_DH"]:
allGradients = recursivelyLinearize(sentence, child, result, allGradients)
result.append(line)
line["relevant_logprob_sum"] = allGradients
if "children_HD" in line:
for child in line["children_HD"]:
allGradients = recursivelyLinearize(sentence, child, result, allGradients)
return allGradients
import numpy.random
softmax_layer = torch.nn.Softmax()
logsoftmax = torch.nn.LogSoftmax()
def orderChildrenRelative(sentence, remainingChildren, reverseSoftmax, distanceWeights_):
childrenLinearized = []
while len(remainingChildren) > 0:
logits = torch.cat([distanceWeights_[stoi_deps[sentence[x-1]["dependency_key"]]].view(1) for x in remainingChildren])
softmax = softmax_layer(logits.view(1,-1)).view(-1)
selected = numpy.random.choice(range(0, len(remainingChildren)), p=softmax.data.numpy())
log_probability = torch.log(softmax[selected])
assert "linearization_logprobability" not in sentence[remainingChildren[selected]-1]
sentence[remainingChildren[selected]-1]["linearization_logprobability"] = log_probability
childrenLinearized.append(remainingChildren[selected])
del remainingChildren[selected]
if reverseSoftmax:
childrenLinearized = childrenLinearized[::-1]
return childrenLinearized
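# Worked example (illustrative): with two remaining children whose distance
# logits are [0.0, 1.0], the second child is drawn first with probability
# e/(1+e) ~= 0.73; each draw stores its log-probability on the chosen child so
# the REINFORCE-style update below can credit that ordering decision.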
def orderSentence(sentence, dhLogits, printThings):
sentence, metadata = sentence
root = None
logits = [None]*len(sentence)
logProbabilityGradient = 0
if printThings:
print(metadata["newdoc id"])
dhWeights_ = dhWeights[stoi_docs[metadata["newdoc id"]]]
distanceWeights_ = distanceWeights[stoi_docs[metadata["newdoc id"]]]
for line in sentence:
line["coarse_dep"] = makeCoarse(line["dep"])
if line["coarse_dep"] == "root":
root = line["index"]
if printThings:
print("------ROOT-------")
continue
if line["coarse_dep"].startswith("punct"):
continue
key = line["coarse_dep"]
line["dependency_key"] = key
dhLogit = dhWeights_[stoi_deps[key]]
probability = 1/(1 + torch.exp(-dhLogit))
dhSampled = (random() < probability.data.numpy())
line["ordering_decision_log_probability"] = torch.log(1/(1 + torch.exp(- (1 if dhSampled else -1) * dhLogit)))
direction = "DH" if dhSampled else "HD"
if printThings:
print("\t".join(list(map(str,["ORD", line["index"], (line["word"]+" ")[:10], ("".join(list(key)) + " ")[:22], line["head"],line["head"], dhSampled, direction, (str(probability.data.numpy())+" ")[:8], (str(distanceWeights_[stoi_deps[key]].data.numpy())+" ")[:8] ] ))))
headIndex = line["head"]-1
sentence[headIndex]["children_"+direction] = (sentence[headIndex].get("children_"+direction, []) + [line["index"]])
sentence[headIndex]["children_decisions_logprobs"] = (sentence[headIndex].get("children_decisions_logprobs", []) + [line["ordering_decision_log_probability"]])
for line in sentence:
if "children_DH" in line:
childrenLinearized = orderChildrenRelative(sentence, line["children_DH"][:], False, distanceWeights_)
line["children_DH"] = childrenLinearized
if "children_HD" in line:
childrenLinearized = orderChildrenRelative(sentence, line["children_HD"][:], True, distanceWeights_)
line["children_HD"] = childrenLinearized
linearized = []
recursivelyLinearize(sentence, root, linearized, Variable(torch.FloatTensor([0.0])))
if printThings or len(linearized) == 0:
print(" ".join(map(lambda x:x["word"], sentence)))
print(" ".join(map(lambda x:x["word"], linearized)))
# store new dependency links
moved = [None] * len(sentence)
for i, x in enumerate(linearized):
moved[x["index"]-1] = i
for i,x in enumerate(linearized):
if x["head"] == 0: # root
x["reordered_head"] = 0
else:
x["reordered_head"] = 1+moved[x["head"]-1]
return linearized, logits
dhLogits, vocab, vocab_deps, depsVocab = initializeOrderTable()
print(docs)
times = {'Graal_1225_prose': 1225, 'Aucassin_early13_verse-prose': 1210, 'QuatreLivresReis_late12_prose': 1180, 'TroyesYvain_1180_verse': 1180, 'Roland_1100_verse': 1100, 'BeroulTristan_late12_verse': 1180, 'StLegier_1000_verse': 1000, 'StAlexis_1050_verse': 1050, 'Strasbourg_842_prose': 842, 'Lapidaire_mid12_prose': 1150}
itos_docs = sorted(list(docs), key=lambda x:times[x])
stoi_docs = dict(zip(itos_docs, range(len(itos_docs))))
#time
#for d in docs:
#quit()
posUni = list(posUni)
itos_pos_uni = posUni
stoi_pos_uni = dict(zip(posUni, range(len(posUni))))
posFine = list(posFine)
itos_pos_ptb = posFine
stoi_pos_ptb = dict(zip(posFine, range(len(posFine))))
itos_pure_deps = sorted(list(depsVocab))
stoi_pure_deps = dict(zip(itos_pure_deps, range(len(itos_pure_deps))))
itos_deps = sorted(vocab_deps)
stoi_deps = dict(zip(itos_deps, range(len(itos_deps))))
relevantPath = "../raw-results/manual_output_funchead_coarse_depl_quasiF/"
import os
#files = [x for x in os.listdir(relevantPath) if x.startswith(args.language+"_")]
posCount = 0
negCount = 0
#for name in files:
# with open(relevantPath+name, "r") as inFile:
# for line in inFile:
# line = line.split("\t")
# if line[2] == "obj":
# dhWeight = float(line[1])
# if dhWeight < 0:
# negCount += 1
# elif dhWeight > 0:
# posCount += 1
# break
#
#print(["Neg count", negCount, "Pos count", posCount])
#
#if posCount >= 4 and negCount >= 4:
# print("Enough models!")
# quit()
dhWeights = Variable(torch.FloatTensor([0.0] * len(itos_deps) * len(docs)).view(len(docs), len(itos_deps)), requires_grad=True)
distanceWeights = Variable(torch.FloatTensor([0.0] * len(itos_deps) * len(docs)).view(len(docs), len(itos_deps)), requires_grad=True)
#dhWeights.data[:, stoi_deps["nsubj"]] = 10.0
dhWeights.data[:, stoi_deps["lifted_case"]] = -10.0
dhWeights.data[:, stoi_deps["acl"]] = -10.0
dhWeights.data[:, stoi_deps["lifted_mark"]] = -10.0
#dhWeights.data[:, stoi_deps["xcomp"]] = -10.0
#for i, key in enumerate(itos_deps):
# dhLogits[key] = 0.0
# if key == "obj":
# dhLogits[key] = (10.0 if posCount < negCount else -10.0)
#
# dhWeights.data[i] = dhLogits[key]
#
# originalDistanceWeights[key] = 0.0 #random()
# distanceWeights.data[i] = originalDistanceWeights[key]
words = list(vocab.items())
words = sorted(words, key = lambda x:x[1], reverse=True)
itos = list(map(lambda x:x[0], words))
stoi = dict(zip(itos, range(len(itos))))
if len(itos) > 6:
assert stoi[itos[5]] == 5
vocab_size = 50000
word_embeddings = torch.nn.Embedding(num_embeddings = vocab_size+3, embedding_dim = 1) #.cuda()
pos_u_embeddings = torch.nn.Embedding(num_embeddings = len(posUni)+3, embedding_dim = 1) #.cuda()
pos_p_embeddings = torch.nn.Embedding(num_embeddings = len(posFine)+3, embedding_dim=1) #.cuda()
baseline = nn.Linear(3, 1) #.cuda()
dropout = nn.Dropout(0.5) #.cuda()
components = [word_embeddings, pos_u_embeddings, pos_p_embeddings, baseline] # rnn
def parameters():
for c in components:
for param in c.parameters():
yield param
yield dhWeights
yield distanceWeights
#for pa in parameters():
# print pa
initrange = 0.1
word_embeddings.weight.data.uniform_(-initrange, initrange)
pos_u_embeddings.weight.data.uniform_(-initrange, initrange)
pos_p_embeddings.weight.data.uniform_(-initrange, initrange)
baseline.bias.data.fill_(0)
baseline.weight.data.uniform_(-initrange, initrange)
batchSize = 1
lr_lm = 0.1
crossEntropy = 10.0
def encodeWord(w):
return stoi[w]+3 if stoi[w] < vocab_size else 1
import torch.nn.functional
counter = 0
while True:
corpus = CorpusIterator_V(args.language)
corpus.permute()
corpus = corpus.iterator(rejectShortSentences = False)
for current in corpus:
if counter > 50000000:
print("Quitting at counter "+str(counter))
quit()
counter += 1
printHere = (counter % 50 == 0)
current = [current]
batchOrdered, logits = orderSentence(current[0], dhLogits, printHere)
metadata = current[0][1]
maxLength = len(batchOrdered)
if maxLength <= 2:
print("Skipping extremely short sentence", metadata)
continue
batchOrdered = [batchOrdered]
input_words = []
input_pos_u = []
input_pos_p = []
for i in range(maxLength+2):
input_words.append(list(map(lambda x: 2 if i == 0 else (encodeWord(x[i-1]["word"]) if i <= len(x) else 0), batchOrdered)))
input_pos_u.append(list(map(lambda x: 2 if i == 0 else (stoi_pos_uni[x[i-1]["posUni"]]+3 if i <= len(x) else 0), batchOrdered)))
input_pos_p.append(list(map(lambda x: 2 if i == 0 else (stoi_pos_ptb[x[i-1]["posFine"]]+3 if i <= len(x) else 0), batchOrdered)))
loss = 0
wordNum = 0
lossWords = 0
policyGradientLoss = 0
baselineLoss = 0
for c in components:
c.zero_grad()
for p in [dhWeights, distanceWeights]:
if p.grad is not None:
p.grad.data = p.grad.data.mul(args.momentum)
if True:
words_layer = word_embeddings(Variable(torch.LongTensor(input_words))) #.cuda())
pos_u_layer = pos_u_embeddings(Variable(torch.LongTensor(input_pos_u))) #.cuda())
pos_p_layer = pos_p_embeddings(Variable(torch.LongTensor(input_pos_p))) #.cuda())
inputEmbeddings = dropout(torch.cat([words_layer, pos_u_layer, pos_p_layer], dim=2))
baseline_predictions = baseline(inputEmbeddings)
lossesHead = [[Variable(torch.FloatTensor([0.0]))]*1 for i in range(maxLength+1)]
cudaZero = Variable(torch.FloatTensor([0.0]), requires_grad=False)
for i in range(1,len(input_words)):
for j in range(1):
if input_words[i][j] != 0:
if batchOrdered[j][i-1]["head"] == 0:
realHead = 0
else:
realHead = batchOrdered[j][i-1]["reordered_head"]
if batchOrdered[j][i-1]["dep"] == "root":
continue
# to make sure reward attribution considers this correctly
registerAt = max(i, realHead)
depLength = abs(i - realHead)
assert depLength >= 0
baselineLoss += torch.nn.functional.mse_loss(baseline_predictions[i][j] + baseline_predictions[realHead][j], depLength + cudaZero )
depLengthMinusBaselines = depLength - baseline_predictions[i][j] - baseline_predictions[realHead][j]
lossesHead[registerAt][j] += depLengthMinusBaselines
lossWords += depLength
for i in range(1,len(input_words)):
for j in range(1):
if input_words[i][j] != 0:
policyGradientLoss += batchOrdered[j][-1]["relevant_logprob_sum"] * ((lossesHead[i][j]).detach().cpu())
if input_words[i][j] > 2 and j == 0 and printHere:
print([itos[input_words[i][j]-3], itos_pos_ptb[input_pos_p[i][j]-3], "Cumul_DepL_Minus_Baselines", lossesHead[i][j].data.cpu().numpy()[0], "Baseline Here", baseline_predictions[i][j].data.cpu().numpy()[0]])
wordNum += 1
if wordNum == 0:
print(input_words)
print(batchOrdered)
continue
if printHere:
print(loss/wordNum)
print(lossWords/wordNum)
print(["CROSS ENTROPY", crossEntropy, exp(crossEntropy)])
crossEntropy = 0.99 * crossEntropy + 0.01 * (lossWords/wordNum)
probabilities = torch.sigmoid(dhWeights)
neg_entropy = torch.sum( probabilities * torch.log(probabilities) + (1-probabilities) * torch.log(1-probabilities))
policy_related_loss = args.lr_policy * (args.entropy_weight * neg_entropy + policyGradientLoss) # lives on CPU
policy_related_loss.backward()
loss += baselineLoss # lives on GPU
        if not torch.is_tensor(loss):  # no dependency loss accumulated for this sentence
continue
loss.backward()
if printHere:
print("BACKWARD 3 "+__file__+" "+args.language+" "+str(myID)+" "+str(counter))
torch.nn.utils.clip_grad_norm(parameters(), 5.0, norm_type='inf')
for param in parameters():
if param.grad is None:
print("WARNING: None gradient")
continue
param.data.sub_(lr_lm * param.grad.data)
if counter % 10000 == 0:
TARGET_DIR = "../raw-results/"
print("Saving")
with open(TARGET_DIR+"/manual_output_funchead_coarse_depl_quasiF/"+args.language+"_"+__file__+"_model_"+str(myID)+".tsv", "w") as outFile:
print("\t".join(list(map(str,["CoarseDependency", "Document", "DH_Weight","DistanceWeight"]))), file=outFile)
dhWeight = dhWeights.data.numpy()
distanceWeight = distanceWeights.data.numpy()
for i in range(len(itos_deps)):
key = itos_deps[i]
dependency = key
for doc in range(len(itos_docs)):
print("\t".join(list(map(str,[dependency, itos_docs[doc], dhWeight[doc, i], distanceWeight[doc, i]]))), file=outFile)
| [
"[email protected]"
] | |
f786367311655515bd413905975a7193b98e5326 | 1d8624b84243107bcc82876a74917dac983ba67d | /testing/runtests.py | 2651b5b061b99b7f34e063c74255164e50b8a21d | [
"BSD-3-Clause"
] | permissive | nwp90/djorm-ext-pgarray | 14f6877f61975b4a64d3dd601dbd0101fb191918 | 1d0d3db7b3539a8840dcbdaf8322a72aef0875d2 | refs/heads/master | 2021-01-22T00:10:35.570321 | 2013-12-12T20:01:28 | 2013-12-12T20:01:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | # -*- coding: utf-8 -*-
import os, sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.management import call_command
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) == 0:
argv.append("pg_array_fields")
call_command("test", *args, verbosity=2)
| [
"[email protected]"
] | |
177f9385ab21b6e6d7c23d7e1029cab821061e52 | ef9382999c0e37b30923180a56893c01cf626a2e | /tensorflow_probability/python/bijectors/bijector_properties_test.py | 891a3374e6dbfed7a4969ca49f3de88a6283e32a | [
"Apache-2.0"
] | permissive | bolcom/probability | 41ebc9f62caa425e0304881ed60563beb2adea2e | 4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7 | refs/heads/master | 2020-08-02T11:11:20.181219 | 2019-09-27T06:29:14 | 2019-09-27T06:30:18 | 211,330,640 | 0 | 0 | Apache-2.0 | 2019-09-27T13:54:06 | 2019-09-27T13:54:05 | null | UTF-8 | Python | false | false | 18,122 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Property-based tests for TFP bijectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import parameterized
import hypothesis as hp
from hypothesis import strategies as hps
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import hypothesis_testlib as bijector_hps
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_case
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
flags.DEFINE_enum('tf_mode', 'graph', ['eager', 'graph'],
'TF execution mode to use')
FLAGS = flags.FLAGS
TF2_FRIENDLY_BIJECTORS = (
'AffineScalar',
'BatchNormalization',
# 'CategoricalToDiscrete', TODO(b/137956955): Add support
# for hypothesis testing
'CholeskyOuterProduct',
'Cumsum',
'DiscreteCosineTransform',
'Exp',
'Expm1',
'FillTriangular',
'Gumbel',
'Identity',
'Inline',
'Invert',
'IteratedSigmoidCentered',
'Kumaraswamy',
'MatvecLU',
'NormalCDF',
'Ordered',
'PowerTransform',
'RationalQuadraticSpline',
'Reciprocal',
'ScaleTriL',
'Sigmoid',
'SinhArcsinh',
'Softplus',
'Softsign',
'Square',
'Tanh',
'Weibull',
)
BIJECTOR_PARAMS_NDIMS = {
'AffineScalar': dict(shift=0, scale=0, log_scale=0),
'Gumbel': dict(loc=0, scale=0),
'Kumaraswamy': dict(concentration1=0, concentration0=0),
'MatvecLU': dict(lower_upper=2, permutation=1),
'SinhArcsinh': dict(skewness=0, tailweight=0),
'Softplus': dict(hinge_softness=0),
'RationalQuadraticSpline': dict(bin_widths=1, bin_heights=1, knot_slopes=1),
'Weibull': dict(concentration=0, scale=0),
}
MUTEX_PARAMS = (
set(['scale', 'log_scale']),
)
FLDJ = 'forward_log_det_jacobian'
ILDJ = 'inverse_log_det_jacobian'
INVERT_LDJ = {FLDJ: ILDJ, ILDJ: FLDJ}
NO_LDJ_GRADS_EXPECTED = {
'AffineScalar': dict(shift={FLDJ, ILDJ}),
'BatchNormalization': dict(beta={FLDJ, ILDJ}),
'Gumbel': dict(loc={ILDJ}),
}
def is_invert(bijector):
return isinstance(bijector, tfb.Invert)
# pylint is unable to handle @hps.composite (e.g. complains "No value for
# argument '...' in function call"), so disable this lint for the file.
# pylint: disable=no-value-for-parameter
@hps.composite
def broadcasting_params(draw,
bijector_name,
batch_shape,
event_dim=None,
enable_vars=False):
"""Draws a dict of parameters which should yield the given batch shape."""
params_event_ndims = BIJECTOR_PARAMS_NDIMS.get(bijector_name, {})
def _constraint(param):
return constraint_for(bijector_name, param)
return draw(
tfp_hps.broadcasting_params(
batch_shape,
params_event_ndims,
event_dim=event_dim,
enable_vars=enable_vars,
constraint_fn_for=_constraint,
mutex_params=MUTEX_PARAMS))
@hps.composite
def bijectors(draw, bijector_name=None, batch_shape=None, event_dim=None,
enable_vars=False):
"""Strategy for drawing Bijectors.
The emitted bijector may be a basic bijector or an `Invert` of a basic
bijector, but not a compound like `Chain`.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
bijector_name: Optional Python `str`. If given, the produced bijectors
will all have this type. If omitted, Hypothesis chooses one from
the whitelist `TF2_FRIENDLY_BIJECTORS`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
bijector. Hypothesis will pick one if omitted.
event_dim: Optional Python int giving the size of each of the underlying
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`
`tfp.util.TransformedVariable`}
Returns:
bijectors: A strategy for drawing bijectors with the specified `batch_shape`
(or an arbitrary one if omitted).
"""
if bijector_name is None:
bijector_name = draw(hps.sampled_from(TF2_FRIENDLY_BIJECTORS))
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
if bijector_name == 'Invert':
underlying_name = draw(
hps.sampled_from(sorted(set(TF2_FRIENDLY_BIJECTORS) - {'Invert'})))
underlying = draw(
bijectors(
bijector_name=underlying_name,
batch_shape=batch_shape,
event_dim=event_dim,
enable_vars=enable_vars))
return tfb.Invert(underlying, validate_args=True)
if bijector_name == 'Inline':
if enable_vars:
scale = tf.Variable(1., name='scale')
else:
scale = 2.
b = tfb.AffineScalar(scale=scale)
inline = tfb.Inline(
forward_fn=b.forward,
inverse_fn=b.inverse,
forward_log_det_jacobian_fn=lambda x: b.forward_log_det_jacobian( # pylint: disable=g-long-lambda
x, event_ndims=b.forward_min_event_ndims),
forward_min_event_ndims=b.forward_min_event_ndims,
is_constant_jacobian=b.is_constant_jacobian,
)
inline.b = b
return inline
if bijector_name == 'DiscreteCosineTransform':
dct_type = draw(hps.integers(min_value=2, max_value=3))
return tfb.DiscreteCosineTransform(
validate_args=True, dct_type=dct_type)
if bijector_name == 'PowerTransform':
power = draw(hps.floats(min_value=0., max_value=10.))
return tfb.PowerTransform(validate_args=True, power=power)
bijector_params = draw(
broadcasting_params(bijector_name, batch_shape, event_dim=event_dim,
enable_vars=enable_vars))
ctor = getattr(tfb, bijector_name)
return ctor(validate_args=True, **bijector_params)
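# Usage sketch (illustrative): inside a @hp.given(hps.data()) test one can
# draw, e.g., data.draw(bijectors(bijector_name='Exp', event_dim=3)); the
# tests below draw with enable_vars=True to also exercise tf.Variable params.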
Support = tfp_hps.Support
def constrain_forward_shape(bijector, shape):
"""Constrain the shape so it is compatible with bijector.forward."""
if is_invert(bijector):
return constrain_inverse_shape(bijector.bijector, shape=shape)
support = bijector_hps.bijector_supports()[
type(bijector).__name__].forward
if support == tfp_hps.Support.VECTOR_SIZE_TRIANGULAR:
# Need to constrain the shape.
shape[-1] = int(shape[-1] * (shape[-1] + 1) / 2)
return shape
def constrain_inverse_shape(bijector, shape):
"""Constrain the shape so it is compatible with bijector.inverse."""
if is_invert(bijector):
return constrain_forward_shape(bijector.bijector, shape=shape)
return shape
@hps.composite
def domain_tensors(draw, bijector, shape=None):
"""Strategy for drawing Tensors in the domain of a bijector.
If the bijector's domain is constrained, this proceeds by drawing an
unconstrained Tensor and then transforming it to fit. The constraints are
declared in `bijectors.hypothesis_testlib.bijector_supports`. The
transformations are defined by `tfp_hps.constrainer`.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
bijector: A `Bijector` in whose domain the Tensors will be.
shape: An optional `TensorShape`. The shape of the resulting
Tensors. Hypothesis will pick one if omitted.
Returns:
tensors: A strategy for drawing domain Tensors for the desired bijector.
"""
if is_invert(bijector):
return draw(codomain_tensors(bijector.bijector, shape))
if shape is None:
shape = draw(tfp_hps.shapes())
bijector_name = type(bijector).__name__
support = bijector_hps.bijector_supports()[bijector_name].forward
if isinstance(bijector, tfb.PowerTransform):
constraint_fn = bijector_hps.power_transform_constraint(bijector.power)
else:
constraint_fn = tfp_hps.constrainer(support)
return draw(tfp_hps.constrained_tensors(constraint_fn, shape))
@hps.composite
def codomain_tensors(draw, bijector, shape=None):
"""Strategy for drawing Tensors in the codomain of a bijector.
If the bijector's codomain is constrained, this proceeds by drawing an
unconstrained Tensor and then transforming it to fit. The constraints are
declared in `bijectors.hypothesis_testlib.bijector_supports`. The
transformations are defined by `tfp_hps.constrainer`.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
bijector: A `Bijector` in whose codomain the Tensors will be.
shape: An optional `TensorShape`. The shape of the resulting
Tensors. Hypothesis will pick one if omitted.
Returns:
tensors: A strategy for drawing codomain Tensors for the desired bijector.
"""
if is_invert(bijector):
return draw(domain_tensors(bijector.bijector, shape))
if shape is None:
shape = draw(tfp_hps.shapes())
bijector_name = type(bijector).__name__
support = bijector_hps.bijector_supports()[bijector_name].inverse
constraint_fn = tfp_hps.constrainer(support)
return draw(tfp_hps.constrained_tensors(constraint_fn, shape))
def assert_no_none_grad(bijector, method, wrt_vars, grads):
for var, grad in zip(wrt_vars, grads):
expect_grad = var.dtype not in (tf.int32, tf.int64)
if 'log_det_jacobian' in method:
if tensor_util.is_ref(var):
# We check tensor_util.is_ref to account for xs/ys being in vars.
var_name = var.name.rstrip('_0123456789:').split('/')[-1]
else:
var_name = '[arg]'
to_check = bijector.bijector if is_invert(bijector) else bijector
to_check_method = INVERT_LDJ[method] if is_invert(bijector) else method
if var_name == '[arg]' and bijector.is_constant_jacobian:
expect_grad = False
exempt_var_method = NO_LDJ_GRADS_EXPECTED.get(type(to_check).__name__, {})
if to_check_method in exempt_var_method.get(var_name, ()):
expect_grad = False
if expect_grad != (grad is not None):
raise AssertionError('{} `{}` -> {} grad for bijector {}'.format(
'Missing' if expect_grad else 'Unexpected', method, var, bijector))
@test_util.run_all_in_graph_and_eager_modes
class BijectorPropertiesTest(test_case.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': bname, 'bijector_name': bname}
for bname in TF2_FRIENDLY_BIJECTORS)
@hp.given(hps.data())
@tfp_hps.tfp_hp_settings()
def testBijector(self, bijector_name, data):
if tf.executing_eagerly() != (FLAGS.tf_mode == 'eager'):
return
event_dim = data.draw(hps.integers(min_value=2, max_value=6))
bijector = data.draw(
bijectors(bijector_name=bijector_name, event_dim=event_dim,
enable_vars=True))
# Forward mapping: Check differentiation through forward mapping with
# respect to the input and parameter variables. Also check that any
# variables are not referenced overmuch.
# TODO(axch): Would be nice to get rid of all this shape inference logic and
# just rely on a notion of batch and event shape for bijectors, so we can
# pass those through `domain_tensors` and `codomain_tensors` and use
# `tensors_in_support`. However, `RationalQuadraticSpline` behaves weirdly
# somehow and I got confused.
codomain_event_shape = [event_dim] * bijector.inverse_min_event_ndims
codomain_event_shape = constrain_inverse_shape(
bijector, codomain_event_shape)
shp = bijector.inverse_event_shape(codomain_event_shape)
shp = tensorshape_util.concatenate(
data.draw(
tfp_hps.broadcast_compatible_shape(
shp[:shp.ndims - bijector.forward_min_event_ndims])),
shp[shp.ndims - bijector.forward_min_event_ndims:])
xs = tf.identity(data.draw(domain_tensors(bijector, shape=shp)), name='xs')
wrt_vars = [xs] + [v for v in bijector.trainable_variables
if v.dtype.is_floating]
with tf.GradientTape() as tape:
with tfp_hps.assert_no_excessive_var_usage(
'method `forward` of {}'.format(bijector)):
tape.watch(wrt_vars)
# TODO(b/73073515): Fix graph mode gradients with bijector caching.
ys = bijector.forward(xs + 0)
grads = tape.gradient(ys, wrt_vars)
assert_no_none_grad(bijector, 'forward', wrt_vars, grads)
# FLDJ: Check differentiation through forward log det jacobian with
# respect to the input and parameter variables. Also check that any
# variables are not referenced overmuch.
event_ndims = data.draw(
hps.integers(
min_value=bijector.forward_min_event_ndims,
max_value=xs.shape.ndims))
with tf.GradientTape() as tape:
max_permitted = 2 if hasattr(bijector, '_forward_log_det_jacobian') else 4
if is_invert(bijector):
max_permitted = (2 if hasattr(bijector.bijector,
'_inverse_log_det_jacobian') else 4)
with tfp_hps.assert_no_excessive_var_usage(
'method `forward_log_det_jacobian` of {}'.format(bijector),
max_permissible=max_permitted):
tape.watch(wrt_vars)
# TODO(b/73073515): Fix graph mode gradients with bijector caching.
ldj = bijector.forward_log_det_jacobian(xs + 0, event_ndims=event_ndims)
grads = tape.gradient(ldj, wrt_vars)
assert_no_none_grad(bijector, 'forward_log_det_jacobian', wrt_vars, grads)
# Inverse mapping: Check differentiation through inverse mapping with
# respect to the codomain "input" and parameter variables. Also check that
# any variables are not referenced overmuch.
domain_event_shape = [event_dim] * bijector.forward_min_event_ndims
domain_event_shape = constrain_forward_shape(bijector, domain_event_shape)
shp = bijector.forward_event_shape(domain_event_shape)
shp = tensorshape_util.concatenate(
data.draw(
tfp_hps.broadcast_compatible_shape(
shp[:shp.ndims - bijector.inverse_min_event_ndims])),
shp[shp.ndims - bijector.inverse_min_event_ndims:])
ys = tf.identity(
data.draw(codomain_tensors(bijector, shape=shp)), name='ys')
wrt_vars = [ys] + [v for v in bijector.trainable_variables
if v.dtype.is_floating]
with tf.GradientTape() as tape:
with tfp_hps.assert_no_excessive_var_usage(
'method `inverse` of {}'.format(bijector)):
tape.watch(wrt_vars)
# TODO(b/73073515): Fix graph mode gradients with bijector caching.
xs = bijector.inverse(ys + 0)
grads = tape.gradient(xs, wrt_vars)
assert_no_none_grad(bijector, 'inverse', wrt_vars, grads)
# ILDJ: Check differentiation through inverse log det jacobian with respect
# to the codomain "input" and parameter variables. Also check that any
# variables are not referenced overmuch.
event_ndims = data.draw(
hps.integers(
min_value=bijector.inverse_min_event_ndims,
max_value=ys.shape.ndims))
with tf.GradientTape() as tape:
max_permitted = 2 if hasattr(bijector, '_inverse_log_det_jacobian') else 4
if is_invert(bijector):
max_permitted = (2 if hasattr(bijector.bijector,
'_forward_log_det_jacobian') else 4)
with tfp_hps.assert_no_excessive_var_usage(
'method `inverse_log_det_jacobian` of {}'.format(bijector),
max_permissible=max_permitted):
tape.watch(wrt_vars)
# TODO(b/73073515): Fix graph mode gradients with bijector caching.
xs = bijector.inverse_log_det_jacobian(ys + 0, event_ndims=event_ndims)
grads = tape.gradient(xs, wrt_vars)
assert_no_none_grad(bijector, 'inverse_log_det_jacobian', wrt_vars, grads)
def ensure_nonzero(x):
return tf.where(x < 1e-6, tf.constant(1e-3, x.dtype), x)
CONSTRAINTS = {
'concentration':
tfp_hps.softplus_plus_eps(),
'concentration0':
tfp_hps.softplus_plus_eps(),
'concentration1':
tfp_hps.softplus_plus_eps(),
'hinge_softness':
tfp_hps.softplus_plus_eps(),
'scale':
tfp_hps.softplus_plus_eps(),
'tailweight':
tfp_hps.softplus_plus_eps(),
'AffineScalar.scale':
tfp_hps.softplus_plus_eps(),
'bin_widths':
bijector_hps.spline_bin_size_constraint,
'bin_heights':
bijector_hps.spline_bin_size_constraint,
'knot_slopes':
bijector_hps.spline_slope_constraint,
'lower_upper':
lambda x: tf.linalg.set_diag(x, ensure_nonzero(tf.linalg.diag_part(x))),
'permutation':
lambda x: tf.math.top_k(x, k=x.shape[-1]).indices,
}
def constraint_for(bijector_name=None, param=None):
if param is not None:
return CONSTRAINTS.get('{}.{}'.format(bijector_name, param),
CONSTRAINTS.get(param, tfp_hps.identity_fn))
return CONSTRAINTS.get(bijector_name, tfp_hps.identity_fn)
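# Illustrative lookups (parameter names here are examples, not an exhaustive list):
# constraint_for('AffineScalar', 'scale') resolves the qualified key
# 'AffineScalar.scale' first, then the bare 'scale' key; names with no entry in
# CONSTRAINTS fall back to tfp_hps.identity_fn.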
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
[email protected] | /project/app/models.py | MIT | iNgredie/advertising-site | Python

from django.db import models
class Ad(models.Model):
title = models.CharField(max_length=200)
description = models.CharField(max_length=1000)
price = models.DecimalField(max_digits=8, decimal_places=2)
photos_urls = models.CharField(max_length=1000)
create_at = models.DateTimeField(auto_now_add=True)
[email protected] | /DYAnalysis/AnalysisCode_8TeV/DimuonAnalysis/DYPackage/test/ShapeR/uncertEE_2D.py | no_license | echapon/pA_DY_8TeV | Python

#!/usr/bin/env python
from ROOT import *
from math import sqrt, pow
from array import array
import sys, os
from rshape_tools import *
def bambuConverter7_24(h,m_num,m_den):
for ibin_rows in range(1,7):
for ibin_cols in range(24):
if ibin_rows == 6 and ibin_cols > 11: continue
#FIXME what about the last bin?
h.SetBinContent(ibin_cols+1+(ibin_rows-1)*24,100.*sqrt(m_num(ibin_rows,ibin_cols))/m_den(ibin_rows,ibin_cols))
h.SetBinError(ibin_cols+1+(ibin_rows-1)*24,0.)
#print h.GetBinContent(ibin_cols+1+(ibin_rows-1)*24)
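#The converter above flattens a 6 x 24 matrix (presumably rapidity slices x mass
#bins, judging by the 'hslice' histograms used later) into a single 132-bin 1D
#histogram; the 6th slice only carries 12 valid bins, hence the `continue` guard.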
#FIXME accept new inputs from Andrius
fAndrius = TFile('../Inputs/sys/table_2D_frac_nBayes1.root')
#
#New source: MC efficiency and pile up
#
heffMC_syst1 = fAndrius.Get('eff_rnd_err')
heffMC_syst = TH1D("effMC_syst","effMC_syst",132,0,132)
for ix in range(heffMC_syst1.GetNbinsX()):
for iy in range(heffMC_syst1.GetNbinsY()):
heffMC_syst.SetBinContent(iy+1+ix*24,100.*sqrt(pow(heffMC_syst1.GetBinContent(ix+1,iy+1),2)))
heffMC_syst.SetBinError(iy+1+ix*24,0.)
#
#New source: pile up
#
hcollCS_syst1 = fAndrius.Get('pileup_err')
hcollCS_syst = TH1D("collCS_syst","collCS_syst",132,0,132)
for ix in range(hcollCS_syst1.GetNbinsX()):
for iy in range(hcollCS_syst1.GetNbinsY()):
hcollCS_syst.SetBinContent(iy+1+ix*24,100.*sqrt(pow(hcollCS_syst1.GetBinContent(ix+1,iy+1),2)))
hcollCS_syst.SetBinError(iy+1+ix*24,0.)
#
#get statistical uncertainty
#
print "Doing stat uncertainty"
fstat1 = TFile('../Inputs/sys/yields_bg-subtracted2D.root')
mstat_full_den = fstat1.Get('YieldsSignal')
fstat2 = TFile('../Inputs/sys/yields2D.root')
mstat_full_num = fstat2.Get('yields_data')
hstat_full = TH1D('hstat_full','hstat_full',132,0,132)
bambuConverter7_24(hstat_full,mstat_full_num, mstat_full_den)
#
#get FSR systematic uncertainty
#
print "Doing FSR syst uncertainty"
#FSR systematics
hsyst_FSR1 = fAndrius.Get("fsr_rnd_err")
hsyst_FSR2 = fAndrius.Get("fsr_model_err")
hsyst_FSR = TH1D('syst_FSR','syst_FSR',132,0,132)
for ix in range(hsyst_FSR1.GetNbinsX()):
for iy in range(hsyst_FSR1.GetNbinsY()):
hsyst_FSR.SetBinContent(iy+1+ix*24,100.*sqrt(pow(hsyst_FSR1.GetBinContent(ix+1,iy+1),2)+pow(hsyst_FSR2.GetBinContent(ix+1,iy+1),2)))
hsyst_FSR.SetBinError(iy+1+ix*24,0.)
#
#get background systematic uncertainty
#
print "Doing background syst uncertainty"
hsyst_bkg0 = fAndrius.Get("bkgr_est_err")
hsyst_bkg = TH1D("syst_bkg","syst_bkg",132,0,132)
for ix in range(hsyst_bkg0.GetNbinsX()):
for iy in range(hsyst_bkg0.GetNbinsY()):
hsyst_bkg.SetBinContent(iy+1+ix*24,100.*hsyst_bkg0.GetBinContent(ix+1,iy+1))
hsyst_bkg.SetBinError(iy+1+ix*24,0.)
#
#get efficiency correction systematics
#
print "Doing eff corr syst uncertainty"
heffcorr_err0 = fAndrius.Get('rho_err')
heffcorr_err = TH1D("effcorr_err","effcorr_err",132,0,132)
for ix in range(heffcorr_err0.GetNbinsX()):
for iy in range(heffcorr_err0.GetNbinsY()):
heffcorr_err.SetBinContent(iy+1+ix*24,100.*heffcorr_err0.GetBinContent(ix+1,iy+1))
heffcorr_err.SetBinError(iy+1+ix*24,0.)
#
#get PDF uncertainty on acceptance (same as dimuons)
#
print "Doing PDF uncertainty"
f_acc_pdf = TFile('../Inputs/sys/pdf_syst2D_7TeV.root')
hsys1 = f_acc_pdf.Get('hslice1')
hsys2 = f_acc_pdf.Get('hslice2')
hsys3 = f_acc_pdf.Get('hslice3')
hsys4 = f_acc_pdf.Get('hslice4')
hsys5 = f_acc_pdf.Get('hslice5')
hsys6 = f_acc_pdf.Get('hslice6')
syst_list = [hsys1,hsys2,hsys3,hsys4,hsys5,hsys6]
hacc_pdf = TH1D('hacc_pdf','hacc_pdf',132,0,132)
for ih in range(len(syst_list)):
for ibin in range(syst_list[ih].GetNbinsX()):
#print ih, " ", ibin, " ", 100.*syst_list[ih].GetBinError(ibin+1), " ", ibin+1+ih*syst_list[ih].GetNbinsX()
hacc_pdf.SetBinContent(ibin+1+ih*24,100.*syst_list[ih].GetBinError(ibin+1))
hacc_pdf.SetBinError(ibin+1+ih*24,0.0)
#for ibin in range(hacc_pdf.GetNbinsX()):
# print hacc_pdf.GetBinContent(ibin+1)
#
#get unfolding systematics
#
print "Doing unfolding uncertainty"
hsyst_unf0 = fAndrius.Get("det_resolution_err")
hsyst_unf = TH1D("syst_unf","syst_unf",132,0,132)
for ix in range(hsyst_unf0.GetNbinsX()):
for iy in range(hsyst_unf0.GetNbinsY()):
hsyst_unf.SetBinContent(iy+1+ix*24,100.*hsyst_unf0.GetBinContent(ix+1,iy+1))
hsyst_unf.SetBinError(iy+1+ix*24,0.)
#save stat uncertainty on unfolding for muons
#fout = ROOT.TFile("muon_unf_stat_2D.root","recreate")
##scaling numerator: ok
#hsyst_unf_mu = TH1D('muon_unf_stat','muon_unf_stat',132,0,132)
##scaling denominator ok
#mele_yield = fstat2.Get('yields_data')
#hsyst_unf_ele = TH1D('ele_unf_stat','ele_unf_stat',132,0,132)
#for ibin_rows in range(1,7):
# for ibin_cols in range(24):
# if ibin_rows == 6 and ibin_cols > 11: continue
# #FIXME what about the last bin?
# hsyst_unf_ele.SetBinContent(ibin_cols+1+(ibin_rows-1)*24,mele_yield(ibin_rows,ibin_cols))
# hsyst_unf_ele.SetBinError(ibin_cols+1+(ibin_rows-1)*24,0.)
# #print "XXX ", ibin_cols+1+(ibin_rows-1)*24, " ", hsyst_unf_ele.GetBinContent(ibin_cols+1+(ibin_rows-1)*24)
#
#fraw = ROOT.TFile("../Inputs/rawYield/DYspectrum_Rap_uncorr_2013_tmp_TRMNov.root")
#hmu_yield = fraw.Get("hdata")
#for ibin in range(hsyst_unf_mu.GetNbinsX()):
# #print ibin, " ",
# hsyst_unf_mu.SetBinContent(ibin+1,hsyst_unf.GetBinContent(ibin+1)/sqrt(hmu_yield.GetBinContent(ibin+1)/hsyst_unf_ele.GetBinContent(ibin+1)))
# hsyst_unf_mu.SetBinError(ibin+1,0.)
# print ibin, " XX ", hsyst_unf_mu.GetBinContent(ibin+1)
#
#fout.cd()
#hsyst_unf_mu.Write("muon_unf_stat")
#fout.Close()
#
#get escale systematics (2011)
#
print "Doing escale uncertainty"
hsyst_escale1 = fAndrius.Get("escale_err")
#hsyst_escale2 = fAndrius.Get("unf_escale_res")
hsyst_escale = TH1D('syst_escale','syst_escale',132,0,132)
for ix in range(hsyst_escale1.GetNbinsX()):
for iy in range(hsyst_escale1.GetNbinsY()):
        hsyst_escale.SetBinContent(iy+1+ix*24,100.*sqrt(pow(hsyst_escale1.GetBinContent(ix+1,iy+1),2))) #+pow(hsyst_escale2.GetBinContent(ix+1,iy+1),2)))
hsyst_escale.SetBinError(iy+1+ix*24,0.)
#
#get total xsection systematics as they are filled !
#
print "Doing total uncertainty"
f = TFile("../Outputs/absex_DET2D_PI_Bayesian.root")
thisx = f.Get('hxsec')
this_err = thisx.Clone()
for ibin in range(thisx.GetNbinsX()):
#alternative
this_err.SetBinContent(ibin+1,sqrt(pow(hcollCS_syst.GetBinContent(ibin+1),2)+pow(heffMC_syst.GetBinContent(ibin+1),2)+pow(hsyst_escale.GetBinContent(ibin+1),2)+pow(hsyst_unf.GetBinContent(ibin+1),2)+pow(heffcorr_err.GetBinContent(ibin+1),2)+pow(hsyst_bkg.GetBinContent(ibin+1),2)+pow(hstat_full.GetBinContent(ibin+1),2)+pow(hsyst_FSR.GetBinContent(ibin+1),2)))
#print ibin+1," ",heffMC_syst.GetBinContent(ibin+1)," ",hsyst_escale.GetBinContent(ibin+1)," ",hsyst_unf.GetBinContent(ibin+1)," ",heffcorr_err.GetBinContent(ibin+1)," ",hsyst_bkg.GetBinContent(ibin+1)," ",hstat_full.GetBinContent(ibin+1)," ",hsyst_FSR.GetBinContent(ibin+1)
this_err.SetBinError(ibin+1,0.)
thisx.SetBinError(ibin+1,this_err.GetBinContent(ibin+1))
#print ibin," ",thisx.GetBinContent(ibin+1)
fout = TFile("uncertaintiesEE_2D.root","recreate")
fout.cd()
this_err.Write()
fout.Close()
printHistoIlyaEE_2D(hstat_full, heffcorr_err, hsyst_unf, hsyst_bkg, hsyst_escale, thisx, hacc_pdf,hsyst_FSR, heffMC_syst,hcollCS_syst)
#for ibin in range(hstat_full.GetNbinsX()):
# print ibin+1, ' ', hstat_full.GetBinContent(ibin+1)
#####systematics table
#printHistoStoyanEE_2D(hstat_full, hsyst_escale, heffcorr_err, hsyst_unf, hsyst_bkg, hsyst_FSR, thisx)
#printHisto_2D(thisx)
[email protected] | /IntMemo/Parser_test.py | MIT | Wizmann/IntMemo | Python

#coding=utf-8
import unittest
from Parser import parser
from Parser import lexer
memo1 = '''
[Metadata]
date: 2015-03-07
title: 你很重要,打比赛已经不行了。我得去造个轮子
[Tags]
categories: python, angularjs, love
difficulty: 5
[Description]
我觉得我已经没有什么潜力可挖了。突然感到有些腻烦。
对于我来说,并不可以谈生活。因为我见到过巫师的水晶球,我向里面看了一眼。
从此不能自拔。
[Process]
# DO NOT EDIT THE THINGS BELOW UNLESS YOU KNOW EXACTLY WHAT YOU ARE DOING
{"date": "2013-01-04", "comment": "Only work no play, make Jake a dull boy."}
'''
class TestParser(unittest.TestCase):
def test_lexer(self):
lexer.input("[maerlyn's]\n[rainbow]")
self.assertEqual('SECTION', lexer.token().type)
self.assertEqual('CR', lexer.token().type)
self.assertEqual('SECTION', lexer.token().type)
def test_parser(self):
result = parser.parse(memo1.strip())
self.assertEqual(len(result), 4)
self.assertEqual(result[0]['section'], '[Metadata]')
self.assertEqual(result[1]['section'], '[Tags]')
self.assertEqual(result[2]['section'], '[Description]')
self.assertEqual(result[3]['section'], '[Process]')
if __name__ == '__main__':
unittest.main()
[email protected] | /geoist/inversion/hyper_param.py | MIT | CHEN-Zhaohui/geoist | Python

r"""
Classes for hyper parameter estimation (like the regularizing parameter).
These classes copy the interface of the standard inversion classes based on
:class:`~geoist.inversion.misfit.Misfit` (i.e.,
``solver.config(...).fit().estimate_``). When their ``fit`` method is called,
they perform many runs of the inversion and try to select the optimal values
for the hyper parameters. The class will then behave as the solver that yields
the best estimate (e.g., ``solver[0].predicted()``).
Available classes:
* :class:`~geoist.inversion.hyper_param.LCurve`: Estimate the regularizing
parameter using an L-curve analysis.
----
"""
import multiprocessing
import numpy
#from ..vis import giplt
import matplotlib.pyplot as mpl
from .base import OptimizerMixin
class LCurve(OptimizerMixin):
"""
Use the L-curve criterion to estimate the regularization parameter.
Runs the inversion using several specified regularization parameters.
The best value is the one that falls on the corner of the log-log plot of
the data misfit vs regularizing function.
This point is automatically found using the triangle method of
Castellanos et al. (2002).
This class behaves as :class:`~geoist.inversion.base.Misfit`.
To use it, simply call ``fit`` and optionally ``config``.
The estimate will be stored in ``estimate_`` and ``p_``.
The estimated regularization parameter will be stored in ``regul_param_``.
Parameters:
* datamisfit : :class:`~geoist.inversion.base.Misfit`
The data misfit instance for the inverse problem. Can be a sum of other
misfits.
* regul : A class from :mod:`geoist.inversion.regularization`
The regularizing function.
* regul_params : list
The values of the regularization parameter that will be tested.
* loglog : True or False
If True, will use a log-log scale for the L-curve (recommended).
* jobs : None or int
If not None, will use *jobs* processes to calculate the L-curve.
References:
Castellanos, J. L., S. Gomez, and V. Guerra (2002), The triangle method for
finding the corner of the L-curve, Applied Numerical Mathematics, 43(4),
359-373, doi:10.1016/S0168-9274(01)00179-9.
Examples:
We'll use the L-curve to estimate the best regularization parameter for a
smooth inversion using :mod:`geoist.seismic.srtomo`.
First, we'll setup some synthetic data:
>>> import numpy
>>> from geoist.mesher import SquareMesh
>>> from geoist.seismic import ttime2d, srtomo
>>> from geoist.inversion import Smoothness2D, LCurve
>>> from geoist import utils, gridder
>>> area = (0, 2, 0, 2)
>>> shape = (10, 10)
>>> model = SquareMesh(area, shape)
>>> vp = 4*numpy.ones(shape)
>>> vp[3:7,3:7] = 10
>>> vp
array([[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 10., 10., 10., 10., 4., 4., 4.],
[ 4., 4., 4., 10., 10., 10., 10., 4., 4., 4.],
[ 4., 4., 4., 10., 10., 10., 10., 4., 4., 4.],
[ 4., 4., 4., 10., 10., 10., 10., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.]])
>>> model.addprop('vp', vp.ravel())
>>> src_loc_x, src_loc_y = gridder.scatter(area, 30, seed=0)
>>> src_loc = numpy.transpose([src_loc_x, src_loc_y])
>>> rec_loc_x, rec_loc_y = gridder.circular_scatter(area, 20,
... random=True, seed=0)
>>> rec_loc = numpy.transpose([rec_loc_x, rec_loc_y])
>>> srcs = [src for src in src_loc for _ in rec_loc]
>>> recs = [rec for _ in src_loc for rec in rec_loc]
>>> tts = ttime2d.straight(model, 'vp', srcs, recs)
>>> tts = utils.contaminate(tts, 0.01, percent=True, seed=0)
Now we can setup a tomography by creating the necessary data misfit
(``SRTomo``) and regularization (``Smoothness2D``) objects. We'll normalize
the data misfit by the number of data points to make the scale of the
regularization parameter more tractable.
>>> mesh = SquareMesh(area, shape)
>>> datamisfit = (1./tts.size)*srtomo.SRTomo(tts, srcs, recs, mesh)
>>> regul = Smoothness2D(mesh.shape)
The tomography solver will be the ``LCurve`` solver. It works by calling
``fit()`` and accessing ``estimate_``, exactly like any other solver:
>>> regul_params = [10**i for i in range(-10, -2, 1)]
>>> tomo = LCurve(datamisfit, regul, regul_params)
>>> _ = tomo.fit()
>>> print(numpy.array_repr(tomo.estimate_.reshape(shape), precision=0))
array([[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 11., 9., 11., 10., 4., 4., 4.],
[ 4., 4., 4., 10., 11., 10., 10., 4., 4., 4.],
[ 4., 4., 4., 10., 10., 10., 10., 4., 4., 4.],
[ 4., 4., 4., 11., 10., 11., 9., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.]])
When ``fit()`` is called, the ``LCurve`` will run the inversion for each
value of the regularization parameter, build an l-curve, and find the
best solution (i.e., the corner value of the l-curve).
The ``LCurve`` object behaves like a normal multi-objective function.
In fact, it will try to mirror the objective function that resulted in the
best solution.
You can index it to access the data misfit and regularization parts.
For example, to get the residuals vector or the predicted data:
>>> predicted = tomo[0].predicted()
>>> residuals = tomo[0].residuals()
    >>> print('%.4f %.4f' % (residuals.mean(), residuals.std()))
-0.0000 0.0047
The estimated regularization parameter is stored in ``regul_param_``:
>>> tomo.regul_param_
1e-05
You can run the l-curve analysis in parallel by specifying the ``njobs``
argument. This will spread the computations over ``njobs`` number of
processes and give some speedup over running sequentially. Note that you
    should **not** enable any kind of multi-process parallelism
    on the data misfit class. It is often better to run each inversion
    sequentially and run many of them in parallel. Note that you'll need enough
memory to run multiple inversions at the same time, so this is not suited
for large, memory hungry inversions.
>>> par_tomo = LCurve(datamisfit, regul, regul_params, njobs=2)
    >>> _ = par_tomo.fit()  # Will use 2 processes to run inversions
>>> par_tomo.regul_param_
1e-05
>>> print(numpy.array_repr(par_tomo.estimate_.reshape(shape), precision=0))
array([[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 11., 9., 11., 10., 4., 4., 4.],
[ 4., 4., 4., 10., 11., 10., 10., 4., 4., 4.],
[ 4., 4., 4., 10., 10., 10., 10., 4., 4., 4.],
[ 4., 4., 4., 11., 10., 11., 9., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.]])
``LCurve`` also has a ``config`` method to configure the optimization
process for non-linear problems, for example:
>>> initial = numpy.ones(mesh.size)
>>> _ = tomo.config('newton', initial=initial, tol=0.2).fit()
>>> tomo.regul_param_
1e-05
>>> print(numpy.array_repr(tomo.estimate_.reshape(shape), precision=0))
array([[ 4., 4., 3., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 12., 9., 11., 10., 4., 4., 4.],
[ 4., 4., 4., 11., 11., 10., 10., 4., 4., 4.],
[ 4., 4., 4., 10., 10., 10., 10., 4., 4., 4.],
[ 4., 4., 4., 11., 10., 11., 9., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 5., 4., 4., 4.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.]])
You can view the optimization information for the run corresponding to the
best estimate using the ``stats_`` attribute:
>>> list(sorted(tomo.stats_))
['iterations', 'method', 'objective']
>>> tomo.stats_['method']
"Newton's method"
>>> tomo.stats_['iterations']
2
"""
def __init__(self, datamisfit, regul, regul_params, loglog=True,
njobs=1):
assert njobs >= 1, "njobs should be >= 1. {} given.".format(njobs)
self.regul_params = regul_params
self.datamisfit = datamisfit
self.regul = regul
self.objectives = None
self.dnorm = None
self.mnorm = None
self.fit_method = None
self.fit_args = None
self.njobs = njobs
self.loglog = loglog
# Estimated parameters from the L curve
self.corner_ = None
def _run_fit_first(self):
"""
Check if a solution was found by running fit.
Will raise an ``AssertionError`` if not.
"""
assert self.corner_ is not None, \
'No optimal solution found. Run "fit" to run the L-curve analysis.'
@property
def regul_param_(self):
"""
The regularization parameter corresponding to the best estimate.
"""
self._run_fit_first()
return self.regul_params[self.corner_]
@property
def objective_(self):
"""
The objective function corresponding to the best estimate.
"""
self._run_fit_first()
return self.objectives[self.corner_]
@property
def stats_(self):
"""
The optimization information for the best solution found.
"""
return self.objective_.stats_
@property
def p_(self):
"""
The estimated parameter vector obtained from the best regularization
parameter.
"""
return self.objective_.p_
def fmt_estimate(self, p):
"""
Return the ``estimate_`` attribute of the optimal solution.
"""
return self.objective_.estimate_
def __getitem__(self, i):
return self.objective_[i]
def fit(self):
"""
Solve for the parameter vector and optimum regularization parameter.
Combines the data-misfit and regularization solvers using the range of
regularization parameters provided and calls ``fit`` and ``config`` on
each.
The ``p_`` and ``estimate_`` attributes correspond to the combination
that falls in the corner of the L-curve.
        The regularization parameter for this corner point is stored in the
``regul_param_`` attribute.
Returns:
* self
"""
if self.datamisfit.islinear:
self.datamisfit.jacobian('null')
solvers = [
self.datamisfit + mu * self.regul for mu in self.regul_params]
if self.fit_method is not None:
for solver in solvers:
solver.config(self.fit_method, **self.fit_args)
if self.njobs > 1:
pool = multiprocessing.Pool(self.njobs)
results = pool.map(_fit_solver, solvers)
pool.close()
pool.join()
else:
results = [s.fit() for s in solvers]
self.objectives = results
self.dnorm = numpy.array(
[self.datamisfit.value(s.p_) for s in results])
self.mnorm = numpy.array([self.regul.value(s.p_) for s in results])
self.select_corner()
return self
def _scale_curve(self):
"""
Puts the data-misfit and regularizing function values in the range
[-10, 10].
"""
if self.loglog:
x, y = numpy.log(self.dnorm), numpy.log(self.mnorm)
else:
x, y = self.dnorm, self.mnorm
def scale(a):
vmin, vmax = a.min(), a.max()
l, u = -10, 10
return (((u - l) / (vmax - vmin)) *
(a - (u * vmin - l * vmax) / (u - l)))
return scale(x), scale(y)
def select_corner(self):
"""
Select the corner value of the L-curve formed inversion results.
This is performed automatically after calling the
:meth:`~geoist.inversion.hyper_param.LCurve.fit` method.
You can run this method separately after
:meth:`~geoist.inversion.hyper_param.LCurve.fit` has been called to
tweak the results.
You can access the estimated values by:
* The ``p_`` and ``estimate_`` attributes will hold the estimated
          parameter vector and formatted estimate, respectively, corresponding
to the corner value.
* The ``regul_param_`` attribute holds the value of the regularization
parameter corresponding to the corner value.
* The ``corner_`` attribute will hold the index of the corner value
in the list of computed solutions.
Uses the Triangle method of Castellanos et al. (2002).
References:
Castellanos, J. L., S. Gomez, and V. Guerra (2002), The triangle method
for finding the corner of the L-curve, Applied Numerical Mathematics,
43(4), 359-373, doi:10.1016/S0168-9274(01)00179-9.
"""
x, y = self._scale_curve()
n = len(self.regul_params)
corner = n - 1
def dist(p1, p2):
"Return the geometric distance between p1 and p2"
return numpy.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)
cte = 7. * numpy.pi / 8.
angmin = None
c = [x[-1], y[-1]]
for k in range(0, n - 2):
b = [x[k], y[k]]
for j in range(k + 1, n - 1):
a = [x[j], y[j]]
ab = dist(a, b)
ac = dist(a, c)
bc = dist(b, c)
cosa = (ab ** 2 + ac ** 2 - bc ** 2) / (2. * ab * ac)
ang = numpy.arccos(cosa)
area = 0.5 * ((b[0] - a[0]) * (a[1] - c[1]) -
(a[0] - c[0]) * (b[1] - a[1]))
# area is > 0 because in the paper C is index 0
if area > 0 and (ang < cte and
(angmin is None or ang < angmin)):
corner = j
angmin = ang
self.corner_ = corner
def plot_lcurve(self, ax=None, guides=True):
"""
Make a plot of the data-misfit x regularization values.
The estimated corner value is shown as a blue triangle.
Parameters:
* ax : matplotlib Axes
If not ``None``, will plot the curve on this Axes instance.
* guides : True or False
Plot vertical and horizontal lines across the corner value.
"""
if ax is None:
ax = mpl.gca()
else:
mpl.sca(ax)
x, y = self.dnorm, self.mnorm
if self.loglog:
mpl.loglog(x, y, '.-k')
else:
mpl.plot(x, y, '.-k')
if guides:
vmin, vmax = ax.get_ybound()
mpl.vlines(x[self.corner_], vmin, vmax)
vmin, vmax = ax.get_xbound()
mpl.hlines(y[self.corner_], vmin, vmax)
mpl.plot(x[self.corner_], y[self.corner_], '^b', markersize=10)
mpl.xlabel('Data misfit')
mpl.ylabel('Regularization')
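# Typical post-fit inspection (sketch; assumes `tomo` is a fitted LCurve instance
# and an interactive matplotlib backend):
#
#     tomo.plot_lcurve()        # corner marked with a blue triangle
#     print(tomo.regul_param_)  # regularization parameter at the corner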
def _fit_solver(solver):
"""
Call ``fit`` on the solver. Needed for multiprocessing.
"""
return solver.fit()
[email protected] | /backend/home/migrations/0001_load_initial_data.py | no_license | crowdbotics-apps/lucky-thunder-27398 | Python

from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "lucky-thunder-27398.botics.co"
site_params = {
"name": "Lucky Thunder",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
[email protected] | /chain/settings.py | Apache-2.0 | JennyLJY/chain | Python

"""
Django settings for chain project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os,sys
import djcelery
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'npn1nb&p-eb%rseya)anzsi4uuvk5+enyt1m$_a8&&uy882ak3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*',]
# Application definition
INSTALLED_APPS = [
'jet.dashboard',
'jet',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'asset',
'index',
'tasks',
'rest_framework',
'rest_framework.authtoken',
'djcelery',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chain.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # default
)
ANONYMOUS_USER_ID = -1
WSGI_APPLICATION = 'chain.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'chain',
# 'USER': 'root',
# 'PASSWORD': '111111',
# 'HOST': '127.0.0.1',
# 'PORT': '3306',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
LOGIN_URL = '/login.html'
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = False # must stay False so the explicit date/time formats below take effect
USE_TZ = False # for an internal-only system this is best left False, otherwise timezone offsets cause trouble
DATETIME_FORMAT = 'Y-m-d H:i:s'
DATE_FORMAT = 'Y-m-d'
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = '/hequan/chain/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
DISPLAY_PER_PAGE = 25
#http://www.django-rest-framework.org/api-guide/permissions/#api-reference
# rest-framework permission classes; swap in AllowAny below to open the API to everyone (current default: admin users only)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
# 'rest_framework.permissions.AllowAny',
'rest_framework.permissions.IsAdminUser',
),
}
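# Example of overriding the defaults above on a single view (hypothetical view
# name; kept commented out so nothing executes from settings):
# from rest_framework.permissions import AllowAny
# from rest_framework.views import APIView
#
# class HealthCheckView(APIView):
#     permission_classes = (AllowAny,)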
# webssh
web_ssh = "47.94.252.25"
web_port = 8002
## logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[chain] %(levelname)s %(asctime)s %(module)s %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'verbose'
},
},
'loggers': {
'tasks': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'asset': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
# celery
djcelery.setup_loader()
BROKER_URL = 'redis://127.0.0.1:6379/0' # broker messages are kept in redis database 0
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend' # store task results via the django-celery database backend
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Shanghai'
CELERY_IMPORTS = ('tasks.tasks',)
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler' # django-celery's database scheduler: task schedules are stored in the configured ORM database
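# With the database scheduler above, periodic tasks can be defined in code or via
# the admin. A minimal sketch (hypothetical task; djcelery-era celery 3.x API):
# from celery.task import periodic_task
# from datetime import timedelta
#
# @periodic_task(run_every=timedelta(minutes=5))
# def heartbeat():
#     pass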
##jet
JET_DEFAULT_THEME = 'default'
# themes
JET_THEMES = [
{
'theme': 'default', # theme folder name
'color': '#47bac1', # color of the theme's button in user menu
'title': 'Default' # theme title
},
{
'theme': 'green',
'color': '#44b78b',
'title': 'Green'
},
{
'theme': 'light-green',
'color': '#2faa60',
'title': 'Light Green'
},
{
'theme': 'light-violet',
'color': '#a464c4',
'title': 'Light Violet'
},
{
'theme': 'light-blue',
'color': '#5EADDE',
'title': 'Light Blue'
},
{
'theme': 'light-gray',
'color': '#222',
'title': 'Light Gray'
},
]
# whether to expand all menus
JET_SIDE_MENU_COMPACT = True # True is recommended when there are not many menu items
[email protected] | /aliyun-python-sdk-bssopenapi/aliyunsdkbssopenapi/request/v20171214/SetResellerUserQuotaRequest.py | Apache-2.0 | hetw/aliyun-openapi-python-sdk | Python

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbssopenapi.endpoint import endpoint_data
class SetResellerUserQuotaRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'BssOpenApi', '2017-12-14', 'SetResellerUserQuota')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Amount(self):
return self.get_query_params().get('Amount')
def set_Amount(self,Amount):
self.add_query_param('Amount',Amount)
def get_OutBizId(self):
return self.get_query_params().get('OutBizId')
def set_OutBizId(self,OutBizId):
self.add_query_param('OutBizId',OutBizId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Currency(self):
return self.get_query_params().get('Currency')
def set_Currency(self,Currency):
		self.add_query_param('Currency',Currency)

[email protected] | /infra/subsys/frontend/friendlynamed.py | no_license | dr-natetorious/app-FinSurf | Python

#!/usr/bin/env python3
from infra.reusable.context import InfraContext
from infra.reusable.proxyfrontend import LambdaProxyConstruct
from infra.reusable.pythonlambda import PythonLambda
from aws_cdk import (
core,
aws_s3 as s3,
aws_ec2 as ec2,
aws_apigateway as a,
aws_dynamodb as d,
aws_lambda as lambda_,
aws_iam as iam,
aws_kms as kms,
aws_ssm as ssm,
    aws_elasticache as ec,
    aws_route53 as dns,
    aws_route53_targets as dns_t,
    aws_certificatemanager as acm,
)
class FriendlyNamedLayer(core.Construct):
"""
Configure and deploy the network
"""
def __init__(self, scope: core.Construct, id: str, context:InfraContext, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self.security_group = ec2.SecurityGroup(self,'FriendlyNamedSvc-SG',
vpc=context.networking.vpc,
allow_all_outbound=True,
description='Security group for FriendlyNamed service components')
self.security_group.add_ingress_rule(
peer= ec2.Peer.any_ipv4(),
connection=ec2.Port(
protocol=ec2.Protocol.TCP,
string_representation='RedisInbound',
from_port=6379, to_port=6379))
self.subnet_group = ec.CfnSubnetGroup(self,'CacheSubnets',
cache_subnet_group_name='FriendlyNamed-Subnets',
description='Subnet groups for FriendlyNamed service',
subnet_ids= [net.subnet_id for net in context.networking.vpc._select_subnet_objects(subnet_group_name='FriendlyNamed')]
)
self.cluster = ec.CfnCacheCluster(self,'FriendlyNamedStore',
cache_node_type= "cache.t2.micro",
engine='redis',
cluster_name='friendly-named',
num_cache_nodes=1,
auto_minor_version_upgrade=True,
cache_subnet_group_name=self.subnet_group.cache_subnet_group_name,
vpc_security_group_ids=[self.security_group.security_group_id])
self.python_lambda = PythonLambda(self,'Friendly-Named',
build_prefix='artifacts/FinSurf-Friendly-Named',
handler='handler.app',
subnet_group_name='FriendlyNamed',
context=context,
securityGroups= [self.security_group])
self.python_lambda.function.add_environment(
key='REDIS_HOST', value=self.cluster.attr_redis_endpoint_address)
self.python_lambda.function.add_environment(
key='REDIS_PORT', value=self.cluster.attr_redis_endpoint_port)
self.frontend_proxy = LambdaProxyConstruct(self,'FriendlyNamedAPI',
handler=self.python_lambda.function,
context=context)
self.url = self.frontend_proxy.rest_api.url
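# Minimal usage sketch (names are illustrative; assumes a populated InfraContext):
#
# class FrontendStack(core.Stack):
#     def __init__(self, scope, id, context, **kwargs):
#         super().__init__(scope, id, **kwargs)
#         self.friendly_named = FriendlyNamedLayer(self, 'FriendlyNamed', context=context)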
nate@bachmeier | /timber_modisette/Python/assn2/assn16.py | no_license | RibRibble/python_april_2017 | Python

import random
def scores_grades():
arr = []
for i in range(0,10):
arr.append(random.randint(60,100))
print arr
for x in arr:
        if x >= 90:
            print "score: ", x, "; your grade is an a"
        elif x >= 80:
            print "score: ", x, "; your grade is a b"
        elif x >= 70:
            print "score: ", x, "; your grade is a c"
        elif x >= 60:
            print "score: ", x, "; your grade is a d"
print "end of program goodbye"
scores_grades()

[email protected] | /legacy/lookup_table_generator.py | no_license | dnarayanan/despotic_lookup_table_generator | Python

#NOTES TO DO:
#1. Is the velocity dispersion forced to be virialised? If not, need to make sure it is.
# Import the despotic library and various standard python libraries
from despotic import cloud,zonedcloud
from despotic.chemistry import NL99_GC
import despotic
import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
from astropy import constants as constants
import ipdb,pdb
import pickle
import copy
########################################################################
# User-settable options
########################################################################
#set the column density for 3 clouds that we'll study: units are Msun/pc^2
#column_density = np.array([75,250,1000])* u.Msun/u.pc**2.
column_density = np.linspace(0,3,10)
column_density = 10.**(column_density)*u.Msun/u.pc**2
#set number of radial zones in each cloud
NZONES = 4
metalgrid = np.linspace(1.5,0.1,3) #set up the metallicities
#set the nH grid
nhgrid = np.linspace(0.1,3,10)
nhgrid = 10.**nhgrid
#set the SFR grid
sfrgrid = np.linspace(0,3,10)
sfrgrid = 10.**sfrgrid
#DEBUG
'''
column_density = np.array([100])*u.Msun/u.pc**2
nhgrid = np.array([100])
sfrgrid = np.array([30])
metalgrid = np.array([1])
'''
########################################################################
# Program code
########################################################################
class MultiDimList(object):
def __init__(self, shape):
self.shape = shape
self.L = self._createMultiDimList(shape)
def get(self, ind):
if(len(ind) != len(self.shape)): raise IndexError()
return self._get(self.L, ind)
def set(self, ind, val):
if(len(ind) != len(self.shape)): raise IndexError()
return self._set(self.L, ind, val)
def _get(self, L, ind):
return self._get(L[ind[0]], ind[1:]) if len(ind) > 1 else L[ind[0]]
def _set(self, L, ind, val):
if(len(ind) > 1):
self._set(L[ind[0]], ind[1:], val)
else:
L[ind[0]] = val
def _createMultiDimList(self, shape):
return [self._createMultiDimList(shape[1:]) if len(shape) > 1 else None for _ in range(shape[0])]
def __repr__(self):
return repr(self.L)
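#Usage sketch for the container above, e.g.:
#  m = MultiDimList((2, 3)); m.set((0, 1), 'x'); m.get((0, 1)) -> 'x'
#Indices are tuples with one entry per dimension; unset cells hold None.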
ncolumns = len(column_density)
nmetals = len(metalgrid)
ndens = len(nhgrid)
nsfr = len(sfrgrid)
obj_list = MultiDimList((nmetals,ncolumns,ndens,nsfr))
CO_lines_list = MultiDimList((nmetals,ncolumns,ndens,nsfr,10))
CI_lines_list = MultiDimList((nmetals,ncolumns,ndens,nsfr,2))
CII_lines_list = MultiDimList((nmetals,ncolumns,ndens,nsfr))
H2_abu_list = MultiDimList((nmetals,ncolumns,ndens,nsfr))
HI_abu_list = MultiDimList((nmetals,ncolumns,ndens,nsfr))
CO_lines_array = np.zeros([nmetals,ncolumns,ndens,nsfr,10])
CI_lines_array = np.zeros([nmetals,ncolumns,ndens,nsfr,2])
CII_lines_array = np.zeros([nmetals,ncolumns,ndens,nsfr])
H2_abu_array = np.zeros([nmetals,ncolumns,ndens,nsfr])
HI_abu_array = np.zeros([nmetals,ncolumns,ndens,nsfr])
CO_intTB_array = np.zeros([nmetals,ncolumns,ndens,nsfr,10])
CI_intTB_array = np.zeros([nmetals,ncolumns,ndens,nsfr,2])
CII_intTB_array = np.zeros([nmetals,ncolumns,ndens,nsfr])
#convert the column densities to CGS
mu = 2.33
column_density_cgs = (column_density/(mu*constants.m_p)).cgs
gmc_old = None #last successfully converged cloud, used as a fallback below
for nm in range(nmetals):
for nc in range(ncolumns):
for nd in range(ndens):
for nsf in range(nsfr):
print '============================='
print (nm,nc,nd,nsf)
print '============================='
#set up the zoned cloud (a radially stratified cloud)
gmc = zonedcloud(colDen = np.linspace(column_density_cgs[nc].value/NZONES,column_density_cgs[nc].value,NZONES))
#usually we import these via some *.desp file. Here, we
#explicitly say these values as they depend on the metallicity
#as well, and can impact how the CRs and UV radiation can get
#in.
gmc.sigmaD10 = 2.0e-26 * metalgrid[nm] # Cross section to 10K thermal radiation, cm^2 H^-1
gmc.sigmaDPE = 1.0e-21 * metalgrid[nm] # Cross section to 8-13.6 eV photons, cm^2 H^-1
gmc.sigmaDISRF = 3.0e-22 * metalgrid[nm] # Cross section to ISRF photons, cm^2 H^-1
gmc.Zdust = 1.0 * metalgrid[nm] # Dust abundance relative to solar
gmc.alphaGD = 3.2e-34 * metalgrid[nm] # Dust-gas coupling coefficient, erg cm^3 K^-3/2
gmc.beta = 2.0 # Dust spectral index
gmc.dust.sigma10 = 2.0e-26 * metalgrid[nm]
gmc.dust.sigmaPE = 1.0e-21 * metalgrid[nm]
gmc.dust.sigmaISRF = 3.0e-22 * metalgrid[nm]
gmc.dust.Zd = 1.0 * metalgrid[nm]
gmc.dust.alphaGD = 3.2e-34 * metalgrid[nm]
gmc.dust.beta = 2.0
#initalise the emitter abundances
gmc.addEmitter('c+',1.e-100)
gmc.addEmitter('c',2.e-4)
gmc.addEmitter('o', 4.e-4)
gmc.addEmitter('co',1.e-100)
#you'll probably want to play with this to make it dynamically
#depend on the redshift of the slice [gmc.Tcmb = 2.73*(1+z)]
gmc.Tcmb = 2.73
#initialise the abundances for H2 and He, and tell the code to
#extrapolate from the collision tables if you hit a derived
#temperature outside of the Leiden MolData
for nz in range(NZONES):
gmc.comp[nz].xH2 = 0.5
gmc.comp[nz].xHe = 0.1
gmc.emitters[nz]['co'].extrap = True
gmc.emitters[nz]['c+'].extrap = True
gmc.emitters[nz]['o'].extrap = True
gmc.emitters[nz]['c'].extrap = True
#================================================================
#SUBGRID MODEL STUFF FOR CLOUDS
#================================================================
#we put in turbulent compression after zoned_cloud_properties
#since we don't actually want to always scale up the gmc.nh
#(i.e. if tempeq isn't being called) Note, if you call TempEq
#then you have to put in the argument noClump=True, or
#alternatively get rid of the manual turbulent compression of
#densities
gamma = 1.4
cs = np.sqrt(gamma*constants.k_B/mu/constants.m_p*10.*u.K) #assuming temp of 10K
alpha_vir = 1.0 # arbitrary value, assuming cloud is virialized
sigma_vir = np.sqrt(4.0/15.0*np.pi*alpha_vir*constants.G*mu*constants.m_p*column_density_cgs[nc]**2/(nhgrid[nd]/u.cm**3))# assuming spherical cloud
sigma_vir = max(cs,sigma_vir)
sigmaNT = np.sqrt(sigma_vir**2-cs**2)
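                #quick sanity check on the numbers above (illustrative values, not from
                #any paper): for ~100 Msun/pc^2 and nH ~ 100 cm^-3 the virial dispersion
                #comes out at a few km/s, well above the ~0.2 km/s sound speed at 10 K,
                #so sigmaNT is essentially sigma_vir over most of this grid.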
#assign other properties of clouds
SFR = sfrgrid[nsf]
gmc.nH = nhgrid[nd]
gmc.Td = 10
gmc.Tg = 10
gmc.rad.TradDust = 10
gmc.ionRate = 1.e-17*SFR
gmc.rad.ionRate = 1.e-17*SFR
gmc.chi = 1.*SFR
gmc.rad.chi = 1*SFR
gmc.sigmaNT = np.repeat(sigmaNT.cgs.value,NZONES)
#================================================================
#actually run the chemical equilibrium model. this evolves
#the temperature calcualtion in iteration with the chemistry
#which is slower, but the most right thing to do as it
#simultaneously solves for radiative transfer, chemistry and
#temperature all at once.
try:
gmc.setChemEq(network=NL99_GC, evolveTemp = 'iterate', verbose=True)
gmc.lineLum('co')[0]['lumPerH']
                except (despotic.despoticError,ValueError,np.linalg.linalg.LinAlgError,IndexError):
                    #fall back to the last successfully converged cloud; fail loudly
                    #if the very first grid cell does not converge
                    if gmc_old is None:
                        raise
                    gmc = copy.deepcopy(gmc_old)
gmc_old = copy.deepcopy(gmc)
#calculate the CO and C+ lines since we really don't want to have to do that later
CO_lines_array[nm,nc,nd,nsf,:] = np.array([gmc.lineLum('co')[r]['lumPerH'] for r in range(10)])
CI_lines_array[nm,nc,nd,nsf,:] = np.array([gmc.lineLum('c')[r]['lumPerH'] for r in range(2)])
CII_lines_array[nm,nc,nd,nsf] = gmc.lineLum('c+')[0]['lumPerH']
CO_intTB_array[nm,nc,nd,nsf,:] = np.array([gmc.lineLum('co')[r]['intTB'] for r in range(10)])
CI_intTB_array[nm,nc,nd,nsf,:] = np.array([gmc.lineLum('c')[r]['intTB'] for r in range(2)])
CII_intTB_array[nm,nc,nd,nsf] = gmc.lineLum('c+')[0]['intTB']
H2_abu_array[nm,nc,nd,nsf] = np.average(np.array([gmc.chemabundances_zone[n]['H2'] for n in range(NZONES)]),weights=gmc.mass())
HI_abu_array[nm,nc,nd,nsf] = np.average(np.array([gmc.chemabundances_zone[n]['H'] for n in range(NZONES)]),weights=gmc.mass())
#for i in range(10): CO_lines_list.set((nm,nc,nd,nsf,i),CO_lines[i])
#CII_lines_list.set((nm,nc,nd,nsf),CII_lines)
obj_list.set((nm,nc,nd,nsf),gmc) #DEBUG
np.savez('high_res.npz',column_density = column_density.value,metalgrid = metalgrid,nhgrid = nhgrid,sfrgrid = sfrgrid, CO_lines_array = CO_lines_array, CI_lines_array = CI_lines_array, CII_lines_array = CII_lines_array,CO_intTB_array = CO_intTB_array, CI_intTB_array = CI_intTB_array, CII_intTB_array=CII_intTB_array, H2_abu_array = H2_abu_array, HI_abu_array = HI_abu_array)
'''
filehandler = open("junk.obj","wb")
pickle.dump(obj_list,filehandler)
filehandler.close()
data = [obj_list,CO_lines_list]
with open("high_res.obj","wb") as f:
pickle.dump((obj_list),f)
'''
none@none | /light_sabers.py | no_license | cooklee/obiektowka | Python

class LightSabres:
types = {'blue':'light', 'green':'light', 'red':'dark'}
power = 20
def __init__(self, color):
if color in LightSabres.types:
self._color = color
a = LightSabres('red')
b = LightSabres('green')
c = LightSabres('blue')
for item in dir(a):
print(item)
print(a.types, b.types,c.types, sep="\n")
a.types = {
1,2,3
}
print(a.types, b.types,c.types, sep="\n")
a.power = 115
print(a.power, b.power, c.power)
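# Note: the two assignments above create *instance* attributes on `a` that shadow
# the shared class attributes, which is why `b` and `c` still print the originals.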
[email protected] | /spacy/lang/nb/morph_rules.py | MIT | yuxuan2015/spacy_zh_model | Python

# encoding: utf8
from __future__ import unicode_literals
from ...symbols import LEMMA, PRON_LEMMA
"""
This dict includes all the PRON and DET tag combinations found in the
dataset developed by Schibsted, Nasjonalbiblioteket and LTG (to be published
autumn 2018) and the rarely used polite form.
"""
MORPH_RULES = {
"PRON__Animacy=Anim|Case=Nom|Number=Sing|Person=1|PronType=Prs": {
"jeg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Nom"}
},
"PRON__Animacy=Anim|Case=Nom|Number=Sing|Person=2|PronType=Prs": {
"du": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Nom"},
#polite form, not sure about the tag
"De": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Nom", "Polite": "Form"}
},
"PRON__Animacy=Anim|Case=Nom|Gender=Fem|Number=Sing|Person=3|PronType=Prs": {
"hun": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Nom"}
},
"PRON__Animacy=Anim|Case=Nom|Gender=Masc|Number=Sing|Person=3|PronType=Prs": {
"han": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Nom"}
},
"PRON__Gender=Neut|Number=Sing|Person=3|PronType=Prs": {
"det": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
"alt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
"intet": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"}
},
"PRON__Gender=Fem,Masc|Number=Sing|Person=3|PronType=Prs": {
"den": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": ("Fem", "Masc")}
},
"PRON__Animacy=Anim|Case=Nom|Number=Plur|Person=1|PronType=Prs": {
"vi": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Nom"}
},
"PRON__Animacy=Anim|Case=Nom|Number=Plur|Person=2|PronType=Prs": {
"dere": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Nom"}
},
"PRON__Case=Nom|Number=Plur|Person=3|PronType=Prs": {
"de": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"}
},
"PRON__Animacy=Anim|Case=Acc|Number=Sing|Person=1|PronType=Prs": {
"meg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"}
},
"PRON__Animacy=Anim|Case=Acc|Number=Sing|Person=2|PronType=Prs": {
"deg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Acc"},
#polite form, not sure about the tag
"Dem": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Acc", "Polite": "Form"}
},
"PRON__Animacy=Anim|Case=Acc|Gender=Fem|Number=Sing|Person=3|PronType=Prs": {
"henne": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Acc"}
},
"PRON__Animacy=Anim|Case=Acc|Gender=Masc|Number=Sing|Person=3|PronType=Prs": {
"ham": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Acc"},
"han": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Acc"}
},
"PRON__Animacy=Anim|Case=Acc|Number=Plur|Person=1|PronType=Prs": {
"oss": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Acc"}
},
"PRON__Animacy=Anim|Case=Acc|Number=Plur|Person=2|PronType=Prs": {
"dere": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Acc"}
},
"PRON__Case=Acc|Number=Plur|Person=3|PronType=Prs": {
"dem": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"}
},
"PRON__Case=Acc|Reflex=Yes": {
"seg": {LEMMA: PRON_LEMMA, "Person": "Three", "Number": "Sing", "Reflex": "Yes"},
"seg": {LEMMA: PRON_LEMMA, "Person": "Three", "Number": "Plur", "Reflex": "Yes"}
},
"PRON__Animacy=Anim|Case=Nom|Number=Sing|PronType=Prs": {
"man": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Case": "Nom"}
},
"DET__Gender=Masc|Number=Sing|Poss=Yes": {
"min": {LEMMA: "min", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
"din": {LEMMA: "din", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
"hennes": {LEMMA: "hennes", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
"hans": {LEMMA: "hans", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
"sin": {LEMMA: "sin", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Masc", "Reflex":"Yes"},
"vår": {LEMMA: "vår", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
"deres": {LEMMA: "deres", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender":"Masc"},
"deres": {LEMMA: "deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender":"Masc"},
#polite form, not sure about the tag
"Deres": {LEMMA: "Deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender":"Masc", "Polite": "Form"}
},
"DET__Gender=Fem|Number=Sing|Poss=Yes": {
"mi": {LEMMA: "min", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
"di": {LEMMA: "din", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
"hennes": {LEMMA: "hennes", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
"hans": {LEMMA: "hans", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
"si": {LEMMA: "sin", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem", "Reflex":"Yes"},
"vår": {LEMMA: "vår", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
"deres": {LEMMA: "deres", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
"deres": {LEMMA: "deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
#polite form, not sure about the tag
"Deres": {LEMMA: "Deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender":"Fem", "Polite": "Form"}
},
"DET__Gender=Neut|Number=Sing|Poss=Yes": {
"mitt": {LEMMA: "min", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
"ditt": {LEMMA: "din", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
"hennes": {LEMMA: "hennes", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
"hans": {LEMMA: "hans", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
"sitt": {LEMMA: "sin", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut", "Reflex":"Yes"},
"vårt": {LEMMA: "vår", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
"deres": {LEMMA: "deres", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
"deres": {LEMMA: "deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
#polite form, not sure about the tag
"Deres": {LEMMA: "Deres", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender":"Neut", "Polite": "Form"}
},
"DET__Number=Plur|Poss=Yes": {
"mine": {LEMMA: "min", "Person": "One", "Number": "Plur", "Poss": "Yes"},
"dine": {LEMMA: "din", "Person": "Two", "Number": "Plur", "Poss": "Yes"},
"hennes": {LEMMA: "hennes", "Person": "Three", "Number": "Plur", "Poss": "Yes"},
"hans": {LEMMA: "hans", "Person": "Three", "Number": "Plur", "Poss": "Yes"},
"sine": {LEMMA: "sin", "Person": "Three", "Number": "Plur", "Poss": "Yes", "Reflex":"Yes"},
"våre": {LEMMA: "vår", "Person": "One", "Number": "Plur", "Poss": "Yes"},
"deres": {LEMMA: "deres", "Person": "Two", "Number": "Plur", "Poss": "Yes"},
"deres": {LEMMA: "deres", "Person": "Three", "Number": "Plur", "Poss": "Yes"}
},
"PRON__Animacy=Anim|Number=Plur|PronType=Rcp": {
"hverandre": {LEMMA: PRON_LEMMA, "PronType": "Rcp", "Number": "Plur"}
},
"DET__Number=Plur|Poss=Yes|PronType=Rcp": {
"hverandres": {LEMMA: "hverandres", "PronType": "Rcp", "Number": "Plur", "Poss": "Yes"}
},
"PRON___": {
"som": {LEMMA: PRON_LEMMA},
"ikkenoe": {LEMMA: PRON_LEMMA}
},
"PRON__PronType=Int": {
"hva": {LEMMA: PRON_LEMMA, "PronType": "Int"}
},
"PRON__Animacy=Anim|PronType=Int": {
"hvem": {LEMMA: PRON_LEMMA, "PronType": "Int"}
},
"PRON__Animacy=Anim|Poss=Yes|PronType=Int": {
"hvis": {LEMMA:PRON_LEMMA, "PronType": "Int", "Poss": "Yes"}
},
"PRON__Number=Plur|Person=3|PronType=Prs": {
"noen": {LEMMA:PRON_LEMMA, "PronType": "Prs", "Number": "Plur", "Person": "Three"}
},
"PRON__Gender=Fem,Masc|Number=Sing|Person=3|PronType=Prs": {
"noen": {LEMMA:PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Person": "Three", "Gender": ("Fem", "Masc")},
"den": {LEMMA:PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Person": "Three", "Gender": ("Fem", "Masc")}
},
"PRON__Gender=Neut|Number=Sing|Person=3|PronType=Prs": {
"noe": {LEMMA:PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Person": "Three", "Gender": "Neut"},
"det": {LEMMA:PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Person": "Three", "Gender": "Neut"}
},
"PRON__Gender=Fem,Masc|Number=Sing|Person=3|PronType=Prs": {
"ingen": {LEMMA:PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Person": "Three", "Gender": ("Fem", "Masc"), "Polarity": "Neg"}
},
"PRON__Number=Plur|Person=3|PronType=Prs": {
"ingen": {LEMMA:PRON_LEMMA, "PronType":"Prs", "Number": "Plur", "Person": "Three"}
},
"PRON__Number=Sing": {
"ingenting": {LEMMA:PRON_LEMMA, "Number": "Sing"}
},
"PRON__Number=Plur|Person=3|PronType=Prs": {
"alle": {LEMMA:PRON_LEMMA, "PronType": "Prs", "Number": "Plur", "Person": "Three"}
},
"PRON__Animacy=Anim|Number=Sing|PronType=Prs": {
"en": {LEMMA:PRON_LEMMA, "PronType": "Prs", "Number": "Sing"}
},
"PRON__Animacy=Anim|Case=Gen,Nom|Number=Sing|PronType=Prs": {
"ens": {LEMMA:PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Case": ("Gen", "Nom")}
},
"PRON__Animacy=Anim|Case=Gen|Number=Sing|PronType=Prs": {
"ens": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Sing", "Case": "Gen"}
},
"DET__Case=Gen|Gender=Masc|Number=Sing": {
"ens": {LEMMA: "en", "Number": "Sing", "Case": "Gen"}
},
"DET__Gender=Masc|Number=Sing": {
"enhver": {LEMMA: "enhver", "Number": "Sing", "Gender": "Masc"},
"all": {LEMMA: "all", "Number": "Sing", "Gender": "Masc"},
"hver": {LEMMA: "hver", "Number": "Sing", "Gender": "Masc"}
},
"DET__Gender=Fem|Number=Sing": {
"enhver": {LEMMA: "enhver", "Number": "Sing", "Gender": "Fem"},
"all": {LEMMA: "all", "Number": "Sing", "Gender": "Fem"},
"hver": {LEMMA: "hver", "Number": "Sing", "Gender": "Fem"}
},
"DET__Gender=Neut|Number=Sing": {
"ethvert": {LEMMA: "enhver", "Number": "Sing", "Gender": "Neut"},
"alt": {LEMMA: "all", "Number": "Sing", "Gender": "Neut"},
"hvert": {LEMMA: "hver", "Number": "Sing", "Gender": "Neut"},
},
"DET__Gender=Masc|Number=Sing": {
"noen": {LEMMA: "noen", "Gender": "Masc", "Number": "Sing"},
"noe": {LEMMA: "noen", "Gender": "Masc", "Number": "Sing"}
},
"DET__Gender=Fem|Number=Sing": {
"noen": {LEMMA: "noen", "Gender": "Fem", "Number": "Sing"},
"noe": {LEMMA: "noen", "Gender": "Fem", "Number": "Sing"}
},
"DET__Gender=Neut|Number=Sing": {
"noe": {LEMMA: "noen", "Number": "Sing", "Gender": "Neut"}
},
"DET__Number=Plur": {
"noen": {LEMMA: "noen", "Number": "Plur"}
},
"DET__Gender=Neut|Number=Sing": {
"intet": {LEMMA: "ingen", "Gender": "Neut", "Number": "Sing"}
},
"DET__Gender=Masc|Number=Sing": {
"en": {LEMMA: "en", "Number": "Sing", "Gender": "Neut"}
},
"DET__Gender=Fem|Number=Sing": {
"ei": {LEMMA: "en", "Number": "Sing", "Gender": "Fem"}
},
"DET__Gender=Neut|Number=Sing": {
"et": {LEMMA: "en", "Number": "Sing", "Gender": "Neut"}
},
"DET__Gender=Neut|Number=Sing|PronType=Int": {
"hvilket": {LEMMA: "hvilken", "PronType": "Int", "Number": "Sing", "Gender": "Neut"}
},
"DET__Gender=Fem|Number=Sing|PronType=Int": {
"hvilken": {LEMMA: "hvilken", "PronType": "Int", "Number": "Sing", "Gender": "Fem"}
},
"DET__Gender=Masc|Number=Sing|PronType=Int": {
"hvilken": {LEMMA: "hvilken", "PronType": "Int", "Number": "Sing", "Gender": "Masc"}
},
"DET__Number=Plur|PronType=Int": {
"hvilke": {LEMMA: "hvilken", "PronType": "Int", "Number": "Plur"}
},
"DET__Number=Plur": {
"alle": {LEMMA: "all", "Number": "Plur"}
},
"PRON__Number=Plur|Person=3|PronType=Prs": {
"alle": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Number": "Plur", "Person": "Three"}
},
"DET__Gender=Masc|Number=Sing|PronType=Dem": {
"den": {LEMMA: "den", "PronType": "Dem", "Number": "Sing", "Gender": "Masc"},
"slik": {LEMMA: "slik", "PronType": "Dem", "Number": "Sing", "Gender": "Masc"},
"denne": {LEMMA: "denne", "PronType": "Dem", "Number": "Sing", "Gender": "Masc"}
},
"DET__Gender=Fem|Number=Sing|PronType=Dem": {
"den": {LEMMA: "den", "PronType": "Dem", "Number": "Sing", "Gender": "Fem"},
"slik": {LEMMA: "slik", "PronType": "Dem", "Number": "Sing", "Gender": "Fem"},
"denne": {LEMMA: "denne", "PronType": "Dem", "Number": "Sing", "Gender": "Fem"}
},
"DET__Gender=Neut|Number=Sing|PronType=Dem": {
"det": {LEMMA: "det", "PronType": "Dem", "Number": "Sing", "Gender": "Neut"},
"slikt": {LEMMA: "slik", "PronType": "Dem", "Number": "Sing", "Gender": "Neut"},
"dette": {LEMMA: "dette", "PronType": "Dem", "Number": "Sing", "Gender": "Neut"}
},
"DET__Number=Plur|PronType=Dem": {
"disse": {LEMMA: "disse", "PronType": "Dem", "Number": "Plur"},
"andre": {LEMMA: "annen", "PronType": "Dem", "Number": "Plur"},
"de": {LEMMA: "de", "PronType": "Dem", "Number": "Plur"},
"slike": {LEMMA: "slik", "PronType": "Dem", "Number": "Plur"}
},
"DET__Definite=Ind|Gender=Masc|Number=Sing|PronType=Dem": {
"annen": {LEMMA: "annen", "PronType": "Dem", "Number": "Sing", "Gender": "Masc"}
},
"DET__Definite=Ind|Gender=Fem|Number=Sing|PronType=Dem": {
"annen": {LEMMA: "annen", "PronType": "Dem", "Number": "Sing", "Gender": "Fem"}
},
"DET__Definite=Ind|Gender=Neut|Number=Sing|PronType=Dem": {
"annet": {LEMMA: "annen", "PronType": "Dem", "Number": "Sing", "Gender": "Neut"}
},
"DET__Case=Gen|Definite=Ind|Gender=Masc|Number=Sing|PronType=Dem": {
"annens": {LEMMA: "annnen", "PronType": "Dem", "Number": "Sing", "Gender": "Masc", "Case": "Gen"}
},
"DET__Case=Gen|Number=Plur|PronType=Dem": {
"andres": {LEMMA: "annen", "PronType": "Dem", "Number": "Plur", "Case": "Gen"}
},
"DET__Case=Gen|Gender=Fem|Number=Sing|PronType=Dem": {
"dens": {LEMMA: "den", "PronType": "Dem", "Number": "Sing", "Gender": "Fem", "Case": "Gen"}
},
"DET__Case=Gen|Gender=Masc|Number=Sing|PronType=Dem": {
"hvis": {LEMMA: "hvis", "PronType": "Dem", "Number": "Sing", "Gender": "Masc", "Case": "Gen"},
"dens": {LEMMA: "den", "PronType": "Dem", "Number": "Sing", "Gender": "Masc", "Case": "Gen"}
},
"DET__Case=Gen|Gender=Neut|Number=Sing|PronType=Dem": {
"dets": {LEMMA: "det", "PronType": "Dem", "Number": "Sing", "Gender": "Neut", "Case": "Gen"}
},
"DET__Case=Gen|Number=Plur": {
"alles": {LEMMA: "all", "Number": "Plur", "Case": "Gen"}
},
"DET__Definite=Def|Number=Sing|PronType=Dem": {
"andre": {LEMMA: "annen", "Number": "Sing", "PronType": "Dem"}
},
"DET__Definite=Def|PronType=Dem": {
"samme": {LEMMA: "samme", "PronType": "Dem"},
"forrige": {LEMMA: "forrige", "PronType": "Dem"},
"neste": {LEMMA: "neste", "PronType": "Dem"},
},
"DET__Definite=Def": {
"selve": {LEMMA: "selve"},
"selveste": {LEMMA: "selveste"},
},
"DET___": {
"selv": {LEMMA: "selv"},
"endel": {LEMMA: "endel"}
},
"DET__Definite=Ind|Gender=Fem|Number=Sing": {
"egen": {LEMMA: "egen", "Gender": "Fem", "Number": "Sing"}
},
"DET__Definite=Ind|Gender=Masc|Number=Sing": {
"egen": {LEMMA: "egen", "Gender": "Masc", "Number": "Sing"}
},
"DET__Definite=Ind|Gender=Neut|Number=Sing": {
"eget": {LEMMA: "egen", "Gender": "Neut", "Number": "Sing"}
},
"DET__Number=Plur": {
"egne": {LEMMA: "egen", "Number": "Plur"}
},
"DET__Gender=Masc|Number=Sing": {
"ingen": {LEMMA: "ingen", "Gender": "Masc", "Number": "Sing"}
},
"DET__Number=Plur": {
"ingen": {LEMMA: "ingen", "Number": "Plur"}
},
#same wordform and pos (verb), have to specify the exact features in order to not mix them up
"VERB__Mood=Ind|Tense=Pres|VerbForm=Fin": {
"så": {LEMMA: "så", "VerbForm": "Fin", "Tense": "Pres", "Mood": "Ind"}
},
"VERB__Mood=Ind|Tense=Past|VerbForm=Fin": {
"så": {LEMMA: "se", "VerbForm": "Fin", "Tense": "Past", "Mood": "Ind"}
}
}
#copied from the English morph_rules.py
for tag, rules in MORPH_RULES.items():
for key, attrs in dict(rules).items():
rules[key.title()] = attrs
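# Illustrative sketch (hypothetical lookups, assuming the MORPH_RULES dict
# defined above): after the loop runs, every rule also matches its title-cased
# form, so a sentence-initial token receives the same attributes, e.g.
#
#     MORPH_RULES["DET__Number=Plur"]["noen"]   # original entry
#     MORPH_RULES["DET__Number=Plur"]["Noen"]   # variant added by the loop
#
# Both lookups return the same attribute dict ({LEMMA: "noen", "Number": "Plur"}).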
| [
"[email protected]"
] | |
68ca1ed8ae7d78c949702a6898dced089513f178 | 22279487bee5c983c13887ba11e6a4cd40e8bbe3 | /PreprocessData/all_class_files/BusinessAudience.py | 1d69ab09dfb0c3faa6591e9d1545d3de3d294f62 | [
"MIT"
] | permissive | DylanNEU/Schema | 018c9f683c683068422ed7b6392dcebd4ab4d4cd | 4854720a15894dd814691a55e03329ecbbb6f558 | refs/heads/main | 2023-08-30T01:50:20.541634 | 2021-11-01T15:30:41 | 2021-11-01T15:30:41 | 425,238,713 | 1 | 0 | MIT | 2021-11-06T12:29:12 | 2021-11-06T12:29:11 | null | UTF-8 | Python | false | false | 1,767 | py | from PreprocessData.all_class_files.Audience import Audience
import global_data
class BusinessAudience(Audience):
def __init__(self, additionalType=None, alternateName=None, description=None, disambiguatingDescription=None, identifier=None, image=None, mainEntityOfPage=None, name=None, potentialAction=None, sameAs=None, url=None, audienceType=None, geographicArea=None, numberOfEmployees=None, yearlyRevenue=None, yearsInOperation=None):
Audience.__init__(self, additionalType, alternateName, description, disambiguatingDescription, identifier, image, mainEntityOfPage, name, potentialAction, sameAs, url, audienceType, geographicArea)
self.numberOfEmployees = numberOfEmployees
self.yearlyRevenue = yearlyRevenue
self.yearsInOperation = yearsInOperation
def set_numberOfEmployees(self, numberOfEmployees):
self.numberOfEmployees = numberOfEmployees
def get_numberOfEmployees(self):
return self.numberOfEmployees
def set_yearlyRevenue(self, yearlyRevenue):
self.yearlyRevenue = yearlyRevenue
def get_yearlyRevenue(self):
return self.yearlyRevenue
def set_yearsInOperation(self, yearsInOperation):
self.yearsInOperation = yearsInOperation
def get_yearsInOperation(self):
return self.yearsInOperation
def __setattr__(self, key, value_list):
if type(value_list).__name__ == "NoneType" or key == "node_id":
self.__dict__[key] = value_list
return
for value in value_list:
str_value = type(value).__name__
if str_value not in global_data.get_table()[key]:
                raise ValueError("Illegal type!")
self.__dict__[key] = value_list
| [
"[email protected]"
] | |
5b19855578f5d80893bb70ce0fb3811426b5ca2b | 8526a11efc8f1d2309033011e9af52049986bf1f | /angular_dockerfile/generate_angular_dockerfile.py | 7a7250e61519ce8731c10e3c31f3b4b5b4066b60 | [] | no_license | Bakushin10/generate-dockerfile | 3ff4ba872d0ab463f14b7d778c054bb893e14703 | d932331527a25c9a527e15329c9d18ffff63fd6b | refs/heads/main | 2023-03-31T17:22:25.918207 | 2021-03-24T08:14:12 | 2021-03-24T08:14:12 | 350,233,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,075 | py | from dockerfile_generator_interface import dockerfileGeneratorInterface
from pprint import pprint
from enums import Enum
from PyInquirer import style_from_dict, Token, prompt
from PyInquirer import Validator, ValidationError
class GenerateDockerfileForAngular(dockerfileGeneratorInterface):
def __init__(self):
self.server = ""
self.project_name = ""
        self.SPACE = ""
self.questions()
self.APP_HOME = "ENV APP_HOME=/{}".format(self.project_name)
self.ENV_PROJECT_NAME = "ENV PROJECT_NAME={}".format(self.project_name)
self.apache_dockerfile = [
"FROM node:12.14.1-slim AS builder",
self.APP_HOME,
"WORKDIR $APP_HOME",
            self.SPACE,
"COPY ./package.json package.json",
"RUN npm install",
"COPY . .",
"RUN npm run build",
            self.SPACE,
"FROM ubuntu:18.04",
self.ENV_PROJECT_NAME,
"ENV DEBIAN_FRONTEND=noninteractive",
"RUN apt-get update && apt-get install -y \\",
" apache2 \\",
" apache2-utils \\",
self.SPECE,
"COPY --from=builder /dcp/dist/$PROJECT_NAME /var/www/html/$PROJECT_NAME",
"COPY --from=builder /dcp/config/apache/000-default.conf /etc/apache2/sites-available/000-default.conf",
"RUN a2enmod headers",
"EXPOSE 80",
'CMD ["apache2ctl", "-D", "FOREGROUND"]'
]
self.nginx_dockerfile = [
"FROM node:12.14.1-slim AS builder",
self.APP_HOME,
"WORKDIR $APP_HOME",
"COPY ./package.json package.json",
"RUN npm install",
"COPY . .",
"RUN npm run build",
            self.SPACE,
"FROM nginx:1.19.0-alpine",
self.ENV_PROJECT_NAME,
"COPY --from=builder /dcp/dist/$PROJECT_NAME /dcp/$PROJECT_NAME",
"COPY --from=builder /dcp/config/nginx/nginx.conf /etc/nginx/nginx.conf",
"EXPOSE 80",
'CMD ["nginx", "-g", "daemon off;"]'
]
def generate(self):
f = open("Dockerfile", "a")
dockerfile = self.apache_dockerfile if self.server == "apache" else self.nginx_dockerfile
for i in range(len(dockerfile)):
f.write(dockerfile[i] + "\n")
f.close()
def questions(self):
style = Enum.style
questions = [
{
'type': 'list',
'name': 'server',
'message': 'What is your prefer web server?',
'choices': ['Apache', 'Nginx'],
'filter': lambda val: val.lower()
},
{
'type': 'input',
'name': 'project_name',
'message': 'What\'s your project name?',
#'validate': PhoneNumberValidator
}
]
answers = prompt(questions, style=style)
self.server = answers["server"]
self.project_name = answers["project_name"] | [
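# Minimal usage sketch (assumption: run interactively from an Angular project
# root, answering the server and project-name prompts):
#
#     generator = GenerateDockerfileForAngular()   # questions() runs in __init__
#     generator.generate()                         # writes ./Dockerfile
#
# Note that generate() opens Dockerfile in append mode ("a"), so remove any
# stale Dockerfile before re-running to avoid duplicated content.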
"[email protected]"
] | |
c72b48a9ab714fdd1c3751bdc762b71fe0395bae | 3c8856746c2da97abb50571a1883f8da07707633 | /core/tests/mediawikiversion_tests.py | f6a2185faece910080a9ef0488f196cb415650c1 | [
"MIT"
] | permissive | Tillsa/pywikibot_test_wikidata | 5bb7630c53e04a96f4da352921a55037e80c1c28 | c4b9a1618a5e618305f3abdd359a40f01b14fd90 | refs/heads/master | 2021-01-01T05:24:08.575795 | 2016-04-11T16:01:54 | 2016-04-11T16:01:54 | 55,986,811 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,771 | py | # -*- coding: utf-8 -*-
"""Tests for the tools.MediaWikiVersion class."""
#
# (C) Pywikibot team, 2008-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: c6c3f608d44eb5cd10f06136b4aed6f1c2d1e5a4 $'
from pywikibot.tools import MediaWikiVersion as V
from tests.aspects import unittest, TestCase
class TestMediaWikiVersion(TestCase):
"""Test MediaWikiVersion class comparisons."""
net = False
def _make(self, version):
"""Create a MediaWikiVersion instance and check that the str stays."""
v = V(version)
self.assertEqual(str(v), version)
return v
def test_normal_versions(self):
"""Test comparison between release versions."""
self.assertGreater(self._make('1.23'), self._make('1.22.0'))
self.assertTrue(self._make('1.23') == self._make('1.23'))
self.assertEqual(self._make('1.23'), self._make('1.23'))
def test_wmf_versions(self):
"""Test comparison between wmf versions."""
self.assertGreater(self._make('1.23wmf10'), self._make('1.23wmf9'))
self.assertEqual(self._make('1.23wmf10'), self._make('1.23wmf10'))
def test_combined_versions(self):
"""Test comparison between wmf versions and release versions."""
self.assertGreater(self._make('1.23wmf10'), self._make('1.22.3'))
self.assertGreater(self._make('1.23'), self._make('1.23wmf10'))
def test_non_wmf_scheme(self):
"""Test version numbers not following the wmf-scheme."""
self.assertGreater(self._make('1.23alpha'), self._make('1.22.3'))
self.assertGreater(self._make('1.23alpha'), self._make('1.23wmf1'))
self.assertGreater(self._make('1.23beta1'), self._make('1.23alpha'))
self.assertGreater(self._make('1.23beta2'), self._make('1.23beta1'))
self.assertGreater(self._make('1.23-rc.1'), self._make('1.23beta2'))
self.assertGreater(self._make('1.23-rc.2'), self._make('1.23-rc.1'))
self.assertGreater(self._make('1.23'), self._make('1.23-rc.2'))
self.assertEqual(self._make('1.23rc1'), self._make('1.23-rc.1'))
def _version_check(self, version, digits, dev_version, suffix):
v = self._make(version)
self.assertEqual(v.version, digits)
self.assertEqual(v._dev_version, dev_version)
self.assertEqual(v.suffix, suffix)
def test_interpretation(self):
"""Test if the data is correctly interpreted."""
self._version_check('1.23', (1, 23), (4, ), '')
self._version_check('1.23wmf1', (1, 23), (0, 1), 'wmf1')
self._version_check('1.23alpha', (1, 23), (1, ), 'alpha')
self._version_check('1.27.0-alpha', (1, 27, 0), (1, ), '-alpha')
self._version_check('1.23beta1', (1, 23), (2, 1), 'beta1')
self._version_check('1.23rc1', (1, 23), (3, 1), 'rc1')
self._version_check('1.23-rc1', (1, 23), (3, 1), '-rc1')
self._version_check('1.23-rc.1', (1, 23), (3, 1), '-rc.1')
self._version_check('1.23text', (1, 23), (4, ), 'text')
def test_invalid_versions(self):
"""Verify that insufficient version fail creating."""
self.assertRaises(ValueError, V, 'invalid')
self.assertRaises(ValueError, V, '1number')
self.assertRaises(ValueError, V, '1.missing')
self.assertRaises(AssertionError, V, '1.23wmf-1')
def test_generator(self):
"""Test from_generator classmethod."""
self.assertEqual(V.from_generator('MediaWiki 1.2.3'),
self._make('1.2.3'))
self.assertRaises(ValueError, V.from_generator, 'Invalid 1.2.3')
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| [
"[email protected]"
] | |
52f0cbbc67ed8ce99700dbeab99cb58a07941358 | bede0bbf055a7cffc62808cd6ee4654c02e2a2c4 | /PlotConfiguration/ISR/2016/unfolding/unfolding_input/nuisances_muon.py | 5957ef552ada88a3fcb44bd2ff3c658455907e76 | [] | no_license | bhoh/MultiUniv | ec4d94180971542d8c4d69726c4e26a3f90596ef | 1105e8203ce650457bb9cbfb985a90323845c8b0 | refs/heads/master | 2020-04-24T07:33:41.915446 | 2020-02-13T10:13:40 | 2020-02-13T10:13:40 | 171,802,629 | 0 | 0 | null | 2019-06-03T06:49:59 | 2019-02-21T04:54:20 | C | UTF-8 | Python | false | false | 5,245 | py | #nuisances['lumi'] = {
# 'name' : 'lumi_13TeV',
# 'samples' : {
# 'DY' : '1.023',
# }
# 'type' : 'lnN',
# }
########## Efficiency and Energy Scale
trg_syst = ['muon_double_trigSFUp_DoubleMuon_POGTightWithTightIso/muon_double_trigSF_DoubleMuon_POGTightWithTightIso', 'muon_double_trigSFDn_DoubleMuon_POGTightWithTightIso/muon_double_trigSF_DoubleMuon_POGTightWithTightIso']
id_syst = ['muon_double_idSFUp_POGTightWithTightIso/muon_double_idSF_POGTightWithTightIso', 'muon_double_idSFDn_POGTightWithTightIso/muon_double_idSF_POGTightWithTightIso']
iso_syst = ['muon_double_isoSFUp_POGTightWithTightIso/muon_double_isoSF_POGTightWithTightIso','muon_double_isoSFDn_POGTightWithTightIso/muon_double_isoSF_POGTightWithTightIso']
l1prefire_syst = ['evt_weight_l1prefire_up/evt_weight_l1prefire','evt_weight_l1prefire_down/evt_weight_l1prefire']
pileup_syst = ['evt_weight_pureweight_up/evt_weight_pureweight','evt_weight_pureweight_down/evt_weight_pureweight']
alphaS_syst = 'PDFWeights_AlphaS'
pdfScale_syst = 'PDFWeights_Scale'
pdfErr_syst = 'PDFWeights_Error'
#id_syst_ele = ['LepSF'+Nlep+'l_ele_'+eleWP+'_Up', 'LepSF'+Nlep+'l_ele_'+eleWP+'_Do']
nuisances['trig_sf'] = {
'name' : 'trgSF',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
'DYJetsToMuMu' : trg_syst ,
'DYJets10to50ToMuMu' : trg_syst ,
'DYJetsToTauTau' : trg_syst ,
'DYJets10to50ToTauTau' : trg_syst ,
'TTLL_powheg' : trg_syst ,
'WJets_MG' : trg_syst ,
'WW_pythia' : trg_syst ,
'WZ_pythia' : trg_syst ,
'ZZ_pythia' : trg_syst ,
},
}
nuisances['id_sf'] = {
'name' : 'IdSF',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
'DYJetsToMuMu' : id_syst ,
'DYJets10to50ToMuMu' : id_syst ,
'DYJetsToTauTau' : id_syst ,
'DYJets10to50ToTauTau' : id_syst ,
'TTLL_powheg' : id_syst ,
'WJets_MG' : id_syst ,
'WW_pythia' : id_syst ,
'WZ_pythia' : id_syst ,
'ZZ_pythia' : id_syst ,
},
}
nuisances['iso_sf'] = {
'name' : 'IsoSF',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
'DYJetsToMuMu' : iso_syst ,
'DYJets10to50ToMuMu' : iso_syst ,
'DYJetsToTauTau' : iso_syst ,
'DYJets10to50ToTauTau' : iso_syst ,
'TTLL_powheg' : iso_syst ,
'WJets_MG' : iso_syst ,
'WW_pythia' : iso_syst ,
'WZ_pythia' : iso_syst ,
'ZZ_pythia' : iso_syst ,
},
}
nuisances['l1prefire'] = {
'name' : 'L1Prefire',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
'DYJetsToMuMu' : l1prefire_syst ,
'DYJets10to50ToMuMu' : l1prefire_syst ,
'DYJetsToTauTau' : l1prefire_syst ,
'DYJets10to50ToTauTau' : l1prefire_syst ,
'TTLL_powheg' : l1prefire_syst ,
'WJets_MG' : l1prefire_syst ,
'WW_pythia' : l1prefire_syst ,
'WZ_pythia' : l1prefire_syst ,
'ZZ_pythia' : l1prefire_syst ,
},
}
nuisances['pileup'] = {
'name' : 'PU',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
'DYJetsToMuMu' : pileup_syst ,
'DYJets10to50ToMuMu' : pileup_syst ,
'DYJetsToTauTau' : pileup_syst ,
'DYJets10to50ToTauTau' : pileup_syst ,
'TTLL_powheg' : pileup_syst ,
'WJets_MG' : pileup_syst ,
'WW_pythia' : pileup_syst ,
'WZ_pythia' : pileup_syst ,
'ZZ_pythia' : pileup_syst ,
},
}
nuisances['alphaS'] = {
'name' : 'AlphaS',
'kind' : 'PDF',
'type' : 'alphaS',
'samples' : {
'DYJetsToMuMu' : alphaS_syst ,
'DYJets10to50ToMuMu' : alphaS_syst ,
'DYJetsToTauTau' : alphaS_syst ,
'DYJets10to50ToTauTau' : alphaS_syst ,
'TTLL_powheg' : alphaS_syst ,
'WJets_MG' : alphaS_syst ,
'WW_pythia' : alphaS_syst ,
'WZ_pythia' : alphaS_syst ,
'ZZ_pythia' : alphaS_syst ,
},
}
nuisances['pdfScale'] = {
'name' : 'Scale',
'kind' : 'PDF',
'type' : 'Scale',
'samples' : {
'DYJetsToMuMu' : pdfScale_syst ,
'DYJets10to50ToMuMu' : pdfScale_syst ,
'DYJetsToTauTau' : pdfScale_syst ,
'DYJets10to50ToTauTau' : pdfScale_syst ,
'TTLL_powheg' : pdfScale_syst ,
'WJets_MG' : pdfScale_syst ,
'WW_pythia' : pdfScale_syst ,
'WZ_pythia' : pdfScale_syst ,
'ZZ_pythia' : pdfScale_syst ,
},
}
nuisances['pdfErr'] = {
'name' : 'PDFerror',
'kind' : 'PDF',
'type' : 'HESSIAN',
'samples' : {
'DYJetsToMuMu' : pdfErr_syst ,
'DYJets10to50ToMuMu' : pdfErr_syst ,
'DYJetsToTauTau' : pdfErr_syst ,
'DYJets10to50ToTauTau' : pdfErr_syst ,
'TTLL_powheg' : pdfErr_syst ,
'WJets_MG' : pdfErr_syst ,
'WW_pythia' : pdfErr_syst ,
'WZ_pythia' : pdfErr_syst ,
'ZZ_pythia' : pdfErr_syst ,
},
}
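# Sketch of how a consumer of this config might read a 'weight'-kind entry
# (hypothetical code; the framework that actually interprets these dicts lives
# elsewhere in the repo):
#
#     for name, nuis in nuisances.items():
#         if nuis.get('kind') == 'weight':
#             up, down = nuis['samples']['DYJetsToMuMu']   # up/nominal and down/nominal ratios
#
# Each sample maps to a pair of weight-ratio expressions used to build the
# shape variations.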
| [
"[email protected]"
] | |
0ccc651bb5faebcb8a57fbc6a17b6476ed21a236 | b28d13b2e785398f1a8074e0034080539009c837 | /django-rest-routers/snippets/urls.py | f9fbae66282440d82cee06df73164e9686a966eb | [] | no_license | sdugaro/django | c58f1c290a1cadf90d723083c1bceefbbac99073 | 1704f1796cb3f25cac260c6120becd70e9f1c33f | refs/heads/main | 2023-02-06T22:06:41.872202 | 2020-12-27T09:04:12 | 2020-12-27T09:04:12 | 311,162,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | from django.urls import path, include
from snippets import views
from rest_framework.routers import DefaultRouter
#from rest_framework.urlpatterns import format_suffix_patterns
# Use a conventional Router from the rest_framework
# instead of designing your own URL configuration.
# Create a DefaultRouter and register viewsets
router = DefaultRouter()
router.register(r'snippets', views.SnippetViewSet)
router.register(r'users', views.UserViewSet)
# Include the URLs generated by the router for the registered ViewSets.
# Note that the DefaultRouter also provides the api_root view automatically,
# so the hand-written function-based view is no longer needed.
urlpatterns = [
#path('', views.api_root),
path('', include(router.urls))
]
# allow for suffixed endpoint redirection
#urlpatterns = format_suffix_patterns(urlpatterns)
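# For reference, the DefaultRouter generates URL patterns of this shape for
# each registered ViewSet (the actions available depend on the ViewSet class):
#
#     /                  api_root listing
#     /snippets/         e.g. list and create
#     /snippets/{pk}/    e.g. retrieve, update, destroy
#     /users/            list
#     /users/{pk}/       retrieve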
| [
"[email protected]"
] | |
bfc1cb8fc68f40887ac4c4db66cfe3d73bc5f6da | 0d8d794d06827aea3ad460bd7ffc58a63911b21d | /Python/Piling Up!.py | 00ad5a646dd183016adf6b9a008d015313180e08 | [] | no_license | IamOmaR22/HackerRank-Problems-Solve-and-Programs-Practice-with-Python | fe1f70d2f791d15636f7a55419fd006bd952f4f5 | c3057bd92c75c771877f9f469361a063b8db0915 | refs/heads/master | 2023-02-22T05:21:35.292396 | 2021-01-25T11:43:05 | 2021-01-25T11:43:05 | 263,082,392 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
if __name__ == '__main__':
t = int(input())
for i in range(0, t):
n = int(input())
lst = list(map(int, input().split()))
min_index = lst.index(min(lst))
left = lst[ : min_index]
right = lst[min_index : ]
if left == sorted(left, reverse = True) and right == sorted(right, reverse = False):
print("Yes")
else:
print("No")
| [
"[email protected]"
] | |
65fa18f14f9c38a6a56da56ca671eaf719578587 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/compareVersions_20200909134459.py | f931ca7d437ba6dcc15d17e76c8b91aeb128789e | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | def compare(version1,version2):
# split where there are ,
# then loop through both of them
# if v1 > v2 return 1
# if v1 < v2 return -1
# otherwise return 0
v1 = [int(i) for i in version1.split(".")]
v2= [int(i) for i in version2.split(".")]
# if len(v1) > len(v2):
# while len(v1) !=len(v2):
# v2.append(0)
# else:
# while len(v1) !=len(v2):
# v1.append(0)
for i in range(len(v1)):
if v1[i] > v2[i] or (v1[i] is not None and v2[i] is None):
return 1
elif v1[i] < v2[i] or (v2[i] is not None and v1[i] is None):
return -1
return 0
print(compare("1.0.1","1"))
| [
"[email protected]"
] | |
6ae0c683f2f431ac46ead2b62106f95b3dc5e8d8 | 0a46b027e8e610b8784cb35dbad8dd07914573a8 | /scripts/venv/lib/python2.7/site-packages/cogent/data/molecular_weight.py | 2f10556a9b24528be62ac1cd7c12065b83fd9cf0 | [
"MIT"
] | permissive | sauloal/cnidaria | bb492fb90a0948751789938d9ec64677052073c3 | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | refs/heads/master | 2021-01-17T13:43:17.307182 | 2016-10-05T14:14:46 | 2016-10-05T14:14:46 | 33,726,643 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,884 | py | #!/usr/bin/env Python
"""Data for molecular weight calculations on proteins and nucleotides."""
__author__ = "Rob Knight"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Rob Knight"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Rob Knight"
__email__ = "[email protected]"
__status__ = "Production"
ProteinWeights = {
'A': 89.09,
'C': 121.16,
'D': 133.10,
'E': 147.13,
'F': 165.19,
'G': 75.07,
'H': 155.16,
'I': 131.18,
'K': 146.19,
'L': 131.18,
'M': 149.21,
'N': 132.12,
'P': 115.13,
'Q': 146.15,
'R': 174.20,
'S': 105.09,
'T': 119.12,
'V': 117.15,
'W': 204.23,
'Y': 181.19,
'U': 168.06,
}
RnaWeights = {
'A': 313.21,
'U': 290.17,
'C': 289.19,
'G': 329.21,
}
DnaWeights = {
'A': 297.21,
'T': 274.17,
'C': 273.19,
'G': 313.21,
}
ProteinWeightCorrection = 18.0 #terminal residues not dehydrated
DnaWeightCorrection = 61.96 #assumes 5' monophosphate, 3' OH
RnaWeightCorrection = DnaWeightCorrection
class WeightCalculator(object):
"""Calculates molecular weight of a non-degenerate sequence."""
def __init__(self, Weights, Correction):
"""Returns a new WeightCalculator object (class, so serializable)."""
self.Weights = Weights
self.Correction = Correction
def __call__(self, seq, correction=None):
"""Returns the molecular weight of a specified sequence."""
if not seq:
return 0
if correction is None:
correction = self.Correction
get_mw = self.Weights.get
return sum([get_mw(i, 0) for i in seq]) + correction
DnaMW = WeightCalculator(DnaWeights, DnaWeightCorrection)
RnaMW = WeightCalculator(RnaWeights, RnaWeightCorrection)
ProteinMW = WeightCalculator(ProteinWeights, ProteinWeightCorrection)
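# Minimal usage sketch (doctest-style; weights taken from the tables above):
#
#     >>> round(ProteinMW('ACD'), 2)   # 89.09 + 121.16 + 133.10 + 18.0 water correction
#     361.35
#     >>> round(DnaMW('AT'), 2)        # 297.21 + 274.17 + 61.96 terminal correction
#     633.34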
| [
"[email protected]"
] | |
8691a9bc6ee66bfd8d8db9b0642ffaa84c910880 | 51474e20f976b9d2d85c870386ae8e7b74a98a63 | /mla/fm.py | 85964a99d83bede4a8b36a499f86ffd837c721f7 | [
"MIT"
] | permissive | Fage2016/MLAlgorithms | d191a579d97438cc593d5c1d883d8bdffe0eea78 | 035e489a879d01a84fffff74885dc6b1bca3c96f | refs/heads/master | 2023-03-07T14:50:51.861322 | 2022-01-31T06:13:40 | 2022-01-31T06:13:40 | 73,798,801 | 0 | 0 | MIT | 2023-02-04T23:47:22 | 2016-11-15T09:39:07 | Python | UTF-8 | Python | false | false | 2,594 | py | # coding:utf-8
import autograd.numpy as np
from autograd import elementwise_grad
from mla.base import BaseEstimator
from mla.metrics import mean_squared_error, binary_crossentropy
np.random.seed(9999)
"""
References:
Factorization Machines http://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf
"""
class BaseFM(BaseEstimator):
def __init__(
self, n_components=10, max_iter=100, init_stdev=0.1, learning_rate=0.01, reg_v=0.1, reg_w=0.5, reg_w0=0.0
):
"""Simplified factorization machines implementation using SGD optimizer."""
self.reg_w0 = reg_w0
self.reg_w = reg_w
self.reg_v = reg_v
self.n_components = n_components
self.lr = learning_rate
self.init_stdev = init_stdev
self.max_iter = max_iter
self.loss = None
self.loss_grad = None
def fit(self, X, y=None):
self._setup_input(X, y)
# bias
self.wo = 0.0
# Feature weights
self.w = np.zeros(self.n_features)
# Factor weights
self.v = np.random.normal(scale=self.init_stdev, size=(self.n_features, self.n_components))
self._train()
def _train(self):
for epoch in range(self.max_iter):
y_pred = self._predict(self.X)
loss = self.loss_grad(self.y, y_pred)
w_grad = np.dot(loss, self.X) / float(self.n_samples)
self.wo -= self.lr * (loss.mean() + 2 * self.reg_w0 * self.wo)
            self.w -= self.lr * (w_grad + 2 * self.reg_w * self.w)
self._factor_step(loss)
def _factor_step(self, loss):
for ix, x in enumerate(self.X):
for i in range(self.n_features):
v_grad = loss[ix] * (x.dot(self.v).dot(x[i])[0] - self.v[i] * x[i] ** 2)
                self.v[i] -= self.lr * (v_grad + 2 * self.reg_v * self.v[i])
def _predict(self, X=None):
linear_output = np.dot(X, self.w)
factors_output = np.sum(np.dot(X, self.v) ** 2 - np.dot(X ** 2, self.v ** 2), axis=1) / 2.0
return self.wo + linear_output + factors_output
class FMRegressor(BaseFM):
    def fit(self, X, y=None):
        # The loss must be set before BaseFM.fit() calls _train().
        self.loss = mean_squared_error
        self.loss_grad = elementwise_grad(mean_squared_error)
        super(FMRegressor, self).fit(X, y)
class FMClassifier(BaseFM):
    def fit(self, X, y=None):
        # Likewise, set the loss before training runs inside BaseFM.fit().
        self.loss = binary_crossentropy
        self.loss_grad = elementwise_grad(binary_crossentropy)
        super(FMClassifier, self).fit(X, y)
def predict(self, X=None):
predictions = self._predict(X)
return np.sign(predictions)
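# Minimal usage sketch (assumptions: synthetic data; hyperparameters are
# illustrative, not tuned):
if __name__ == "__main__":
    X = np.random.randn(100, 5)
    true_w = np.array([1.0, -2.0, 0.5, 0.0, 3.0])
    y = X.dot(true_w) + 0.1 * np.random.randn(100)

    model = FMRegressor(n_components=4, max_iter=10, learning_rate=0.001)
    model.fit(X, y)
    print("train MSE:", mean_squared_error(y, model._predict(X)))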
| [
"[email protected]"
] | |
29c5d279f4d670c345637bd7f5d6167924532aa7 | cdd79cef15bdf6a0b9098e27028bbe38607bc288 | /深さ優先探索/ABC177_D_Friends_dfs.py | 943a19f60eeb924be107769dc2e7bfa0f536459a | [] | no_license | nord2sudjp/atcoder | ee35a3eb35717485dc62627172de24c9dac102fb | 6b1cc5102a615492cc7ff8a33813bbb954641782 | refs/heads/master | 2023-08-25T11:27:14.205593 | 2021-09-27T05:43:04 | 2021-09-27T05:43:04 | 302,855,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | import sys
sys.setrecursionlimit(1000000)
f=lambda:map(int,input().split())
N,M=f()
if M==0:
print(1)
exit()
G=[set() for _ in range(N+1)]
for _ in range(M):
a,b=f()
G[a].add(b)
G[b].add(a)
# print(G)
d={}
F=[0]*(N+1)
def dfs(i,n):
if F[i]:return
F[i]=1
for g in G[i]:
if F[g]:continue
t=d.get(n, set())
t.add(g)
d[n]=t
dfs(g,n)
for i in range(1,N+1):
dfs(i,i)
print(max(len(i) for i in d.values())+1) | [
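# Sample run (stdin -> stdout):
#   5 3
#   1 2
#   2 3
#   4 5
# prints 3: the largest friend group is {1, 2, 3}. d[root] collects the group
# members reached from the root, excluding the root itself, hence the "+1".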
"[email protected]"
] | |
85b44a6d09829fe02164b0352254195d4a9b9f10 | 2e98deb1931aca5f69434e85010153b8b4b7f76e | /tests/json-to-yaml.py | 509903970d379fc1d097d98af3568ecece39d6ad | [
"Apache-2.0"
] | permissive | marklap/taurus | 8ec1ff80bbfd3f38f620930e88500b9ff7b3528b | 8a485d05b3890bd842d627e53deccfc2d21eb2b8 | refs/heads/master | 2021-01-15T11:19:24.722950 | 2015-03-31T21:19:09 | 2015-03-31T21:19:09 | 33,209,884 | 0 | 0 | null | 2015-03-31T21:05:18 | 2015-03-31T21:05:18 | null | UTF-8 | Python | false | false | 268 | py | import os
import sys
import tempfile
from bzt.engine import Configuration
fp, filename = tempfile.mkstemp()
os.write(fp, sys.stdin.read())
conf = Configuration()
conf.load([filename])
conf.dump(filename, Configuration.YAML)
sys.stdout.write(open(filename).read()) | [
"[email protected]"
] | |
40a59c771348d96bbfa8ef02b04543eb7b724b1d | f1d2a86b7dd93f4ddafa8961780775a28e7b4508 | /GeneralPractice/1606. Find Servers That Handled Most Number of Requests.py | 879d823415d8ec7fab5bc2919834d48cf4986b64 | [] | no_license | deepika087/CompetitiveProgramming | 76f8c1451fce1a8e3c94656f81a5b04363987dc6 | d40c24736a6fee43b56aa1c80150c5f14be4ff22 | refs/heads/master | 2021-06-12T02:26:22.374506 | 2021-02-20T19:27:57 | 2021-02-20T19:27:57 | 70,208,474 | 10 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | __author__ = 'deepika'
"""
It is one of those problems in which the data structure definition is more challenging than the actual code.
Time complexity: O(n log k)
"""
import heapq
class Solution(object):
def busiestServers(self, k, arrival, load):
"""
:type k: int
:type arrival: List[int]
:type load: List[int]
:rtype: List[int]
"""
avail = list(range(k))
count = [0]*k
pq = []
for i, a in enumerate(arrival):
while pq and pq[0][0] <= a:
_, x = heapq.heappop(pq)
heapq.heappush(avail, i + (x-i)%k)
if avail:
poppedServer = heapq.heappop(avail) % k
heapq.heappush(pq, (a + load[i], poppedServer))
count[poppedServer] += 1
max_count = max(count)
        return [i for i in range(k) if count[i] == max_count]  # reuse max_count; recomputing max() per index would add an extra O(k) factor
s=Solution()
print(s.busiestServers(k = 3, arrival = [1,2,3,4,5], load = [5,2,3,3,3] ))
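# Expected output: [1] -- server 1 handles two requests (indices 1 and 3), more
# than any other server, and request 4 is dropped because every server is busy.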
| [
"[email protected]"
] | |
6b22025736585ebd7fb592f4545cad2786c6e33d | ad13583673551857615498b9605d9dcab63bb2c3 | /output/models/ms_data/datatypes/facets/ncname/ncname_max_length002_xsd/__init__.py | 635209195235b0d6fbe5b2195b93e98c31d1ab97 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 173 | py | from output.models.ms_data.datatypes.facets.ncname.ncname_max_length002_xsd.ncname_max_length002 import (
FooType,
Test,
)
__all__ = [
"FooType",
"Test",
]
| [
"[email protected]"
] | |
5956b9e2ef9c61de2a591591bdfab2f3667d3ebd | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_meticulous.py | 4c49fb55ebd4ada7e02a5ab1909db460ca891293 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py |
# class header
class _METICULOUS():
def __init__(self,):
self.name = "METICULOUS"
self.definitions = [u'very careful and with great attention to every detail: ']
self.parents = []
        self.children = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"[email protected]"
] | |
d087aa0095c89a0fbc965b19d74bfc369e4e7aae | 0d0afd1dce972b4748ce8faccd992c019794ad9e | /integra/integra_caixa/models/caixa_movimento_base.py | 1041f410fce8c130cc33437f28d907cb150625b6 | [] | no_license | danimaribeiro/odoo-erp | e2ca2cfe3629fbedf413e85f7c3c0453fd16941e | d12577bf7f5266b571cbedeb930720d653320e96 | refs/heads/master | 2020-01-23T21:32:16.149716 | 2016-11-05T15:35:40 | 2016-11-05T15:35:40 | 67,892,809 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,754 | py | # -*- coding: utf-8 -*-
# from __future__ import division, print_function, unicode_literals
from osv import orm, fields
from pybrasil.data import parse_datetime, formata_data, data_hora_horario_brasilia
SALVA = True
class caixa_movimento_base(orm.AbstractModel):
_name = 'caixa.movimento_base'
def _get_data(self, cr, uid, ids, nome_campo, args=None, context={}):
res = {}
for mov_obj in self.browse(cr, uid, ids):
if nome_campo in ['data_abertura', 'dia_abertura', 'mes_abertura', 'ano_abertura', 'dia_abertura_display', 'mes_abertura_display']:
if mov_obj.data_hora_abertura:
data = parse_datetime(mov_obj.data_hora_abertura)
data = data_hora_horario_brasilia(data)
if nome_campo == 'dia_abertura_display':
data = formata_data(data, '%d/%m/%Y')
elif nome_campo == 'dia_abertura':
data = formata_data(data, '%d/%m/%Y')
elif nome_campo == 'mes_abertura_display':
data = formata_data(data, '%B de %Y')
elif nome_campo == 'mes_abertura':
data = formata_data(data, '%B de %Y')
elif nome_campo == 'ano_abertura':
data = formata_data(data, '%Y')
else:
data = False
elif nome_campo in ['data_fechamento', 'dia_fechamento', 'mes_fechamento', 'ano_fechamento', 'dia_fechamento_display', 'mes_fechamento_display']:
if mov_obj.data_hora_fechamento:
data = parse_datetime(mov_obj.data_hora_fechamento)
data = data_hora_horario_brasilia(data)
if nome_campo == 'dia_fechamento_display':
data = formata_data(data, '%d/%m/%Y')
elif nome_campo == 'dia_fechamento':
data = formata_data(data, '%d/%m/%Y')
elif nome_campo == 'mes_fechamento_display':
data = formata_data(data, '%B de %Y')
elif nome_campo == 'mes_fechamento':
data = formata_data(data, '%B de %Y')
elif nome_campo == 'ano_fechamento':
data = formata_data(data, '%Y')
else:
data = False
res[mov_obj.id] = data
return res
_columns = {
'data_hora_abertura': fields.datetime(u'Data de abertura', required=True, select=True),
'data_abertura': fields.function(_get_data, type='date', string=u'Data de abertura', store=SALVA, select=True),
'dia_abertura': fields.function(_get_data, type='char', string=u'Dia de abertura', store=SALVA, select=True),
'mes_abertura': fields.function(_get_data, type='char', string=u'Mês de abertura', store=SALVA, select=True),
'ano_abertura': fields.function(_get_data, type='char', string=u'Ano de abertura', store=SALVA, select=True),
'data_hora_fechamento': fields.datetime(u'Data de fechamento', select=True),
'data_fechamento': fields.function(_get_data, type='date', string=u'Data de fechamento', store=SALVA, select=True),
'dia_fechamento': fields.function(_get_data, type='char', string=u'Dia de fechamento', store=SALVA, select=True),
'mes_fechamento': fields.function(_get_data, type='char', string=u'Mês de fechamento', store=SALVA, select=True),
'ano_fechamento': fields.function(_get_data, type='char', string=u'Ano de fechamento', store=SALVA, select=True),
}
_defaults = {
'data_hora_abertura': fields.datetime.now,
}
caixa_movimento_base()
| [
"[email protected]"
] | |
90ea2dcaf29279ed9722bafe95c7dad77cbb51f4 | ac45b55915e634815922329195c203b1e810458c | /minionOC1304_11.py | 34d1dd610f25e9bc1d383859f22f0c01ec4c984d | [] | no_license | mj1e16lsst/iridisPeriodicNew | 96a8bfef0d09f13e18adb81b89e25ae885e30bd9 | dc0214b1e702b454e0cca67d4208b2113e1fbcea | refs/heads/master | 2020-03-23T15:01:23.583944 | 2018-07-23T18:58:59 | 2018-07-23T18:58:59 | 141,715,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,181 | py | from operator import add
#from astropy import units as u
#from astropy.coordinates import SkyCoord
#from astropy.stats import LombScargle
#from gatspy.periodic import LombScargleFast
from functools import partial
#from gatspy import periodic
#import matplotlib.pyplot as plt
#from matplotlib.font_manager import FontProperties
import lomb_scargle_multiband as periodic
from multiprocessing import Pool
import numpy as np
import os
#from sqlite3 import *
import random
from random import shuffle
from random import randint
import Observations
import Magnitudes
# In[13]:
#conn = connect('minion_1016_sqlite.db')
#conn = connect('astro_lsst_01_1004_sqlite.db')
#conn = connect('minion_1020_sqlite.db')
# In[14]:
# LSST zero points u,g,r,i,z,y
zeroPoints = [0,26.5,28.3,28.13,27.79,27.4,26.58]
FWHMeff = [0.8,0.92,0.87,0.83,0.80,0.78,0.76] # arcmins?
pixelScale = 0.2
readOut = 12.7
sigSys = 0.005
flareperiod = 4096
flarecycles = 10
dayinsec=86400
background = 40
# sat mag u,g,r,i,z,y=14.7,15.7,15.8,15.8,15.3 and 13.9
# start date 59580.033829 end date + 10 years
#maglist=[20]*7
lim = [0, 23.5, 24.8, 24.4, 23.9, 23.3, 22.1] # limiting magnitude ugry
sat = [0, 14.7, 15.7, 15.8, 15.8, 15.3, 13.9] # sat mag as above
# In[15]:
looooops = 10000
maglength = 20
freqlength = 20
processors = 20
startnumber = 0 + 11
endnumber = startnumber + 1
#observingStrategy = 'minion'
observingStrategy = 'astroD'
#observingStrategy = 'panstars'
inFile = '/home/mj1e16/periodic/in'+str(startnumber)+'.txt'
outFile = '/home/mj1e16/periodic/outminionOC1304'+str(startnumber)+'.txt'
#inFile = '/home/ubuntu/vagrant/'+observingStrategy+'/in'+observingStrategy+'KtypefullresultsFile'+str(startnumber)+'.txt'
#outFile = '/home/ubuntu/vagrant/'+observingStrategy+'/out'+observingStrategy+'KtypefullresultsFile'+str(startnumber)+'.txt'
obs = Observations.obsminionOC1304
for y in range(len(obs)):
for x in range(len(obs[y])):
obs[y][x] = obs[y][x] + ((random.random()*2.)-1.)
# In[19]:
def magUncertainy(Filter, objectmag, exposuretime,background, FWHM): # b is background counts per pixel
countsPS = 10**((Filter-objectmag)/2.5)
counts = countsPS * exposuretime
uncertainty = 1/(counts/((counts/2.3)+(((background/2.3)+(12.7**2))*2.266*((FWHM/0.2)**2)))**0.5) # gain assumed to be 1
return uncertainty
#photometric-error model from LSST: https://smtn-002.lsst.io/
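# Worked example (g band: zeropoint 28.3, a 20th-mag source, 30 s exposure):
# countsPS = 10**((28.3 - 20)/2.5) ~ 2.1e3 counts/s, so counts ~ 6.3e4 and the
# returned uncertainty is ~0.003 mag, below the 0.005 mag systematic floor
# (sigSys) that gets added on top elsewhere in the script.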
# In[20]:
def averageFlux(observations, Frequency, exptime):
b = [0]*len(observations)
for seconds in range(0, exptime):
a = [np.sin((2*np.pi*(Frequency))*(x+(seconds/(3600*24)))) for x in observations] # optical modulation
b = map(add, a, b)
c = [z/exptime for z in b]
return c
def Flux(observations,Frequency,exptime):
a = [np.sin((2*np.pi*(Frequency)*x)) for x in observations]
return a
# In[21]:
def ellipsoidalFlux(observations, Frequency,exptime):
period = 1/(Frequency)
phase = [(x % (2*period)) for x in observations]
b = [0]*len(observations)
for seconds in range(0, exptime):
a = [np.sin((2*np.pi*(Frequency))*(x+(seconds/(3600*24)))) for x in observations] # optical modulation
b = map(add, a, b)
c = [z/exptime for z in b]
for x in range(0,len(phase)):
if (phase[x]+(1.5*period)) < (3*period):
c[x] = c[x]*(1./3.)
else:
c[x] = c[x]*(2./3.)
return c
## this is doing something but not the right something, come back to it
# In[22]:
def flaring(B, length, dayinsec=86400,amplitude=1):
global flareMag, minutes
fouriers = np.linspace(0.00001,0.05,(dayinsec/30))
logF = [np.log(x) for x in fouriers] # start at 30 go to a day in 30 sec increments
real = [random.gauss(0,1)*((1/x)**(B/2)) for x in fouriers] #random.gauss(mu,sigma) to change for values from zurita
# imaginary = [random.gauss(0,1)*((1/x)**(B/2)) for x in fouriers]
IFT = np.fft.ifft(real)
seconds = np.linspace(0,dayinsec, (dayinsec/30)) # the day in 30 sec increments
minutes = [x for x in seconds]
minimum = (np.max(-IFT))
    positive = [x + minimum for x in IFT] # shift so the minimum is zero, which makes the normalisation below work
normalised = [x/(np.mean(positive)) for x in positive] # find normalisation
normalisedmin = minimum/(np.mean(positive))
normalised = [x - normalisedmin for x in normalised]
flareMag = [amplitude * x for x in normalised] # normalise to amplitude
logmins = [np.log(d) for d in minutes] # for plotting?
# plt.plot(minutes,flareMag)
# plt.title('lightcurve')
# plt.show()
return flareMag
# In[55]:
def lombScargle(frequencyRange,objectmag=20,loopNo=looooops,df=0.001,fmin=0.001,numsteps=100000,modulationAmplitude=0.1,Nquist=200): # frequency range and object mag in list
#global totperiod, totmperiod, totpower, date, amplitude, frequency, periods, LSperiod, power, mag, error, SigLevel
results = {}
totperiod = []
totmperiod = []
totpower = [] # reset
SigLevel = []
filterletter = ['o','u','g','r','i','z','y']
period = 1/(frequencyRange)
if period > 0.5:
numsteps = 10000
elif period > 0.01:
numsteps = 100000
else:
numsteps = 200000
    freqs = fmin + df * np.arange(numsteps) # manual frequency grid
allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy = [], [], [], [], [], [], [] #reset
measuredpower = [] # reset
y = [allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy] # for looping only
for z in range(1, len(y)):
        #y[z] = averageFlux(obs[z], frequencyRange[frange], 30) # amplitude calculation for the observations and frequency range
y[z] = ellipsoidalFlux(obs[z], frequencyRange,30)
y[z] = [modulationAmplitude * t for t in y[z]] # scaling
for G in range(0, len(y[z])):
flareMinute = int(round((obs[z][G]*24*60*2)%((dayinsec/(30*2))*flarecycles)))
            y[z][G] = y[z][G] + longflare[flareMinute] # add flares; the index is now in 30 s steps, but keeping the old variable name introduces fewer bugs
date = []
amplitude = []
mag = []
error = []
filts = []
for z in range(1, len(y)):
if objectmag[z] > sat[z] and objectmag[z] < lim[z]:
#date.extend([x for x in obs[z]])
date.extend(obs[z])
amplitude = [t + random.gauss(0,magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])) for t in y[z]] # scale amplitude and add poisson noise
mag.extend([objectmag[z] - t for t in amplitude]) # add actual mag
error.extend([sigSys + magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])+0.2]*len(amplitude))
filts.extend([filterletter[z]]*len(amplitude))
phase = [(day % (period*2))/(period*2) for day in obs[z]]
pmag = [objectmag[z] - t for t in amplitude]
# plt.plot(phase, pmag, 'o', markersize=4)
# plt.xlabel('Phase')
# plt.ylabel('Magnitude')
# plt.gca().invert_yaxis()
# plt.title('filter'+str(z)+', Period = '+str(period))#+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)))
# plt.show()
# plt.plot(date, mag, 'o')
# plt.xlim(lower,higher)
# plt.xlabel('time (days)')
# plt.ylabel('mag')
# plt.gca().invert_yaxis()
# plt.show()
model = periodic.LombScargleMultibandFast(fit_period=False)
model.fit(date, mag, error, filts)
power = model.score_frequency_grid(fmin, df, numsteps)
if period > 10.:
model.optimizer.period_range=(10, 110)
elif period > 0.51:
model.optimizer.period_range=(0.5, 10)
elif period > 0.011:
model.optimizer.period_range=(0.01, 0.52)
else:
model.optimizer.period_range=(0.0029, 0.012)
LSperiod = model.best_period
if period < 10:
higher = 10
else:
higher = 100
# fig, ax = plt.subplots()
# ax.plot(1./freqs, power)
# ax.set(xlim=(0, higher), ylim=(0, 1.2),
# xlabel='period (days)',
# ylabel='Lomb-Scargle Power',
# title='Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)));
# plt.show()
phase = [(day % (period*2))/(period*2) for day in date]
#idealphase = [(day % (period*2))/(period*2) for day in dayZ]
#print(len(phase),len(idealphase))
#plt.plot(idealphase,Zmag,'ko',)
# plt.plot(phase, mag, 'o', markersize=4)
# plt.xlabel('Phase')
# plt.ylabel('Magnitude')
# plt.gca().invert_yaxis()
# plt.title('Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)))
# plt.show()
#print(period, LSperiod, period*20)
# print('actualperiod', period, 'measured period', np.mean(LSperiod),power.max())# 'power',np.mean(power[maxpos]))
# print(frequencyRange[frange], 'z', z)
# totperiod.append(period)
# totmperiod.append(np.mean(LSperiod))
# totpower.append(power.max())
mpower = power.max()
measuredpower.append(power.max()) # should this correspond to period power and not max power?
maxpower = []
counter = 0.
for loop in range(0,loopNo):
random.shuffle(date)
model = periodic.LombScargleMultibandFast(fit_period=False)
model.fit(date, mag, error, filts)
power = model.score_frequency_grid(fmin, df, numsteps)
maxpower.append(power.max())
for X in range(0, len(maxpower)):
if maxpower[X] > measuredpower[-1]:
counter = counter + 1.
Significance = (1.-(counter/len(maxpower)))
#print('sig', Significance, 'counter', counter)
SigLevel.append(Significance)
#freqnumber = FrangeLoop.index(frequencyRange)
#magnumber = MagRange.index(objectmag)
#print(fullmaglist)
#listnumber = (magnumber*maglength)+freqnumber
# print(listnumber)
# measuredperiodlist[listnumber] = LSperiod
# periodlist[listnumber] = period
# powerlist[listnumber] = mpower
# siglist[listnumber] = Significance
# fullmaglist[listnumber] = objectmag
# results order, 0=mag,1=period,2=measuredperiod,3=siglevel,4=power,5=listnumber
results[0] = objectmag[3]
results[1] = period
results[2] = LSperiod
results[3] = Significance
results[4] = mpower
results[5] = 0#listnumber
return results
# In[24]:
#findObservations([(630,)])
#remove25(obs)
#averageFlux(obs[0], 1, 30)
longflare = []
for floop in range(0,flarecycles):
flareone = flaring(-1, flareperiod, amplitude=0.3)
flareone = flareone[0:1440]
positiveflare = [abs(x) for x in flareone]
longflare.extend(positiveflare)
# In[25]:
PrangeLoop = np.logspace(-2.5,2,freqlength)
FrangeLoop = [(1/x) for x in PrangeLoop]
# In[26]:
# reset results file
with open(inFile,'w') as f:
f.write('fullmaglist \n\n periodlist \n\n measuredperiodlist \n\n siglist \n\n powerlist \n\n listnumberlist \n\n end of file')
# In[57]:
results = []
fullmeasuredPeriod = []
fullPeriod = []
fullPower = []
fullSigLevel = []
fullMag = []
MagRangearray = np.linspace(17,24,maglength)
MagRange = [x for x in MagRangearray]
maglist = []
for x in range(len(MagRange)):
maglist.append([MagRange[x]]*7)
newlist = Magnitudes.mag1304
pool = Pool(processors)
for h in range(startnumber,endnumber):
print(newlist[h])
results.append(pool.map(partial(lombScargle, objectmag=newlist[h]),FrangeLoop))
twoDlist = [[],[],[],[],[],[]]
for X in range(len(results)):
for Y in range(len(results[X])):
twoDlist[0].append(results[X][Y][0])
twoDlist[1].append(results[X][Y][1])
twoDlist[2].append(results[X][Y][2])
twoDlist[3].append(results[X][Y][3])
twoDlist[4].append(results[X][Y][4])
twoDlist[5].append(results[X][Y][5])
with open(inFile, 'r') as istr:
with open(outFile,'w') as ostr:
for i, line in enumerate(istr):
# Get rid of the trailing newline (if any).
line = line.rstrip('\n')
if i % 2 != 0:
line += str(twoDlist[int((i-1)/2)])+','
ostr.write(line+'\n')
| [
"[email protected]"
] | |
bd2595569bf3a56f1f02f986ee1e36549ef16666 | 62ea331d8da218e65a4aee517f4473110f80c03c | /bonus_points/migrations/0012_remove_userbonussummary_bonus_points_added.py | 3abf58d99c8ead844f6402388f99f4e7c39dbc8f | [] | no_license | maddrum/world_cup_results | 11f47a1b0f9a68a0761c7d83d25cc1efb57c2240 | 282d8f55344ba718ea371a22f34454673f23a615 | refs/heads/master | 2020-03-20T05:40:44.173185 | 2018-07-16T13:12:15 | 2018-07-16T13:12:15 | 136,724,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | # Generated by Django 2.0.2 on 2018-07-08 12:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bonus_points', '0011_userbonussummary_bonus_points_added'),
]
operations = [
migrations.RemoveField(
model_name='userbonussummary',
name='bonus_points_added',
),
]
| [
"[email protected]"
] |