Dataset schema (one record per source file; ranges are the observed min/max in this split; ⌀ marks nullable columns):

| column | dtype | observed range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |

Each record below lists these fields in order, separated by `|`, with the `content` field holding the full file text.
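A minimal sketch of how a dataset with this schema could be read. Assumptions: this is a Hugging Face `datasets`-style dataset, and `org/python-code-corpus` is a placeholder id, not the real one.

```python
# Hypothetical loading sketch; the dataset id below is a placeholder.
from datasets import load_dataset

ds = load_dataset("org/python-code-corpus", split="train", streaming=True)
for record in ds.take(1):
    # Each record carries the schema fields above; `content` is the file text.
    print(record["repo_name"], record["path"], record["license_type"])
```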
e73aff3599ba2371422b1ab8a30e84e8a98a2ad1 | 74649c1220c68ad0af79e420d572e3769fcd7a53 | /_unittests/ut_cli/test_cli_validate_bench_doc.py | 4a3937dc7771228ee5b515f55a9fc32778a4db1d | [
"MIT"
] | permissive | sdpython/mlprodict | e62edcb428700cb2c4527e54e96431c1d2b36118 | 27d6da4ecdd76e18292f265fde61d19b66937a5c | refs/heads/master | 2023-05-08T10:44:30.418658 | 2023-03-08T22:48:56 | 2023-03-08T22:48:56 | 112,469,804 | 60 | 13 | MIT | 2023-04-19T01:21:38 | 2017-11-29T11:57:10 | Python | UTF-8 | Python | false | false | 1,157 | py | """
@brief test tree node (time=42s)
"""
import os
import unittest
from pyquickhelper.loghelper import BufferedPrint
from pyquickhelper.pycode import (
ExtTestCase, get_temp_folder, ignore_warnings)
from mlprodict.__main__ import main
class TestCliValidateBenchDoc(ExtTestCase):
@ignore_warnings(UserWarning)
def test_cli_validate_bench_doc_help(self):
st = BufferedPrint()
main(args=["benchmark_doc", "--help"], fLOG=st.fprint)
res = str(st)
self.assertIn("verbose", res)
@ignore_warnings(UserWarning)
def test_cli_validate_bench_doc(self):
temp = get_temp_folder(__file__, "temp_bench_doc")
out1 = os.path.join(temp, "raw.xlsx")
out2 = os.path.join(temp, "sum.csv")
st = BufferedPrint()
main(args=["benchmark_doc", "-o", out1, "-ou", out2, "-w",
"LinearRegression", '-d', temp,
'-r', 'python_compiled'],
fLOG=st.fprint)
res = str(st)
self.assertIn('Linear', res)
self.assertExists(out1)
self.assertExists(out2)
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"[email protected]"
] | |
f7d764ba88db6e3901e87715853fe26847484a39 | 6d71de4e88dcb7d04f6d3a18736d393e12f8d087 | /scripts/packages/database.py | f5c11749b5a1d8cec4d2441e01d576df5e077dc6 | [
"MIT"
] | permissive | wyolum/Alex | 71075c30691229e8eb28afa06a6ab44c450b14d4 | 03f1d8ae0107454d18964e33777ffc4c0c1a1951 | refs/heads/main | 2023-07-02T16:11:57.088323 | 2021-08-05T17:59:04 | 2021-08-05T17:59:04 | 338,686,528 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,870 | py | import sqlite3
class Struct:
def __init__(self, **kwargs):
self.attrs = kwargs
self.__dict__.update(kwargs)
def keys(self):
return self.attrs.keys()
def __getitem__(self, key):
return self.attrs[key]
def __repr__(self):
return f'Struct(**{self.attrs})'
class Table:
def __init__(self, name, *columns):
self.name = name
self.columns = columns
def create(self, db):
cols = ['%s' % col for col in self.columns]
sql = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (self.name, ','.join(cols))
db.execute(sql)
def drop(self, db):
sql = 'DROP TABLE %s' % self.name
response = input('Warning, dropping table %s\nY to confirm: ' % self.name)
        if response[:1] == 'Y':
db.execute(sql)
print ('%s Dropped' % self.name)
else:
print ('Drop not executed')
def create_index(self, db, colnames, unique=False):
idx_name = ''.join(colnames)
cols = ','.join(colnames)
unique = ['', 'UNIQUE'][unique]
sql = 'CREATE %s INDEX %s ON %s(%s)' % (unique, idx_name, self.name, cols)
db.execute(sql)
def insert(self, db, values):
place_holders = ','.join('?' * len(values[0]))
cols = ','.join([col.name for col in self.columns])
sql = 'INSERT INTO %s(%s) VALUES (%s);' % (self.name, cols, place_holders)
#print('sql:', sql)
rowcount = 0
for row in values:
### add quote to string fields
try:
rowcount += db.executemany(sql, [row]).rowcount
except sqlite3.IntegrityError:
pass
db.commit()
return rowcount
def delete(self, db, where):
sql = f'DELETE FROM {self.name} WHERE {where}'
        # print(sql)
try:
cur = db.execute(sql)
db.commit()
except sqlite3.OperationalError:
print(sql)
raise
def select(self, db, where=None):
sql = 'SELECT * FROM %s' % self.name
if where is not None:
sql += ' WHERE ' + where
try:
cur = db.execute(sql)
except sqlite3.OperationalError:
print(sql)
raise
out = []
colnames = [col.name for col in self.columns]
for row in cur.fetchall():
l = Struct(**dict(zip(colnames, row)))
out.append(l)
return out
def join(self, db, other, col, where=None):
sql = 'SELECT * FROM %s LEFT JOIN %s ON %s.%s' % (self.name, other.name, self.name, col)
if where:
sql += ' WHERE ' + where
cur = db.execute(sql)
colnames = [l[0] for l in cur.description]
out = []
for row in cur.fetchall():
l = dict(zip(colnames, row))
out.append(l)
return out
class Column:
def __init__(self, name, type, **kw):
self.name = name
self.type = type
self.kw = kw
def __str__(self):
kw = ''
for k in self.kw:
if self.kw[k]:
kw = kw + ' ' + '%s' % (k.upper())
return '%s %s %s' % (self.name, self.type.name, kw)
class DBType:
def __init__(self, name):
self.name = name
class Integer(DBType):
def __init__(self):
DBType.__init__(self, 'INTEGER')
self.convert = int
class Float(DBType):
def __init__(self):
DBType.__init__(self, 'FLOAT')
self.convert = float
class String(DBType):
def __init__(self):
DBType.__init__(self, 'STRING')
self.convert = str
class Boolean(DBType):
def __init__(self):
DBType.__init__(self, 'BOOLEAN')
self.convert = bool
class Text(DBType):
def __init__(self):
DBType.__init__(self, 'TEXT')
self.convert = str
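# --- Illustrative usage (added sketch, not part of the original module). ---
# Assumes an in-memory SQLite database; the 'people' table and its columns
# are made-up names for the demo.
if __name__ == '__main__':
    demo_db = sqlite3.connect(':memory:')
    people = Table('people',
                   Column('name', String(), unique=True),
                   Column('age', Integer()))
    people.create(demo_db)
    # insert() takes a list of rows and skips duplicates via IntegrityError
    people.insert(demo_db, [('alice', 30), ('bob', 25)])
    for person in people.select(demo_db, where='age > 26'):
        print(person.name, person.age)  # select() returns Struct objects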
| [
"[email protected]"
] | |
45ebacc913f5ce9cdf2fa3cc8fe300130f9e2f9b | b233a9abbf0c88ae0dcb1bb1d22d28403414c813 | /jira/venv/bin/pbr | b9c8d7f5ac5ddea76e71a9355ae5fd7137de0757 | [] | no_license | suntwin/pythonProjects1 | 238a8a4f6585ecac656349a06f02a763518e9cd5 | f0e34e4bced916fea7dc6520f14f0883c6d8b961 | refs/heads/master | 2022-12-11T08:28:53.032191 | 2020-01-01T09:13:57 | 2020-01-01T09:13:57 | 231,194,980 | 1 | 1 | null | 2022-12-08T07:02:00 | 2020-01-01T09:03:34 | Jupyter Notebook | UTF-8 | Python | false | false | 262 | #!/Users/niteshchawla/Ownstuff/pythonProjects/jira/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pbr.cmd.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
d5cef35ef32da1b2f8e8ae9cb2ab5ab3391634ea | cea45a2355c8b243a79c4c179c98a04e90e98ff7 | /astropy/table/tests/conftest.py | 65143fd816accb2c0754b2e8130f6ba656cf8cbe | [] | no_license | shanmbic/astropy | 408cfa45511cac9c64dade6350d6ba9adeb567ad | e8a6546dd210ade743eb663dd1c276ca2fd054b4 | refs/heads/master | 2021-01-15T17:20:47.626581 | 2014-04-03T16:17:03 | 2014-04-03T16:17:03 | 17,769,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
All of the py.test fixtures used by astropy.table are defined here.
The fixtures can not be defined in the modules that use them, because
those modules are imported twice: once with `from __future__ import
unicode_literals` and once without. py.test complains when the same
fixtures are defined more than once.
`conftest.py` is a "special" module name for py.test that is always
imported, but is not looked in for tests, and it is the recommended
place to put fixtures that are shared between modules. These fixtures
can not be defined in a module by a different name and still be shared
between modules.
"""
from ...tests.helper import pytest
from ... import table
@pytest.fixture(params=[table.Column, table.MaskedColumn])
def Column(request):
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
return request.param
class MaskedTable(table.Table):
def __init__(self, *args, **kwargs):
kwargs['masked'] = True
table.Table.__init__(self, *args, **kwargs)
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_types(request):
class TableTypes:
def __init__(self, request):
self.Table = MaskedTable if request.param else table.Table
self.Column = table.MaskedColumn if request.param else table.Column
return TableTypes(request)
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_data(request):
class TableData:
def __init__(self, request):
self.Table = MaskedTable if request.param else table.Table
self.Column = table.MaskedColumn if request.param else table.Column
self.COLS = [
self.Column(name='a', data=[1, 2, 3], description='da',
format='fa', meta={'ma': 1}, unit='ua'),
self.Column(name='b', data=[4, 5, 6], description='db',
format='fb', meta={'mb': 1}, unit='ub'),
self.Column(name='c', data=[7, 8, 9], description='dc',
format='fc', meta={'mc': 1}, unit='ub')]
self.DATA = self.Table(self.COLS)
return TableData(request)
class SubclassTable(table.Table):
pass
@pytest.fixture(params=[True, False])
def tableclass(request):
return table.Table if request.param else SubclassTable
@pytest.fixture(params=[0, 1, -1])
def protocol(request):
"""
Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced).
"""
return request.param
# Fixture to run all tests for both an unmasked (ndarray) and masked
# (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_type(request):
# return MaskedTable if request.param else table.Table
try:
request.param
return MaskedTable
except AttributeError:
return table.Table
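# --- Illustrative note (added): a test module could consume the fixtures
# above like this (sketch only, not a real astropy test):
#
# def test_masked_and_unmasked(table_types):
#     t = table_types.Table()
#     t['a'] = table_types.Column(name='a', data=[1, 2, 3])
#     assert len(t) == 3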
| [
"[email protected]"
] | |
724cc2812ef2925248a2d0403762eb7599764b22 | 0a3e24df172a206a751217e5f85b334f39983101 | /python_etc_3/abstract.py | e0ea5f6e6873c182904d30b94f58820697f27011 | [] | no_license | yeboahd24/python202 | 1f399426a1f46d72da041ab3d138c582c695462d | d785a038183e52941e0cee8eb4f6cedd3c6a35ed | refs/heads/main | 2023-05-06T04:14:19.336839 | 2021-02-10T02:53:19 | 2021-02-10T02:53:19 | 309,841,303 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | #!usr/bin/evn/python3
import abc
class Tombola(abc.ABC):
@abc.abstractmethod
def load(self, iterable):
"""Add items from an iterable."""
@abc.abstractmethod
def pick(self):
"""Remove item at random, returning it.
This method should raise `LookupError` when the instance is empty."""
def loaded(self):
"""Return `True` if there's at least 1 item, `False` otherwise."""
return bool(self.inspect())
def inspect(self):
"""Return a sorted tuple with the items currently inside."""
items = []
while True:
try:
items.append(self.pick())
except LookupError:
break
self.load(items)
return tuple(sorted(items))
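# --- Illustrative sketch (added, not in the original file): a minimal
# concrete subclass, assuming random.randrange() picking is acceptable.
import random
class DemoBingo(Tombola):
    def __init__(self, items=()):
        self._items = []
        self.load(items)
    def load(self, iterable):
        self._items.extend(iterable)
    def pick(self):
        try:
            # random.randrange raises ValueError when the cage is empty
            return self._items.pop(random.randrange(len(self._items)))
        except ValueError:
            raise LookupError('pick from empty DemoBingo')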
| [
"[email protected]"
] | |
e87bb97c7df0a4427908d7f0aaf88841526d6ba8 | 31a0b0749c30ff37c3a72592387f9d8195de4bd6 | /rllib/agents/ars/tests/test_ars.py | 86ccab7f45a7af95c28c303d749585d3ca419ddc | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | longshotsyndicate/ray | 15100bad514b602a3fa39bfe205288e7bec75d90 | 3341fae573868338b665bcea8a1c4ee86b702751 | refs/heads/master | 2023-01-28T15:16:00.401509 | 2022-02-18T05:35:47 | 2022-02-18T05:35:47 | 163,961,795 | 1 | 1 | Apache-2.0 | 2023-01-14T08:01:02 | 2019-01-03T11:03:35 | Python | UTF-8 | Python | false | false | 1,353 | py | import unittest
import ray
import ray.rllib.agents.ars as ars
from ray.rllib.utils.test_utils import framework_iterator, check_compute_single_action
class TestARS(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(num_cpus=3)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_ars_compilation(self):
"""Test whether an ARSTrainer can be built on all frameworks."""
config = ars.DEFAULT_CONFIG.copy()
# Keep it simple.
config["model"]["fcnet_hiddens"] = [10]
config["model"]["fcnet_activation"] = None
config["noise_size"] = 2500000
# Test eval workers ("normal" WorkerSet, unlike ARS' list of
# RolloutWorkers used for collecting train batches).
config["evaluation_interval"] = 1
config["evaluation_num_workers"] = 1
num_iterations = 2
for _ in framework_iterator(config):
plain_config = config.copy()
trainer = ars.ARSTrainer(config=plain_config, env="CartPole-v0")
for i in range(num_iterations):
results = trainer.train()
print(results)
check_compute_single_action(trainer)
trainer.stop()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| [
"[email protected]"
] | |
81c2d118d20c8e99ca595f9a35b345a98b2fe6f0 | ef5f98cdaca58bc9c1ba1a94a1ccf7bebc3f1260 | /is_analyse_cbn.py | 03e5d482bfbc35d4dd74e9ddc63f7ad70a85fab2 | [
"MIT"
] | permissive | tonygalmiche/is_plastigray | 512ad911b3118c6aa2aab49f64ad7871fb80f195 | 774feea510fc0854776016dbbbc7472ebd1248c5 | refs/heads/master | 2023-07-25T21:49:56.284434 | 2023-07-18T13:15:28 | 2023-07-18T13:15:28 | 24,811,999 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,678 | py | # -*- coding: utf-8 -*-
from openerp import models,fields,api
import time
import datetime
from collections import OrderedDict
import tempfile
class product_product(models.Model):
_inherit = "product.product"
@api.model
def analyse_cbn(self,filter=False):
cr, uid, context = self.env.args
validation = filter['validation']
if validation=='ko':
            #** Read the saved criteria ****************************************
code_pg_debut = self.env['is.mem.var'].get(uid,'code_pg_debut')
gest = self.env['is.mem.var'].get(uid,'gest')
cat = self.env['is.mem.var'].get(uid,'cat')
moule = self.env['is.mem.var'].get(uid,'moule')
projet = self.env['is.mem.var'].get(uid,'projet')
client = self.env['is.mem.var'].get(uid,'client')
fournisseur = self.env['is.mem.var'].get(uid,'fournisseur')
type_commande = self.env['is.mem.var'].get(uid,'type_commande')
type_rapport = self.env['is.mem.var'].get(uid,'type_rapport')
calage = self.env['is.mem.var'].get(uid,'calage')
nb_semaines = self.env['is.mem.var'].get(uid,'nb_semaines')
valorisation=''
else:
            #** Read the filters ***********************************************
code_pg_debut = filter['code_pg_debut']
gest = filter['gest']
cat = filter['cat']
moule = filter['moule']
projet = filter['projet']
client = filter['client']
fournisseur = filter['fournisseur']
type_commande = filter['type_commande']
type_rapport = filter['type_rapport']
calage = filter['calage']
nb_semaines = filter['nb_semaines']
valorisation = filter['valorisation']
#*******************************************************************
        #** Save the criteria **********************************************
self.env['is.mem.var'].set(uid, 'code_pg_debut', code_pg_debut)
self.env['is.mem.var'].set(uid, 'gest', gest)
self.env['is.mem.var'].set(uid, 'cat', cat)
self.env['is.mem.var'].set(uid, 'moule', moule)
self.env['is.mem.var'].set(uid, 'projet', projet)
self.env['is.mem.var'].set(uid, 'client', client)
self.env['is.mem.var'].set(uid, 'fournisseur', fournisseur)
self.env['is.mem.var'].set(uid, 'type_commande', type_commande)
self.env['is.mem.var'].set(uid, 'type_rapport', type_rapport)
self.env['is.mem.var'].set(uid, 'calage', calage)
self.env['is.mem.var'].set(uid, 'nb_semaines', nb_semaines)
#*******************************************************************
        #** Default values *****************************************************
code_pg_debut = code_pg_debut or ''
gest = gest or ''
cat = cat or ''
moule = moule or ''
projet = projet or ''
client = client or ''
fournisseur = fournisseur or ''
type_commande = type_commande or ''
type_rapport = type_rapport or 'Fabrication'
calage = calage or 'Date de fin'
nb_semaines = nb_semaines or 18
nb_semaines = int(nb_semaines)
height = filter['height']
#***********************************************************************
        #** Choice lists for the filters ***************************************
select_nb_semaines=[4,8,12,16,18,20,25,30,40,60]
select_type_commande=['','ferme_uniquement','ferme','ouverte','cadencée']
select_type_rapport=['Fabrication','Achat']
select_calage=['Date de fin','Date de début']
#***********************************************************************
        # ** Report title ******************************************************
titre="Suggestions de fabrication"
if type_rapport=="Achat":
titre="Suggestions d'achat";
if valorisation:
titre="Valorisation stock fabrication"
if type_rapport=="Achat":
titre="Valorisation stock achat"
# **********************************************************************
        # ** Look up the categories ********************************************
SQL="""
select id, name
from is_category
where id>0
"""
cr.execute(SQL)
result = cr.fetchall()
cat2id={}
for row in result:
cat2id[row[1]]=row[0]
# **********************************************************************
        # ** Look up the costs *************************************************
SQL="""
select name, cout_act_total
from is_cout
"""
cr.execute(SQL)
result = cr.fetchall()
Couts={}
for row in result:
Couts[row[0]]=row[1]
# **********************************************************************
        # ** Filter for the queries ********************************************
filtre="";
if code_pg_debut:
filtre=filtre+" and pt.is_code ilike '"+code_pg_debut+"%' "
if type_rapport=="Achat":
filtre=filtre+" and pt.purchase_ok=true "
else:
filtre=filtre+" and pt.purchase_ok<>true "
if cat:
filtre=filtre+" and ic.name='"+cat+"' "
if moule:
filtre=filtre+" and (im.name='"+moule+"' or id.name='"+moule+"' )"
if projet:
filtre=filtre+" and (imp1.name ilike '%"+projet+"%' or imp2.name ilike '%"+projet+"%') "
if client:
filtre=filtre+" and rp.is_code='"+client+"' "
# **********************************************************************
        # ** Look up the purchase order type ***********************************
TypeCde={}
if type_rapport=="Achat":
SQL="""
SELECT
cof.type_commande,
cof.partner_id,
rp.is_code,
rp.is_adr_code,
cofp.product_id
FROM is_cde_ouverte_fournisseur cof inner join is_cde_ouverte_fournisseur_product cofp on cofp.order_id=cof.id
inner join res_partner rp on cof.partner_id=rp.id
"""
cr.execute(SQL)
result = cr.fetchall()
for row in result:
cle=('0000'+row[2])[-4:]+'-'+row[3]+'/'+str(row[4])
TypeCde[cle]=row[0]
SQL="""
SELECT
rp.is_code,
rp.is_adr_code,
product_id
FROM is_cde_ferme_cadencee cfc inner join res_partner rp on cfc.partner_id=rp.id
"""
cr.execute(SQL)
result = cr.fetchall()
for row in result:
cle=('0000'+row[0])[-4:]+'-'+row[1]+'/'+str(row[2])
TypeCde[cle]=u'cadencée'
# **********************************************************************
        # ** Look up the list of managers **************************************
SQL="""
SELECT distinct ig.name gest
FROM product_product pp inner join product_template pt on pp.product_tmpl_id=pt.id
left outer join is_mold im on pt.is_mold_id=im.id
left outer join is_dossierf id on pt.is_dossierf_id=id.id
inner join is_gestionnaire ig on pt.is_gestionnaire_id=ig.id
left outer join is_category ic on pt.is_category_id=ic.id
left outer join is_mold_project imp1 on im.project=imp1.id
left outer join is_mold_project imp2 on id.project=imp2.id
left outer join res_partner rp on pt.is_client_id=rp.id
WHERE pp.id>0 """+filtre+"""
"""
if fournisseur:
code=fournisseur.split('-')[0]
SQL=SQL+"""
and (
(
select rp2.is_code
from product_supplierinfo ps inner join res_partner rp2 on ps.name=rp2.id
where ps.product_tmpl_id=pt.id order by ps.sequence,ps.id limit 1
)='"""+code+"""'
)
"""
SQL=SQL+""" ORDER BY ig.name """
cr.execute(SQL)
result = cr.fetchall()
SelectGestionnaires={}
for row in result:
SelectGestionnaires[row[0]]=1
SelectGestionnaires=OrderedDict(sorted(SelectGestionnaires.items(), key=lambda t: t[0]))
select_gest=[]
select_gest.append('')
for x in SelectGestionnaires:
select_gest.append(x)
# **********************************************************************
        # ** Look up the list of suppliers *************************************
SQL="""
SELECT
( select concat(rp2.is_code,'-',rp2.is_adr_code)
from product_supplierinfo ps inner join res_partner rp2 on ps.name=rp2.id
where ps.product_tmpl_id=pt.id order by ps.sequence,ps.id limit 1
) code_fournisseur
FROM product_product pp inner join product_template pt on pp.product_tmpl_id=pt.id
left outer join is_mold im on pt.is_mold_id=im.id
left outer join is_dossierf id on pt.is_dossierf_id=id.id
inner join is_gestionnaire ig on pt.is_gestionnaire_id=ig.id
left outer join is_category ic on pt.is_category_id=ic.id
left outer join is_mold_project imp1 on im.project=imp1.id
left outer join is_mold_project imp2 on id.project=imp2.id
left outer join res_partner rp on pt.is_client_id=rp.id
WHERE pp.id>0 """+filtre+"""
"""
cr.execute(SQL)
result = cr.fetchall()
select_fournisseur=[]
select_fournisseur.append('')
for row in result:
cle=row[0]
if cle and cle not in select_fournisseur:
select_fournisseur.append(cle)
select_fournisseur.sort()
# **********************************************************************
if validation=='ko':
html='Indiquez vos critères de filtre et validez'
else:
            # ** Filter on the supplier (partner_id) ****************************
partner_id=False
if fournisseur:
tab=fournisseur.split('-')
SQL="select id from res_partner where is_code='"+tab[0]+"' and is_adr_code='"+tab[1]+"' "
cr.execute(SQL)
result = cr.fetchall()
for row in result:
partner_id=row[0]
if gest:
filtre=filtre+" and ig.name='"+gest+"' "
if partner_id:
filtre=filtre+"""
and (
( select rp2.id from product_supplierinfo ps inner join res_partner rp2 on ps.name=rp2.id
where ps.product_tmpl_id=pt.id order by ps.sequence limit 1
)="""+str(partner_id)+"""
)
"""
filtre=filtre+" AND ic.name!='80' "
# **********************************************************************
            # ** Table of weeks ****************************************************
date=datetime.datetime.now()
jour=date.weekday()
date = date - datetime.timedelta(days=jour)
TabSemaines=[]
for i in range(0,int(nb_semaines)):
d=date.strftime('%Y%m%d')
TabSemaines.append(d)
date = date + datetime.timedelta(days=7)
# **********************************************************************
            # ** Look up stock A *************************************************
SQL="""
select sq.product_id, sum(sq.qty) as qt
from stock_quant sq inner join stock_location sl on sq.location_id=sl.id
where sl.usage='internal' and sl.active='t' and sl.control_quality='f'
group by sq.product_id
order by sq.product_id
"""
cr.execute(SQL)
result = cr.fetchall()
StocksA={}
for row in result:
StocksA[row[0]]=row[1]
# **********************************************************************
            # ** Look up stock Q *************************************************
SQL="""
select sq.product_id, sum(sq.qty) as qt
from stock_quant sq inner join stock_location sl on sq.location_id=sl.id
where sl.usage='internal' and sl.active='t' and sl.control_quality='t'
group by sq.product_id
order by sq.product_id
"""
cr.execute(SQL)
result = cr.fetchall()
StocksQ={}
for row in result:
StocksQ[row[0]]=row[1]
# **********************************************************************
            # ** Look up the safety stock ****************************************
SQL="""
select pp.id, pt.is_stock_secu as qt
from product_product pp inner join product_template pt on pp.product_tmpl_id=pt.id
"""
cr.execute(SQL)
result = cr.fetchall()
StocksSecu={}
for row in result:
StocksSecu[row[0]]=row[1]
# **********************************************************************
            # ** Look up the default suppliers ***********************************
SQL="""
select
a.id as product_id,
c.is_code as is_code,
c.name as name,
b.delay as delay,
c.is_adr_code
from product_product a, product_supplierinfo b, res_partner c
where a.product_tmpl_id=b.product_tmpl_id and b.name=c.id
order by b.sequence, b.id
"""
cr.execute(SQL)
result = cr.fetchall()
Fournisseurs={}
Delai_Fournisseurs={}
for row in result:
name=('0000'+row[1])[-4:]+'-'+row[4]
if name=='':
name=row[2]
Fournisseurs[row[0]]=name
Delai_Fournisseurs[row[0]]=row[3]
# **********************************************************************
            # ** Look up the CBN (MRP) forecasts *********************************
SQL="""
SELECT
mp.id as numod,
mp.start_date as date_debut,
mp.start_date_cq as date_fin,
mp.quantity as qt,
mp.type as typeod,
mp.product_id,
pt.is_code as code,
pp.name_template as designation,
pt.is_stock_secu,
pt.produce_delay,
pt.lot_mini,
pt.multiple,
pt.is_mold_dossierf as moule,
mp.name as name,
ig.name as gest
FROM mrp_prevision mp inner join product_product pp on mp.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
left outer join is_mold im on pt.is_mold_id=im.id
left outer join is_dossierf id on pt.is_dossierf_id=id.id
left outer join is_gestionnaire ig on pt.is_gestionnaire_id=ig.id
left outer join is_category ic on pt.is_category_id=ic.id
left outer join is_mold_project imp1 on im.project=imp1.id
left outer join is_mold_project imp2 on id.project=imp2.id
left outer join res_partner rp on pt.is_client_id=rp.id
WHERE mp.id>0 """+filtre+"""
ORDER BY mp.name
"""
cr.execute(SQL)
result = cr.fetchall()
TabIni={};
TabIni=self.RemplitTab(TabIni, result, TabSemaines, type_rapport,
StocksA, StocksQ, StocksSecu, Fournisseurs, Delai_Fournisseurs,
calage, valorisation, Couts, fournisseur, TypeCde, type_commande)
# **********************************************************************
            # ** Look up the customer orders *************************************
SQL="""
SELECT
so.id as numod,
sol.is_date_expedition as date_debut,
sol.is_date_expedition as date_fin,
sol.product_uom_qty as qt,
sol.is_type_commande as typeod,
sol.product_id,
pt.is_code as code,
pp.name_template as designation,
pt.is_stock_secu,
pt.produce_delay,
pt.lot_mini,
pt.multiple,
pt.is_mold_dossierf as moule,
so.name as name
FROM sale_order_line sol inner join sale_order so on sol.order_id=so.id
inner join product_product pp on sol.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
left outer join is_mold im on pt.is_mold_id=im.id
left outer join is_dossierf id on pt.is_dossierf_id=id.id
left outer join is_gestionnaire ig on pt.is_gestionnaire_id=ig.id
left outer join is_category ic on pt.is_category_id=ic.id
left outer join is_mold_project imp1 on im.project=imp1.id
left outer join is_mold_project imp2 on id.project=imp2.id
left outer join res_partner rp on pt.is_client_id=rp.id
WHERE sol.id>0 """+filtre+""" and sol.state<>'done' and sol.state<>'cancel'
ORDER BY sol.name
"""
cr.execute(SQL)
result = cr.fetchall()
TabIni=self.RemplitTab(TabIni, result, TabSemaines, type_rapport,
StocksA, StocksQ, StocksSecu, Fournisseurs, Delai_Fournisseurs,
calage, valorisation, Couts, fournisseur, TypeCde, type_commande)
# **********************************************************************
            # ** Look up the manufacturing orders (OF) ***************************
SQL="""
SELECT
mp.id as numod,
mp.date_planned as date_debut,
mp.date_planned as date_fin,
sm.product_qty as qt,
'FL' as typeod,
sm.product_id,
pt.is_code as code,
pp.name_template as designation,
pt.is_stock_secu,
pt.produce_delay,
pt.lot_mini,
pt.multiple,
pt.is_mold_dossierf as moule,
mp.name as name
FROM stock_move sm inner join product_product pp on sm.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
inner join mrp_production mp on mp.id=sm.production_id
left outer join is_mold im on pt.is_mold_id=im.id
left outer join is_dossierf id on pt.is_dossierf_id=id.id
left outer join is_gestionnaire ig on pt.is_gestionnaire_id=ig.id
left outer join is_category ic on pt.is_category_id=ic.id and ic.name!='74'
left outer join is_mold_project imp1 on im.project=imp1.id
left outer join is_mold_project imp2 on id.project=imp2.id
left outer join res_partner rp on pt.is_client_id=rp.id
WHERE sm.id>0 """+filtre+""" and production_id>0 and sm.state<>'done' and sm.state<>'cancel'
ORDER BY sm.name
"""
cr.execute(SQL)
result = cr.fetchall()
TabIni=self.RemplitTab(TabIni, result, TabSemaines, type_rapport,
StocksA, StocksQ, StocksSecu, Fournisseurs, Delai_Fournisseurs,
calage, valorisation, Couts, fournisseur, TypeCde, type_commande)
# **********************************************************************
            # ** Look up the components of the manufacturing orders **************
SQL="""
SELECT
sm.id as numod,
sm.date_expected as date_debut,
sm.date_expected as date_fin,
sm.product_qty as qt,
'FM' as typeod,
sm.product_id,
pt.is_code as code,
pp.name_template as designation,
pt.is_stock_secu,
pt.produce_delay,
pt.lot_mini,
pt.multiple,
pt.is_mold_dossierf as moule,
sm.name as name
FROM stock_move sm inner join product_product pp on sm.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
left outer join is_mold im on pt.is_mold_id=im.id
left outer join is_dossierf id on pt.is_dossierf_id=id.id
left outer join is_gestionnaire ig on pt.is_gestionnaire_id=ig.id
left outer join is_category ic on pt.is_category_id=ic.id
left outer join is_mold_project imp1 on im.project=imp1.id
left outer join is_mold_project imp2 on id.project=imp2.id
left outer join res_partner rp on pt.is_client_id=rp.id
WHERE sm.id>0 """+filtre+""" and raw_material_production_id>0 and sm.state<>'done' and sm.state<>'cancel'
ORDER BY sm.name
"""
cr.execute(SQL)
result = cr.fetchall()
TabIni=self.RemplitTab(TabIni, result, TabSemaines, type_rapport,
StocksA, StocksQ, StocksSecu, Fournisseurs, Delai_Fournisseurs,
calage, valorisation, Couts, fournisseur, TypeCde, type_commande)
# **********************************************************************
            # ** Look up the supplier orders *************************************
SQL="""
SELECT
po.id as numod,
pol.date_planned as date_debut,
pol.date_planned as date_fin,
sm.product_qty as qt,
'SF' as typeod,
pol.product_id,
pt.is_code as code,
pp.name_template as designation,
pt.is_stock_secu,
pt.produce_delay,
pt.lot_mini,
pt.multiple,
pt.is_mold_dossierf as moule,
po.name as name
FROM purchase_order_line pol left outer join purchase_order po on pol.order_id=po.id
inner join product_product pp on pol.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
inner join stock_move sm on pol.id=sm.purchase_line_id
left outer join is_mold im on pt.is_mold_id=im.id
left outer join is_dossierf id on pt.is_dossierf_id=id.id
left outer join is_gestionnaire ig on pt.is_gestionnaire_id=ig.id
left outer join is_category ic on pt.is_category_id=ic.id and ic.name not in ('70','72','73','74')
left outer join is_mold_project imp1 on im.project=imp1.id
left outer join is_mold_project imp2 on id.project=imp2.id
left outer join res_partner rp on pt.is_client_id=rp.id
WHERE pol.id>0 """+filtre+""" and pol.state<>'draft' and pol.state<>'done' and pol.state<>'cancel'
and sm.state in ('draft','waiting','confirmed','assigned')
ORDER BY pol.name
"""
cr.execute(SQL)
result = cr.fetchall()
TabIni=self.RemplitTab(TabIni, result, TabSemaines, type_rapport,
StocksA, StocksQ, StocksSecu, Fournisseurs, Delai_Fournisseurs,
calage, valorisation, Couts, fournisseur, TypeCde, type_commande)
# **********************************************************************
            # ** Query so that every product appears in the result ****************
if valorisation:
SQL="""
SELECT
pt.id as numod,
'' as date_debut,
'' as date_fin,
0 as qt,
'stock' as typeod,
pp.id as product_id,
pt.is_code as code,
pp.name_template as designation,
pt.is_stock_secu,
pt.produce_delay,
pt.lot_mini,
pt.multiple,
pt.is_mold_dossierf as moule,
pp.name_template as name
FROM product_product pp inner join product_template pt on pp.product_tmpl_id=pt.id
left outer join is_mold im on pt.is_mold_id=im.id
left outer join is_dossierf id on pt.is_dossierf_id=id.id
left outer join is_gestionnaire ig on pt.is_gestionnaire_id=ig.id
left outer join is_category ic on pt.is_category_id=ic.id and ic.name not in ('70','72','73','74')
left outer join is_mold_project imp1 on im.project=imp1.id
left outer join is_mold_project imp2 on id.project=imp2.id
left outer join res_partner rp on pt.is_client_id=rp.id
WHERE pt.id>0 """+filtre+"""
ORDER BY pt.is_code
"""
cr.execute(SQL)
result = cr.fetchall()
TabIni=self.RemplitTab(TabIni, result, TabSemaines, type_rapport,
StocksA, StocksQ, StocksSecu, Fournisseurs, Delai_Fournisseurs,
calage, valorisation, Couts, fournisseur, TypeCde, type_commande)
# **********************************************************************
            #** Fill TabSho ********************************************************
TabSho={};
lig=0;
Tab=TabIni
for key, val in Tab.iteritems():
Type=Tab[key]['TYPE']
if Type!='99-Stock':
color=self.color_cel(Type)
if 0 not in TabSho:
TabSho[0]={}
if 1 not in TabSho:
TabSho[1]={}
TabSho[0][lig]=Tab[key]["Code"]
TabSho[1][lig]=Type
Type=Type[3:]
for i in range(0, nb_semaines):
k=TabSemaines[i]
if k in Tab[key]:
Qt=round(Tab[key][k],2)
else:
Qt=0
Lien="#"
k=TabSemaines[i]+'OT'
if k in Tab[key]:
OT=Tab[key][k]
OT=OT[0:len(OT)-1]
else:
OT=''
k=TabSemaines[i]+'INFO'
if k in Tab[key]:
INFO=Tab[key][k]
else:
INFO=''
docid=''
if Type=='FS' or Type=='SA' or Type=='CF' or Type=='CP' or Type=='FL' or Type=='SF':
Lien="Modif_FS_Liste.php?zzTypeOD="+Type.lower()+"&zzNumOD="+str(OT)
docid=str(OT)
k=i+2
if k not in TabSho:
TabSho[k]={}
if Qt==0 and color!='Black':
val=''
else:
val="{0:10.0f}".format(Qt)
#TabSho[k][lig]="<a style=\"color:"+color+";\" class=\"info\" target='Modif_Odoo' href=\""+Lien+"\">"+"{0:10.0f}".format(Qt)+"<span>"+INFO+"</span></a>"
TabSho[k][lig]="<a style=\"color:"+color+";\" class=\"info\" type='"+Type+"' docid='"+str(docid)+"'>"+val+"<span>"+INFO+"</span></a>"
                        #** Compute the theoretical stock **********************
if Tab[key]['TYPE']=='90-Stock':
if i==0:
Stock=Tab[key][0]+Qt
else:
q=TabSho[1+i][lig]
Stock=TabSho[1+i][lig]+Qt
TabSho[2+i][lig]=Stock
#*******************************************************
                        #** Compute the valued stock ***************************
if Tab[key]['TYPE']=='92-Stock Valorisé':
if i==0:
Stock=Tab[key][0]+Qt;
else:
Stock=TabSho[1+i][lig]+Qt;
TabSho[2+i][lig]=round(Stock,2)
#*******************************************************
lig+=1
Tab=TabSho
# ******************************************************************
NomCol = ["Sécu / Délai / Lot / Multi / Stock A/Q", "Type"]
Style = ["NormalLN", "NormalCN"]
Format = ["TEXT" , "TEXT" ]
Total = [0 , 0 ]
Lien = ["" , "" ]
Size = [220 , 40 ]
            # ** Table of weeks ************************************************
date=datetime.datetime.now()
jour=date.weekday()
date = date - datetime.timedelta(days=jour)
TabSemaines=[]
for i in range(0,int(nb_semaines)):
s='S'+str(date.isocalendar()[1])+'<br />'+date.strftime('%d.%m')
TabSemaines.append(s)
date = date + datetime.timedelta(days=7)
# ******************************************************************
for i in range(0,int(nb_semaines)):
NomCol.append(TabSemaines[i])
Style.append("NormalRN")
Format.append("TEXT")
Total.append(0)
Lien.append("")
Size.append(48)
width=220+40+nb_semaines*(48+2)+22
            #** Build the lists and the sort key ******************************
lst={}
lst['key']=[]
for col, v1 in Tab.iteritems():
lst[col]=[]
for lig, v2 in v1.iteritems():
if col==1:
key=Tab[0][lig]+Tab[1][lig]
lst['key'].append(key)
lst[col].append(v2)
#*******************************************************************
            #** Sort the lists *************************************************
z=[]
z.append(lst['key'])
for col in range(0,len(Tab)):
z.append(lst[col])
NewTab=zip(*z)
NewTab.sort()
#*******************************************************************
            #** Rebuild Tab ****************************************************
Tab={}
lig=0
for row in NewTab:
col=0
for cel in row:
if col>0:
if lig==0:
Tab[col-1]={}
Tab[col-1][lig]=cel
col+=1
lig+=1
#*******************************************************************
            #** Generate the CSV file ******************************************
attachment_id=''
if valorisation:
csv={};
for lig in range(0,len(Tab[0])):
                    #** Extract the CodePG and the description *****************
Key=Tab[0][lig]
Key=Key.split('</b>')
Key=Key[0]
Key=Key.split('<b>')
Key=Key[1]
CodePG=Key
Key=Tab[0][lig]
Key=Key.split('<br />')
Key=Key[1]
Designation=Key
#***********************************************************
if CodePG not in csv:
csv[CodePG]={}
csv[CodePG][0]=CodePG
csv[CodePG][1]=Designation
Type=Tab[1][lig]
if Type=='90-Stock':
for col in range(2,len(Tab)):
csv[CodePG][col*1000+1]=Tab[col][lig]
if Type=='92-Stock Valorisé':
for col in range(2,len(Tab)):
csv[CodePG][col*1000+2]=Tab[col][lig]
                #** Write the CSV file ****************************************
user = self.env['res.users'].browse(uid)
name = 'analyse-cbn-'+user.login+'.csv'
path='/tmp/'+name
f = open(path,'wb')
txt=[];
txt.append('CodePG')
txt.append('Désignation')
date=datetime.datetime.now()
jour=date.weekday()
date = date - datetime.timedelta(days=jour)
for i in range(0,int(nb_semaines)):
v='Stock S'+str(date.isocalendar()[1])+' '+date.strftime('%d.%m')
txt.append(v)
v='Valorisé S'+str(date.isocalendar()[1])+' '+date.strftime('%d.%m')
txt.append(v)
date = date + datetime.timedelta(days=7)
f.write(u'\t'.join(txt)+'\r\n')
for k, v in csv.iteritems():
v=self.ksort(v)
txt=[]
for k2, v2 in v:
txt.append(str(v2).replace('.',','))
f.write(u'\t'.join(txt)+'\r\n')
f.close()
#***************************************************************
                # ** Create or update the attachment *******************
attachment_obj = self.env['ir.attachment']
attachments = attachment_obj.search([('res_id','=',user.id),('name','=',name)])
csv = open(path,'rb').read()
vals = {
'name': name,
'datas_fname': name,
'type': 'binary',
'res_id': user.id,
'datas': csv.encode('base64'),
}
attachment_id=False
if attachments:
for attachment in attachments:
attachment.write(vals)
attachment_id=attachment.id
else:
attachment = attachment_obj.create(vals)
attachment_id=attachment.id
#*******************************************************************
#*******************************************************************
            #** HTML code ******************************************************
if len(Tab)==0:
html='Aucune donnée !'
else:
html=''
head="<thead><tr class=\"TitreTabC\">\n"
for col in range(0,len(Tab)):
align='left'
if col>1:
align='right'
head+="<th style=\"width:"+str(Size[col])+"px;text-align:"+align+"\">"+NomCol[col]+"</th>\n"
head+="</tr></thead>\n"
html+="<div style=\"width:"+str(width+20)+"px;\" id=\"table_head\">\n"
html+="<table style=\"border-width:0px;border-spacing:0px;padding:0px;width:"+str(width)+"px;\">\n"
html+=head
html+="</table>\n"
html+="</div>\n"
alt=1
#height=10000
#height:"+str(height)+"px
html+="<div style=\"width:"+str(width+20)+"px;\" id=\"table_body\">\n";
html+="<table style=\"border-width:0px;border-spacing:0px;padding:0px;width:"+str(width)+"px;\">\n";
html+=head;
html+="<tbody class=\"tbody\">\n"
for lig in range(0,len(Tab[0])):
if lig>0:
if Tab[0][lig]!=Tab[0][lig-1]:
alt=-alt
if alt==1:
bgcolor="ffdec0"
else:
bgcolor="fae9da"
onclick = "onclick=\"clicktr('id"+str(lig)+"','"+bgcolor+"','2')\""
html+="<tr style=\"background-color:#"+bgcolor+";\" id=\"id"+str(lig)+\
"\"onmouseover=\"clicktr('id"+str(lig)+"','ffff00','1')\"onmouseout=\"clicktr('id"+str(lig)+\
"','"+bgcolor+"','1')\" "+onclick+">\n"
for col in range(0,len(Tab)):
cel=Tab[col][lig]
if(col==1):
cel=cel[3:]
if lig>0:
if col==0:
if Tab[0][lig]==Tab[0][lig-1]:
cel=" "
                    #** Count how many rows share the same code ****************
rowspan=1
if col==0:
if lig==0 or (lig>0 and Tab[0][lig]!=Tab[0][lig-1]):
while (lig+rowspan) in Tab[0] and Tab[0][lig]==Tab[0][lig+rowspan]:
rowspan+=1
#***************************************************************
color="black"
if col>0:
color=self.color_cel(Tab[1][lig],cel)
if col>1:
if type(cel)==float:
if cel==0 and color!='Black':
cel=''
else:
cel="{:10.0f}".format(cel)
cel="<b>"+str(cel)+"</b>"
align='left'
if col>1:
align='right'
if lig==0 or col>0 or (lig>0 and Tab[0][lig]!=Tab[0][lig-1]):
html+="<td style=\"width:"+str(Size[col])+"px;color:"+color+";text-align:"+align+";\" rowspan=\""+str(rowspan)+"\" class=\""+Style[col]+"\">"+str(cel)+"</td>\n"
html+="</tr>\n"
html+="</tbody>\n"
html+="</table>\n"
html+="</div>"
if valorisation:
html+="<a class=\"info\" type='stock-valorise' attachment_id='"+str(attachment_id)+"'>Stock valorisé</a>\n"
#***********************************************************************
vals={
'titre' : titre,
'code_pg_debut' : code_pg_debut,
'moule' : moule,
'cat' : cat,
'projet' : projet,
'client' : client,
'valorisation' : valorisation,
'select_gest' : select_gest,
'gest' : gest,
'select_fournisseur' : select_fournisseur,
'fournisseur' : fournisseur,
'select_nb_semaines' : select_nb_semaines,
'nb_semaines' : nb_semaines,
'select_type_commande': select_type_commande,
'type_commande' : type_commande,
'select_type_rapport' : select_type_rapport,
'type_rapport' : type_rapport,
'select_calage' : select_calage,
'calage' : calage,
'html' : html,
}
return vals
@api.model
def ksort(self,d):
return [(k,d[k]) for k in sorted(d.keys())]
@api.model
def RemplitTab(self,Tab, result, TabSemaines, type_rapport, StocksA,
StocksQ, StocksSecu, Fournisseurs, Delai_Fournisseurs, calage,
valorisation, Couts, fournisseur, TypeCde, type_commande):
for row in result:
numod = row[0]
date_debut = row[1]
date_fin = row[2]
qt = row[3]
typeod = row[4].strip()
product_id = row[5]
code_pg = row[6]
designation = row[7]
is_stock_secu = row[8]
produce_delay = row[9]
lot_mini = row[10]
multiple = row[11]
moule = row[12] or ''
name = row[13]
test=True
if type_rapport=='Achat':
is_code=''
if product_id in Fournisseurs:
is_code = Fournisseurs[product_id]
cle = is_code+'/'+str(product_id);
if cle in TypeCde:
t=TypeCde[cle]
Code=code_pg+' ('+t+')';
if type_commande!='' and type_commande!=t:
test=False
StockA=0;
StockQ=0;
if product_id in StocksA:
StockA=StocksA[product_id]
if product_id in StocksQ:
StockQ=StocksQ[product_id]
if typeod=='stock' and StockA==0:
test=False
if test:
Tri=moule
if type_rapport=='Achat':
if product_id in Fournisseurs:
Fournisseur=Fournisseurs[product_id]
else:
Fournisseur='0000'
Tri=Fournisseur
if typeod=='':
typeod='ferme'
Cle=code_pg+typeod
Code=Tri+" / <b>"+code_pg+"</b>"
if type_rapport=='Achat' and moule!='':
Code=Code+' / '+moule
if type_rapport=='Achat':
k = is_code+'/'+str(product_id);
if k in TypeCde:
t=TypeCde[k]
Code=Code+' ('+t+')';
if type_rapport=='Achat':
Delai=0
if product_id in Delai_Fournisseurs:
Delai=Delai_Fournisseurs[product_id]
else:
Delai=produce_delay
Code=Code+\
'<br />'+\
designation+'<br />'+\
"{0:10.0f}".format(is_stock_secu)+' / '+\
"{0:10.0f}".format(Delai)+' / '+\
"{0:10.0f}".format(lot_mini)+' / '+\
"{0:10.0f}".format(multiple)+' / '+\
"{0:10.0f}".format(StockA)+' / '+\
"{0:10.0f}".format(StockQ)
if Cle not in Tab:
Tab[Cle]={}
Tab[Cle]["Code"] = Code
                #** The leading digits are used to sort the lines **************
t={
"ferme" : "10-CF",
"previsionnel" : "20-CP",
"FL" : "30-FL",
"FM" : "40-FM",
"SF" : "50-SF",
"ft" : "60-FT",
"fs" : "70-FS",
"sa" : "80-SA",
"stock" : "99-Stock",
}
Tab[Cle]["TYPE"] = typeod
if typeod in t:
Tab[Cle]["TYPE"] = t[typeod]
#***************************************************************
                #** Determines the sign used in the stock computation **********
t={
"ferme" : -1,
"previsionnel" : -1,
"FL" : 1,
"FM" : -1,
"SF" : 1,
"fs" : 1,
"ft" : -1,
"sa" : 1,
"stock" : 1,
}
Sens=1
if typeod in t:
Sens = t[typeod]
#***************************************************************
if calage=='' or calage=='Date de fin':
DateLundi=self.datelundi(date_fin, TabSemaines)
else:
DateLundi=self.datelundi(date_debut, TabSemaines)
if typeod=='FL' and qt<0:
qt=-0.01
if DateLundi not in Tab[Cle]:
Tab[Cle][DateLundi]=0
Tab[Cle][DateLundi]=Tab[Cle][DateLundi]+round(Sens*qt,2);
if DateLundi+'OT' not in Tab[Cle]:
Tab[Cle][DateLundi+'OT']=''
Tab[Cle][DateLundi+'OT']=Tab[Cle][DateLundi+'OT']+str(numod)+","
if DateLundi+'INFO' not in Tab[Cle]:
Tab[Cle][DateLundi+'INFO']=''
Tab[Cle][DateLundi+'INFO']=Tab[Cle][DateLundi+'INFO']+name+" : "+str(round(qt,2))+'<br />'
                #** Compute the theoretical stock ******************************
Cle=code_pg+'90-Stock'
if Cle not in Tab:
Tab[Cle]={}
Tab[Cle]['Code'] = Code
Tab[Cle]['TYPE'] = '90-Stock'
StockSecu=0
if product_id in StocksSecu:
StockSecu=StocksSecu[product_id]
Tab[Cle][0]=StockA-StockSecu
if DateLundi not in Tab[Cle]:
Tab[Cle][DateLundi]=0
Tab[Cle][DateLundi]=Tab[Cle][DateLundi]+round(Sens*qt,2);
#***************************************************************
                #** Stock valuation ********************************************
if valorisation:
Cout=0
if product_id in Couts:
Cout=Couts[product_id]
Cle1=code_pg+u'90-Stock'
Cle2=code_pg+u'92-Stock Valorisé'
if Cle2 not in Tab:
Tab[Cle2]={}
Tab[Cle2]['Code'] = Code
Tab[Cle2]['TYPE']=u'92-Stock Valorisé'
Tab[Cle2][0]=Tab[Cle1][0]*Cout
Tab[Cle2][DateLundi]=Tab[Cle1][DateLundi]*Cout
#***************************************************************
return Tab
@api.model
def datelundi(self,date,TabSemaines):
if date=='':
return TabSemaines[0]
date=date[:10]
date=datetime.datetime.strptime(date, '%Y-%m-%d')
jour=date.weekday()
date = date - datetime.timedelta(days=jour)
date = date.strftime('%Y%m%d')
if date<TabSemaines[0]:
date=TabSemaines[0]
return date
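    # Illustrative example (added): with TabSemaines starting at '20230102',
    # datelundi('2023-01-05 10:00:00', TabSemaines) returns '20230102', the
    # Monday of that week; any date before the first week is clamped to
    # TabSemaines[0].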
@api.model
def color_cel(self,TypeOD,cel=0):
TypeOD=TypeOD[3:]
t={
"CF" : "DarkRed",
"CP" : "DarkGreen",
"FL" : "DarkMagenta",
"FM" : "#000000",
"SF" : "DarkMagenta",
"FS" : "DarkBlue",
"FT" : "#000000",
"SA" : "DarkBlue",
"Stock" : "Black",
}
color="Gray"
if TypeOD in t:
color=t[TypeOD]
if TypeOD=='Stock' and cel<0:
color='Red'
return color
| [
"[email protected]"
] | |
a88a1a1ab247a5f750af8f9e792c8ecee63957ab | 52a61caff0aeb434c32e5657e38762643e9f57dd | /Basics/TwoDimensionalLists(arrays)/is_matrix_symmetric.py | 98436819ed551b845d293a2907ee30e2f2c3f3d4 | [] | no_license | AndrewErmakov/PythonTrainingBasics | 1480a6378d1ec59884760e2b3014ccc3d28f058f | 639e15bbfc54da762cb9e366497754cfece30691 | refs/heads/master | 2021-06-10T15:57:58.682335 | 2021-03-25T13:37:30 | 2021-03-25T13:37:30 | 153,678,760 | 0 | 0 | null | 2018-10-30T13:52:51 | 2018-10-18T19:45:47 | Python | UTF-8 | Python | false | false | 288 | py | size_list = int(input())
numbers_list = [[int(j) for j in input().split()] for i in range(size_list)]
answer = "yes"
for i in range(size_list):
for j in range(size_list):
if numbers_list[i][j] != numbers_list[j][i]:
answer = "no"
break
print(answer)
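# Illustrative example (added): for the input
#   3
#   1 2 3
#   2 1 4
#   3 4 1
# numbers_list[i][j] == numbers_list[j][i] everywhere, so it prints "yes".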
| [
"[email protected]"
] | |
6d482b7fc6f41fd53b8ad4099680500cf4ef92cc | 0da9d2a15305421e224795cdf078838bd97eccc8 | /Algorithms/Strings/SeparateTheNumbers.py | 3ca2f20969d9e354e5da69e9dfad840c9e4c0624 | [] | no_license | LysanderGG/HackerRank | ac1300eea2f4e00f7d4e5084b5d570aa6fae0cfb | 039ec4414612cff84a941a7e7538fb36e10d427f | refs/heads/master | 2021-01-21T16:09:59.174131 | 2017-07-09T12:33:32 | 2017-07-09T12:33:32 | 91,877,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | #!/bin/python3
import sys
# Build a string of length at least n starting with i
def build_str(i, n):
res = ""
while len(res) < n:
res += str(i)
i += 1
return res
def solve(s):
for i in range(1, len(s) // 2 + 1):
first = int(s[:i])
if build_str(first, len(s)) == s:
return "YES " + str(first)
return "NO"
q = int(input().strip())
for a0 in range(q):
s = input().strip()
print(solve(s))
| [
"[email protected]"
] | |
8ed9f49a0b93692d31d34f2bb953214484a2f5ff | 3bfa43cd86d1fb3780f594c181debc65708af2b8 | /cs61a/project/scheme/scheme.py.ref | b0c4bb4b736f02ade237131c0c93cc13cd381858 | [] | no_license | ninjaboynaru/my-python-demo | 2fdb6e75c88e07519d91ee8b0e650fed4a2f9a1d | d679a06a72e6dc18aed95c7e79e25de87e9c18c2 | refs/heads/master | 2022-11-06T14:05:14.848259 | 2020-06-21T20:10:05 | 2020-06-21T20:10:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,024 | ref | """A Scheme interpreter and its read-eval-print loop."""
from scheme_primitives import *
from scheme_reader import *
from ucb import main, trace
##############
# Eval/Apply #
##############
def scheme_eval(expr, env, tail=False): # Optional third argument is ignored
"""Evaluate Scheme expression EXPR in environment ENV.
>>> expr = read_line('(+ 2 2)')
>>> expr
Pair('+', Pair(2, Pair(2, nil)))
>>> scheme_eval(expr, create_global_frame())
4
"""
# Evaluate atoms
if scheme_symbolp(expr):
return env.lookup(expr)
elif self_evaluating(expr):
return expr
# All non-atomic expressions are lists (combinations)
if not scheme_listp(expr):
raise SchemeError('malformed list: {0}'.format(repl_str(expr)))
first, rest = expr.first, expr.second
if scheme_symbolp(first) and first in SPECIAL_FORMS:
return SPECIAL_FORMS[first](rest, env)
else:
# BEGIN PROBLEM 5
"*** YOUR CODE HERE ***"
operator = scheme_eval(first, env)
if isinstance(operator, MacroProcedure):
result = operator.apply_macro(rest, env)
return scheme_eval(result, env)
else:
args=rest.map(lambda function: scheme_eval(function, env))
return scheme_apply(operator, args, env)
# END PROBLEM 5
def self_evaluating(expr):
"""Return whether EXPR evaluates to itself."""
return scheme_atomp(expr) or scheme_stringp(expr) or expr is None
def scheme_apply(procedure, args, env):
"""Apply Scheme PROCEDURE to argument values ARGS (a Scheme list) in
environment ENV."""
check_procedure(procedure)
if isinstance(procedure, PrimitiveProcedure):
return procedure.apply(args, env)
else:
new_env = procedure.make_call_frame(args, env)
return eval_all(procedure.body, new_env)
def eval_all(expressions, env):
"""Evaluate each expression im the Scheme list EXPRESSIONS in
environment ENV and return the value of the last."""
# BEGIN PROBLEM 8
if expressions == nil:
return None
new_expressions = expressions
while new_expressions.second != nil:
scheme_eval(new_expressions.first, env)
new_expressions = new_expressions.second
return scheme_eval(new_expressions.first, env, True)
# END PROBLEM 8
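# Illustrative note (added): for the Scheme list of expressions in
# (begin (define x 2) (+ x 1)), eval_all evaluates (define x 2) for effect
# and returns 3, the value of the last expression.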
################
# Environments #
################
class Frame:
"""An environment frame binds Scheme symbols to Scheme values."""
def __init__(self, parent):
"""An empty frame with parent frame PARENT (which may be None)."""
self.bindings = {}
self.parent = parent
def __repr__(self):
if self.parent is None:
return '<Global Frame>'
s = sorted(['{0}: {1}'.format(k, v) for k, v in self.bindings.items()])
return '<{{{0}}} -> {1}>'.format(', '.join(s), repr(self.parent))
def define(self, symbol, value):
"""Define Scheme SYMBOL to have VALUE."""
# BEGIN PROBLEM 3
"*** YOUR CODE HERE ***"
self.bindings[symbol] = value
# END PROBLEM 3
def lookup(self, symbol):
"""Return the value bound to SYMBOL. Errors if SYMBOL is not found."""
# BEGIN PROBLEM 3
"*** YOUR CODE HERE ***"
if symbol in self.bindings:
return self.bindings[symbol]
elif self.parent != None:
return self.parent.lookup(symbol)
# END PROBLEM 3
raise SchemeError('unknown identifier: {0}'.format(symbol))
def make_child_frame(self, formals, vals):
"""Return a new local frame whose parent is SELF, in which the symbols
in a Scheme list of formal parameters FORMALS are bound to the Scheme
values in the Scheme list VALS. Raise an error if too many or too few
vals are given.
>>> env = create_global_frame()
>>> formals, expressions = read_line('(a b c)'), read_line('(1 2 3)')
>>> env.make_child_frame(formals, expressions)
<{a: 1, b: 2, c: 3} -> <Global Frame>>
"""
child = Frame(self) # Create a new child with self as the parent
# BEGIN PROBLEM 11
"*** YOUR CODE HERE ***"
if formals.__len__() != vals.__len__():
raise(SchemeError)
else:
f, v = formals, vals
while f != nil:
child.bindings[f.first] = v.first
f, v = f.second, v.second
# END PROBLEM 11
return child
##############
# Procedures #
##############
class Procedure:
"""The supertype of all Scheme procedures."""
def scheme_procedurep(x):
return isinstance(x, Procedure)
class PrimitiveProcedure(Procedure):
"""A Scheme procedure defined as a Python function."""
def __init__(self, fn, use_env=False, name='primitive'):
self.name = name
self.fn = fn
self.use_env = use_env
def __str__(self):
return '#[{0}]'.format(self.name)
def apply(self, args, env):
"""Apply SELF to ARGS in ENV, where ARGS is a Scheme list.
>>> env = create_global_frame()
>>> plus = env.bindings['+']
>>> twos = Pair(2, Pair(2, nil))
>>> plus.apply(twos, env)
4
"""
if not scheme_listp(args):
raise SchemeError('arguments are not in a list: {0}'.format(args))
# Convert a Scheme list to a Python list
python_args = []
while args is not nil:
python_args.append(args.first)
args = args.second
# BEGIN PROBLEM 4
"*** YOUR CODE HERE ***"
if self.use_env:
python_args.append(env)
try:
return self.fn(*python_args)
except:
raise SchemeError
# END PROBLEM 4
class LambdaProcedure(Procedure):
"""A procedure defined by a lambda expression or a define form."""
def __init__(self, formals, body, env):
"""A procedure with formal parameter list FORMALS (a Scheme list),
whose body is the Scheme list BODY, and whose parent environment
starts with Frame ENV."""
self.formals = formals
self.body = body
self.env = env
def make_call_frame(self, args, env):
"""Make a frame that binds my formal parameters to ARGS, a Scheme list
of values, for a lexically-scoped call evaluated in environment ENV."""
# BEGIN PROBLEM 12
"*** YOUR CODE HERE ***"
new_frame = env.make_child_frame(self.formals, args)
if hasattr(self, 'env'):
new_frame = self.env.make_child_frame(self.formals, args)
return new_frame
# END PROBLEM 12
def __str__(self):
return str(Pair('lambda', Pair(self.formals, self.body)))
def __repr__(self):
return 'LambdaProcedure({0}, {1}, {2})'.format(
repr(self.formals), repr(self.body), repr(self.env))
class MacroProcedure(LambdaProcedure):
"""A macro: a special form that operates on its unevaluated operands to
create an expression that is evaluated in place of a call."""
def apply_macro(self, operands, env):
"""Apply this macro to the operand expressions."""
return complete_apply(self, operands, env)
def add_primitives(frame, funcs_and_names):
"""Enter bindings in FUNCS_AND_NAMES into FRAME, an environment frame,
as primitive procedures. Each item in FUNCS_AND_NAMES has the form
(NAME, PYTHON-FUNCTION, INTERNAL-NAME)."""
for name, fn, proc_name in funcs_and_names:
frame.define(name, PrimitiveProcedure(fn, name=proc_name))
#################
# Special Forms #
#################
# Each of the following do_xxx_form functions takes the cdr of a special form as
# its first argument---a Scheme list representing a special form without the
# initial identifying symbol (if, lambda, quote, ...). Its second argument is
# the environment in which the form is to be evaluated.
def do_define_form(expressions, env):
"""Evaluate a define form."""
check_form(expressions, 2)
target = expressions.first
if scheme_symbolp(target):
check_form(expressions, 2, 2)
# BEGIN PROBLEM 6
"*** YOUR CODE HERE ***"
env.define(target, scheme_eval(expressions.second.first, env))
return target
# END PROBLEM 6
elif isinstance(target, Pair) and scheme_symbolp(target.first):
# BEGIN PROBLEM 10
"*** YOUR CODE HERE ***"
env.define(target.first, LambdaProcedure(target.second, expressions.second, env))
return target.first
# END PROBLEM 10
else:
bad_target = target.first if isinstance(target, Pair) else target
raise SchemeError('non-symbol: {0}'.format(bad_target))
def do_quote_form(expressions, env):
"""Evaluate a quote form."""
check_form(expressions, 1, 1)
# BEGIN PROBLEM 7
"*** YOUR CODE HERE ***"
return expressions.first
# END PROBLEM 7
def do_begin_form(expressions, env):
"""Evaluate a begin form."""
check_form(expressions, 1)
return eval_all(expressions, env)
def do_lambda_form(expressions, env):
"""Evaluate a lambda form."""
check_form(expressions, 2)
formals = expressions.first
check_formals(formals)
# BEGIN PROBLEM 9
"*** YOUR CODE HERE ***"
return LambdaProcedure(formals, expressions.second, env)
# END PROBLEM 9
def do_if_form(expressions, env):
"""Evaluate an if form."""
check_form(expressions, 2, 3)
if scheme_truep(scheme_eval(expressions.first, env)):
return scheme_eval(expressions.second.first, env, True)
elif len(expressions) == 3:
return scheme_eval(expressions.second.second.first, env, True)
def do_and_form(expressions, env):
"""Evaluate a (short-circuited) and form."""
# BEGIN PROBLEM 13
"*** YOUR CODE HERE ***"
if len(expressions) == 0:
return True
elif len(expressions) == 1:
return scheme_eval(expressions.first, env, True)
elif scheme_falsep(scheme_eval(expressions.first, env)):
return False
return do_and_form(expressions.second, env)
# END PROBLEM 13
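# Sanity checks for and: (and) => #t, (and 1 2 3) => 3, and
# (and #f (/ 1 0)) => #f without ever evaluating the division.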
def do_or_form(expressions, env):
"""Evaluate a (short-circuited) or form."""
# BEGIN PROBLEM 13
"*** YOUR CODE HERE ***"
if expressions is nil:
return False
if expressions.second is not nil:
current = scheme_eval(expressions.first, env)
if scheme_falsep(current):
return do_or_form(expressions.second, env)
else:
return current
else:
return scheme_eval(expressions.first, env, True)
# END PROBLEM 13
def do_cond_form(expressions, env):
"""Evaluate a cond form."""
while expressions is not nil:
clause = expressions.first
check_form(clause, 1)
if clause.first == 'else':
test = True
if expressions.second != nil:
raise SchemeError('else must be last')
else:
test = scheme_eval(clause.first, env)
if scheme_truep(test):
# BEGIN PROBLEM 14
"*** YOUR CODE HERE ***"
if clause.second == nil:
return test
return eval_all(clause.second, env)
# END PROBLEM 14
expressions = expressions.second
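# e.g. (cond ((= 1 1) 'a) (else 'b)) evaluates to a; a clause with only a
# test, like (cond (5)), evaluates to the test's value (5).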
def do_let_form(expressions, env):
"""Evaluate a let form."""
check_form(expressions, 2)
let_env = make_let_frame(expressions.first, env)
return eval_all(expressions.second, let_env)
def make_let_frame(bindings, env):
"""Create a child frame of ENV that contains the definitions given in
BINDINGS. The Scheme list BINDINGS must have the form of a proper bindings
list in a let expression: each item must be a list containing a symbol
and a Scheme expression."""
if not scheme_listp(bindings):
raise SchemeError('bad bindings list in let form')
# BEGIN PROBLEM 15
"*** YOUR CODE HERE ***"
b = bindings
formals = nil
vals = nil
while b != nil:
check_form(b.first, 2, 2)
formals = Pair(b.first.first, formals)
vals = Pair(scheme_eval(b.first.second.first, env), vals)
b = b.second
check_formals(formals)
return env.make_child_frame(formals, vals)
# END PROBLEM 15
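# e.g. (let ((x 1) (y 2)) (+ x y)) builds formals (y x) and values (2 1);
# the order is reversed relative to the source, but each name keeps its value.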
def do_define_macro(expressions, env):
"""Evaluate a define-macro form."""
# BEGIN Problem 21
"*** YOUR CODE HERE ***"
check_form(expressions, 2)
target = expressions.first
if isinstance(target, Pair):
name = expressions.first.first
body = expressions.second
if scheme_symbolp(name):
check_formals(target.second)
env.bindings[name] = MacroProcedure(target.second, body, env)
return name
    raise SchemeError('invalid define-macro: {0}'.format(repl_str(target)))
# END Problem 21
SPECIAL_FORMS = {
'and': do_and_form,
'begin': do_begin_form,
'cond': do_cond_form,
'define': do_define_form,
'if': do_if_form,
'lambda': do_lambda_form,
'let': do_let_form,
'or': do_or_form,
'quote': do_quote_form,
'define-macro': do_define_macro,
}
# Utility methods for checking the structure of Scheme programs
def check_form(expr, min, max=float('inf')):
"""Check EXPR is a proper list whose length is at least MIN and no more
than MAX (default: no maximum). Raises a SchemeError if this is not the
case.
>>> check_form(read_line('(a b)'), 2)
"""
if not scheme_listp(expr):
raise SchemeError('badly formed expression: ' + repl_str(expr))
length = len(expr)
if length < min:
raise SchemeError('too few operands in form')
elif length > max:
raise SchemeError('too many operands in form')
def check_formals(formals):
"""Check that FORMALS is a valid parameter list, a Scheme list of symbols
in which each symbol is distinct. Raise a SchemeError if the list of
formals is not a well-formed list of symbols or if any symbol is repeated.
>>> check_formals(read_line('(a b c)'))
"""
symbols = set()
def check_and_add(symbol):
if not scheme_symbolp(symbol):
raise SchemeError('non-symbol: {0}'.format(symbol))
if symbol in symbols:
raise SchemeError('duplicate symbol: {0}'.format(symbol))
symbols.add(symbol)
while isinstance(formals, Pair):
check_and_add(formals.first)
formals = formals.second
def check_procedure(procedure):
"""Check that PROCEDURE is a valid Scheme procedure."""
if not scheme_procedurep(procedure):
raise SchemeError('{0} is not callable: {1}'.format(
type(procedure).__name__.lower(), repl_str(procedure)))
#################
# Dynamic Scope #
#################
class MuProcedure(Procedure):
"""A procedure defined by a mu expression, which has dynamic scope.
_________________
< Scheme is cool! >
-----------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
"""
def __init__(self, formals, body):
"""A procedure with formal parameter list FORMALS (a Scheme list) and
Scheme list BODY as its definition."""
self.formals = formals
self.body = body
# BEGIN PROBLEM 16
"*** YOUR CODE HERE ***"
    def make_call_frame(self, args, env):
        # Dynamic scope: a mu extends the environment of the call site
        # (a MuProcedure stores no defining environment).
        return env.make_child_frame(self.formals, args)
# END PROBLEM 16
def __str__(self):
return str(Pair('mu', Pair(self.formals, self.body)))
def __repr__(self):
return 'MuProcedure({0}, {1})'.format(
repr(self.formals), repr(self.body))
def do_mu_form(expressions, env):
"""Evaluate a mu form."""
check_form(expressions, 2)
formals = expressions.first
check_formals(formals)
# BEGIN PROBLEM 16
"*** YOUR CODE HERE ***"
return MuProcedure(formals, expressions.second)
# END PROBLEM 16
SPECIAL_FORMS['mu'] = do_mu_form
###########
# Streams #
###########
class Promise:
"""A promise."""
def __init__(self, expression, env):
self.expression = expression
self.env = env
def evaluate(self):
if self.expression is not None:
self.value = scheme_eval(self.expression, self.env.make_child_frame(nil, nil))
self.expression = None
return self.value
def __str__(self):
return '#[promise ({0}forced)]'.format(
'not ' if self.expression is not None else '')
def do_delay_form(expressions, env):
"""Evaluates a delay form."""
check_form(expressions, 1, 1)
return Promise(expressions.first, env)
def do_cons_stream_form(expressions, env):
"""Evaluate a cons-stream form."""
check_form(expressions, 2, 2)
return Pair(scheme_eval(expressions.first, env),
do_delay_form(expressions.second, env))
SPECIAL_FORMS['cons-stream'] = do_cons_stream_form
SPECIAL_FORMS['delay'] = do_delay_form
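# e.g. (cons-stream 1 (/ 1 0)) evaluates only the first element; the division
# sits in an unforced Promise and raises only if the rest is ever forced.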
##################
# Tail Recursion #
##################
class Thunk:
"""An expression EXPR to be evaluated in environment ENV."""
def __init__(self, expr, env):
self.expr = expr
self.env = env
def complete_apply(procedure, args, env):
"""Apply procedure to args in env; ensure the result is not a Thunk."""
val = scheme_apply(procedure, args, env)
if isinstance(val, Thunk):
return scheme_eval(val.expr, val.env)
else:
return val
def optimize_tail_calls(original_scheme_eval):
"""Return a properly tail recursive version of an eval function."""
def optimized_eval(expr, env, tail=False):
"""Evaluate Scheme expression EXPR in environment ENV. If TAIL,
return a Thunk containing an expression for further evaluation.
"""
if tail and not scheme_symbolp(expr) and not self_evaluating(expr):
return Thunk(expr, env)
else:
result = Thunk(expr, env)
# BEGIN
"*** YOUR CODE HERE ***"
while isinstance(result, Thunk):
expr, env = result.expr, result.env
result = original_scheme_eval(expr, env)
return result
# END
return optimized_eval
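# optimized_eval is a trampoline: expressions in tail position come back as
# Thunks and are unwound by the loop above, so a Scheme tail call such as
# (define (loop n) (if (= n 0) 0 (loop (- n 1)))) cannot exhaust the Python stack.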
################################################################
# Uncomment the following line to apply tail call optimization #
################################################################
scheme_eval = optimize_tail_calls(scheme_eval)
####################
# Extra Procedures #
####################
def scheme_map(fn, s, env):
check_type(fn, scheme_procedurep, 0, 'map')
check_type(s, scheme_listp, 1, 'map')
return s.map(lambda x: complete_apply(fn, Pair(x, nil), env))
def scheme_filter(fn, s, env):
check_type(fn, scheme_procedurep, 0, 'filter')
check_type(s, scheme_listp, 1, 'filter')
head, current = nil, nil
while s is not nil:
item, s = s.first, s.second
if complete_apply(fn, Pair(item, nil), env):
if head is nil:
head = Pair(item, nil)
current = head
else:
current.second = Pair(item, nil)
current = current.second
return head
def scheme_reduce(fn, s, env):
check_type(fn, scheme_procedurep, 0, 'reduce')
check_type(s, lambda x: x is not nil, 1, 'reduce')
check_type(s, scheme_listp, 1, 'reduce')
value, s = s.first, s.second
while s is not nil:
value = complete_apply(fn, scheme_list(value, s.first), env)
s = s.second
return value
################
# Input/Output #
################
def read_eval_print_loop(next_line, env, interactive=False, quiet=False,
startup=False, load_files=()):
"""Read and evaluate input until an end of file or keyboard interrupt."""
if startup:
for filename in load_files:
scheme_load(filename, True, env)
while True:
try:
src = next_line()
while src.more_on_line:
expression = scheme_read(src)
result = scheme_eval(expression, env)
if not quiet and result is not None:
print(repl_str(result))
except (SchemeError, SyntaxError, ValueError, RuntimeError) as err:
if (isinstance(err, RuntimeError) and
'maximum recursion depth exceeded' not in getattr(err, 'args')[0]):
raise
elif isinstance(err, RuntimeError):
print('Error: maximum recursion depth exceeded')
else:
print('Error:', err)
except KeyboardInterrupt: # <Control>-C
if not startup:
raise
print()
print('KeyboardInterrupt')
if not interactive:
return
except EOFError: # <Control>-D, etc.
print()
return
def scheme_load(*args):
"""Load a Scheme source file. ARGS should be of the form (SYM, ENV) or
(SYM, QUIET, ENV). The file named SYM is loaded into environment ENV,
with verbosity determined by QUIET (default true)."""
if not (2 <= len(args) <= 3):
expressions = args[:-1]
raise SchemeError('"load" given incorrect number of arguments: '
'{0}'.format(len(expressions)))
sym = args[0]
quiet = args[1] if len(args) > 2 else True
env = args[-1]
if (scheme_stringp(sym)):
sym = eval(sym)
check_type(sym, scheme_symbolp, 0, 'load')
with scheme_open(sym) as infile:
lines = infile.readlines()
args = (lines, None) if quiet else (lines,)
def next_line():
return buffer_lines(*args)
read_eval_print_loop(next_line, env, quiet=quiet)
def scheme_open(filename):
"""If either FILENAME or FILENAME.scm is the name of a valid file,
return a Python file opened to it. Otherwise, raise an error."""
try:
return open(filename)
except IOError as exc:
if filename.endswith('.scm'):
raise SchemeError(str(exc))
try:
return open(filename + '.scm')
except IOError as exc:
raise SchemeError(str(exc))
def create_global_frame():
"""Initialize and return a single-frame environment with built-in names."""
env = Frame(None)
env.define('eval',
PrimitiveProcedure(scheme_eval, True, 'eval'))
env.define('apply',
PrimitiveProcedure(complete_apply, True, 'apply'))
env.define('load',
PrimitiveProcedure(scheme_load, True, 'load'))
env.define('procedure?',
PrimitiveProcedure(scheme_procedurep, False, 'procedure?'))
env.define('map',
PrimitiveProcedure(scheme_map, True, 'map'))
env.define('filter',
PrimitiveProcedure(scheme_filter, True, 'filter'))
env.define('reduce',
PrimitiveProcedure(scheme_reduce, True, 'reduce'))
env.define('undefined', None)
add_primitives(env, PRIMITIVES)
return env
@main
def run(*argv):
import argparse
parser = argparse.ArgumentParser(description='CS 61A Scheme Interpreter')
parser.add_argument('-load', '-i', action='store_true',
help='run file interactively')
parser.add_argument('file', nargs='?',
type=argparse.FileType('r'), default=None,
help='Scheme file to run')
args = parser.parse_args()
next_line = buffer_input
interactive = True
load_files = []
if args.file is not None:
if args.load:
load_files.append(getattr(args.file, 'name'))
else:
lines = args.file.readlines()
def next_line():
return buffer_lines(lines)
interactive = False
read_eval_print_loop(next_line, create_global_frame(), startup=True,
interactive=interactive, load_files=load_files)
tscheme_exitonclick()
| [
"[email protected]"
] | |
f8e180729c3092f31dd14b405f694eda6ea55dd0 | 549270020f6c8724e2ef1b12e38d11b025579f8d | /recipes/msgpack-c/all/conanfile.py | eac066163d08578e6216ffddb784ee899746d0ca | [
"MIT"
] | permissive | conan-io/conan-center-index | 1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43 | 3b17e69bb4e5601a850b6e006e44775e690bac33 | refs/heads/master | 2023-08-31T11:34:45.403978 | 2023-08-31T11:13:23 | 2023-08-31T11:13:23 | 204,671,232 | 844 | 1,820 | MIT | 2023-09-14T21:22:42 | 2019-08-27T09:43:58 | Python | UTF-8 | Python | false | false | 3,964 | py | from conan import ConanFile
from conan.tools.files import get, copy, rmdir, save
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.scm import Version
import os
import textwrap
required_conan_version = ">=1.53.0"
class MsgpackCConan(ConanFile):
name = "msgpack-c"
description = "MessagePack implementation for C"
license = "BSL-1.0"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/msgpack/msgpack-c"
topics = ("msgpack", "message-pack", "serialization")
package_type = "library"
settings = "os", "arch", "build_type", "compiler"
options = {
"fPIC": [True, False],
"shared": [True, False],
}
default_options = {
"fPIC": True,
"shared": False,
}
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.libcxx")
self.settings.rm_safe("compiler.cppstd")
def layout(self):
cmake_layout(self, src_folder="src")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["MSGPACK_ENABLE_SHARED"] = self.options.shared
tc.variables["MSGPACK_ENABLE_STATIC"] = not self.options.shared
tc.variables["MSGPACK_32BIT"] = self.settings.arch == "x86"
tc.variables["MSGPACK_BUILD_EXAMPLES"] = False
tc.cache_variables["MSGPACK_BUILD_TESTS"] = False
tc.generate()
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, pattern="LICENSE_1_0.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
# TODO: to remove in conan v2 once cmake_find_package* & pkg_config generators removed
self._create_cmake_module_alias_targets(
os.path.join(self.package_folder, self._module_file_rel_path),
{"msgpackc": "msgpack::msgpack"}
)
def _create_cmake_module_alias_targets(self, module_file, targets):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent("""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""".format(alias=alias, aliased=aliased))
save(self, module_file, content)
@property
def _module_file_rel_path(self):
return os.path.join("lib", "cmake", f"conan-official-{self.name}-targets.cmake")
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "msgpack")
self.cpp_info.set_property("pkg_config_name", "msgpack")
if Version(self.version) < "6.0.0":
self.cpp_info.libs = ["msgpackc"]
self.cpp_info.set_property("cmake_target_name", "msgpackc")
else:
self.cpp_info.libs = ["msgpack-c"]
self.cpp_info.set_property("cmake_target_name", "msgpack-c")
# TODO: to remove in conan v2 once cmake_find_package* & pkg_config generators removed
self.cpp_info.names["cmake_find_package"] = "msgpack"
self.cpp_info.names["cmake_find_package_multi"] = "msgpack"
self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
self.cpp_info.names["pkg_config"] = "msgpack"
| [
"[email protected]"
] | |
9cbfa772353ca6774d58a800b97ac7adc0d3df4c | 3326ed6fa75623aca9f94242c06ba736af1fe1e4 | /src/qutip_symbolic/commutators.py | 0743e0d0810341df1aae9f59bfd53b74d568e58a | [
"BSD-3-Clause"
] | permissive | ZeroInfinite/qutip-symbolic | d443193981ea223c0ea0f9675288593f8371fc5e | bbc5d1e9f7928cd88e568140a6ff49bb060ce20d | refs/heads/master | 2023-05-01T12:52:52.037165 | 2021-05-19T14:34:47 | 2021-05-19T14:34:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py |
from .compat.commutator import Commutator
def recursive_commutator(a, b, n=1):
"""
Generate a recursive commutator of order n:
[a, b]_1 = [a, b]
[a, b]_2 = [a, [a, b]]
[a, b]_3 = [a, [a, b]_2] = [a, [a, [a, b]]]
...
"""
if n == 1:
return Commutator(a, b)
else:
return Commutator(a, recursive_commutator(a, b, n-1))
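# Illustrative usage (assuming A and B are sympy operator symbols):
#   recursive_commutator(A, B, 2) == Commutator(A, Commutator(A, B))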
| [
"[email protected]"
] | |
e458a806a907109ff03c60bc02d49f659e96156e | 431dadb72b70ab4d604d6f7722e1554e151fda83 | /examples/predict_demo_by_seq.py | fe13d90be2ae34c639eaac6850b563f74b2408ea | [
"Apache-2.0"
] | permissive | smilelight/nymph | fe3c04fb9145bb16993d81791ac7d3fe0f0b7587 | c8da2211f7a8f58d1c6d327b243e419ed9e64ead | refs/heads/master | 2022-12-08T16:59:24.823290 | 2020-09-01T08:48:03 | 2020-09-01T08:48:03 | 286,218,355 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | # -*- coding: utf-8 -*-
import os
import pandas as pd
from nymph.data import SeqDataset, split_dataset
from nymph.modules import SeqClassifier
project_path = os.path.abspath(os.path.join(__file__, '../../'))
data_path = os.path.join(project_path, r'data\test.csv')
save_path = 'demo_saves_seq'
def split_fn(dataset: list):
return list(range(len(dataset)+1))
if __name__ == '__main__':
# 读取数据
data = pd.read_csv(data_path)
# 构建分类器
classifier = SeqClassifier()
# 加载分类器
classifier.load(save_path)
# 构建数据集
seq_ds = SeqDataset(data, split_fn=split_fn, min_len=4)
# 预测模型
pred = classifier.predict(seq_ds)
print(pred)
# 获取各类别分类结果,并保存信息至文件中
classifier.report(seq_ds, 'seq_demo_report.csv')
# 对数据进行预测,并将数据和预测结果写入到新的文件中
classifier.summary(seq_ds, 'seq_demo_summary.csv')
| [
"[email protected]"
] | |
c271d3cf73452571a1c93a1185eb93f88ff3c1bf | 694c187c8a00bee8c670c1690170099bad9b16b3 | /palindrome.py | 220c4d3ad21d12c2a38242879e2e212ed2181a00 | [] | no_license | ajayvenkat10/Competitive | 301f220b6d296f7e34328f192c43c4d7ef208cb1 | 14f2ecebe10eb19f72cc412dd0c414b3b1de9b4d | refs/heads/master | 2022-11-20T14:31:33.590099 | 2020-07-23T15:39:14 | 2020-07-23T15:39:14 | 281,599,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | n=int(raw_input(""))
count=0
A=[]
a=raw_input("")
a=a.split()
for i in range(n):
b=int(a[i])
A.append(b)
while(len(A)>0):
count = count + pali(A)
print count
| [
"[email protected]"
] | |
96faca6dc627aa0ad4cd2cb5e017015d35a80cb7 | 1feae7286c5d61981b40520e2c1e1028b86bb8cc | /blog_newsapi/asgi.py | 05c058cfe761a8558228cfd03981b89811503c23 | [] | no_license | mukeshrakesh123/newsapp-blog | b97adb486f2d463e11fc054243833a2db6944865 | 596eac8981792fc368d2abfc4e19650332347f08 | refs/heads/main | 2023-06-16T11:44:10.665862 | 2021-07-10T16:34:50 | 2021-07-10T16:34:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
ASGI config for blog_newsapi project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog_newsapi.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
909e293fb34797f5f75d7502c620042158498e08 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /sesv2_write_f/bulk-email_send.py | a349241910bf4f467effed81bbf205506059170e | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
"""
write_parameter("sesv2", "send-bulk-email") | [
"[email protected]"
] | |
b8bc971de51c0b9c9209b3f1c24ac435f7161b4c | c1d9dc8351241a3bd519a6f8ebc4531bfed2de6f | /backup/Python/152.py | 3c06c1daf0f1af89b7d26c587e3d1a7fae8027fa | [] | no_license | yichenluan/LeetCodeSolution | 3cf4b31e36f32c6b689b7b724c5cf57c3efb70bc | 26af13bbac60d656415bbba0c3bc7acbaa5a7d63 | refs/heads/master | 2021-05-23T06:05:07.183561 | 2020-10-20T09:11:41 | 2020-10-20T09:11:41 | 52,205,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | class Solution(object):
def maxProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
maxSoFar = nums[0]
maxEndingHere = nums[0]
minEndingHere = nums[0]
for i in xrange(1, len(nums)):
maxTemp = maxEndingHere
maxEndingHere = max(maxEndingHere * nums[i], nums[i], minEndingHere * nums[i])
minEndingHere = min(minEndingHere * nums[i], nums[i], maxTemp * nums[i])
maxSoFar = max(maxSoFar, maxEndingHere)
        return maxSoFar
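        # The running minimum matters because multiplying two negatives flips sign:
        # Solution().maxProduct([2, 3, -2, 4]) == 6
        # Solution().maxProduct([-2, 3, -4]) == 24  (the two negatives combine)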
| [
"[email protected]"
] | |
89d77e01921b73651991e522906f13a394d8776d | a81d84fdb57e1b90812fc5b5b523685ba5b663c0 | /python/2021_08/Question1480.py | 2e09e851c818b1ea3b6986e1b3573c1a418ae62b | [] | no_license | KujouNozom/LeetCode | 1919081001126924daa7549493a0823702631a37 | 4de1e601274de1336d669e41f732a8cb056880b9 | refs/heads/master | 2023-07-17T12:17:45.156451 | 2021-09-04T11:57:40 | 2021-09-04T11:57:40 | 268,075,373 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | # 1480. 一维数组的动态和 [前缀和]
from typing import List
class Solution:
def runningSum(self, nums: List[int]) -> List[int]:
ans = []
pre_sum = 0
for num in nums:
pre_sum += num
ans.append(pre_sum)
return ans
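        # e.g. Solution().runningSum([1, 2, 3, 4]) == [1, 3, 6, 10]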
| [
"[email protected]"
] | |
85bd66f0c2cbda22e5141aa476470c6e2f8467c8 | 226b1c73a706f4734834196d18305d4d2c873589 | /synlib/llbin/synlibMsg.py | dd8afb0eb47864fd086bc66601a0086bcb225c2f | [] | no_license | ocakgun/vlsistuff | 43b4b07ae186b8d2360d11c57cd10b861e96bcbe | 776c07f5d0c40fe7d410b5c85e7381017d4dab64 | refs/heads/master | 2022-06-13T14:40:22.641310 | 2020-05-08T11:09:00 | 2020-05-08T11:09:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,131 | py | #! /usr/bin/python
import os,sys,string,pickle
import traceback,logs
import msgsim2c
Cells = {}
Types = {}
class cellClass:
def __init__(self,Name):
self.Name=Name
self.pins={}
self.busses={}
self.pairs={}
self.ff=0
self.latch=0
self.statetable=0
self.memory = 0
self.PinJobs={}
self.arcs=[]
def main():
if (len(sys.argv)>1):
Fname = sys.argv[1]
if os.path.exists(Fname):
os.system('/bin/rm lex.out db0.pickle')
os.system('llbin/synlib_lexer %s'%Fname)
os.system('llbin/synlibyacc.py lex.out')
os.system('llbin/simplify_pickle.py db0.pickle db1.pickle')
else:
print 'file cannot be read "%s"'%Fname
return
pythonConnection = '-python' in sys.argv
load_db0('db1.pickle')
dump_dump()
Top = ('Library',1)
LL = DataBase[Top]
Lib0 = match("library ( ?Lib ) { ?Items }",LL)
if Lib0:
Items = Lib0['Items']
scan_lib_items(Items)
else:
print 'failed library'
Fcc=open('msgsim_cells.ccc','w')
for Cell in Cells:
Cells[Cell].Types=Types
msgsim2c.cell_dump_msgsim_c(Cells[Cell],Fcc,pythonConnection)
Fcc.close()
Fcc=open('cell.descriptions','w')
for Cell in Cells:
Cells[Cell].Types=Types
msgsim2c.cell_dump_description(Cells[Cell],Fcc,pythonConnection)
Fcc.close()
def scan_lib_items(Items):
if len(Items[0])==4:
one_lib_item(Items)
else:
for Key in Items:
LL = DataBase[Key]
one_lib_item(LL)
def one_lib_item(LL):
Head = LL[0][0]
if Head in knownUselessKeys:
pass
elif (Head=='cell'):
deal_cell(LL)
elif (Head=='type'):
deal_type(LL)
else:
print 'unknown item',Head,LL
def deal_cell(LL):
Lb0 = match('cell ( ?Name ) { ?Items }',LL)
if not Lb0:
print 'bad cell %s'%str(LL)
return
Name = Lb0['Name']
Cells[Name]=cellClass(Name)
Items = Lb0['Items']
if len(Items[0])==4:
Lb2 = match('?Param : ?Val ;',Items)
if Lb2:
Cells[Name].pairs[Lb2['Param']]=Lb2['Val']
else:
print 'wrong for deal_cell '
return
for Key in Lb0['Items']:
LL2 = DataBase[Key]
Lb2 = match('?Param : ?Val ;',LL2)
taken=False
if Lb2:
taken=True
print Lb0['Name'],'cell item ',Lb2['Param'],Lb2['Val']
Cells[Name].pairs[Lb2['Param']]=Lb2['Val']
Lb2 = match('pin ( ?Pin ) { ?Items }',LL2)
if not Lb2:
Lb2 = match('pin ( ?Pin [ ?Ind ] ) { ?Items }',LL2)
if Lb2:
Pin = Lb2['Pin']
Ind = Lb2['Ind']
Lb2['Pin'] = '%s[%s]'%(Pin,Ind)
if Lb2:
taken=True
Pin = Lb2['Pin']
Cells[Name].pins[Pin]={}
Items = Lb2['Items']
if len(Items[0])==4:
Lb3 = match('?Param : ?Val ;',Items)
if Lb3:
Param,Val = (string.replace(Lb3['Param'],'"',''),string.replace(Lb3['Val'],'"',''))
Cells[Name].pins[Pin][Param]=Val
else:
for Thing in Lb2['Items']:
Lparam = getPair(Thing)
if len(Thing)==2:
LLx = DataBase[Thing]
else:
LLx=Thing
if Lparam:
Cells[Name].pins[Pin][Lparam[0]]=Lparam[1]
elif (LLx[0][0] in ['timing']):
LLy = DataBase[LLx[4]]
workOnTiming(Name,Pin,LLy)
elif (LLx[0][0] in knownUselessPinItems):
pass
else:
print Lb0['Name'],'cell pin unknown',Lb2['Pin'],LLx
Lb2 = match('bus ( ?Pin ) { ?Items }',LL2)
if Lb2:
taken=True
Bus = Lb2['Pin']
Cells[Name].busses[Bus]={}
for Thing in Lb2['Items']:
Lparam = getPair(Thing)
LLx = DataBase[Thing]
if Lparam:
Cells[Name].busses[Bus][Lparam[0]]=Lparam[1]
elif (LLx[0][0] in knownUselessPinItems):
pass
else:
print Lb0['Name'],'cell bus unknown',Lb2['Pin'],LLx
Lb2 = match('memory ( ) { ?Items }',LL2)
if Lb2:
taken=True
gather={}
for Thing in Lb2['Items']:
Lparam = getPair(Thing)
print Lb0['Name'],'cell memory',Lparam
gather[Lparam[0]]=Lparam[1]
Cells[Name].memory = gather
if LL2[0][0] in ['leakage_power','test_cell']:
taken=True
Lb2 = match('ff ( ?Toks ) { ?Pairs }',LL2)
if Lb2:
taken=True
Pairs = getPairs2(Lb2['Pairs'])
Toks = getToks(Lb2['Toks'])
Cells[Name].ff=(Toks,Pairs)
Lb2 = match('latch ( ?Toks ) { ?Pairs }',LL2)
if Lb2:
taken=True
Pairs = getPairs2(Lb2['Pairs'])
Toks = getToks(Lb2['Toks'])
Cells[Name].latch=(Toks,Pairs)
Lb2 = match('statetable ( ?Exprs ) { ?Pair }',LL2)
if Lb2:
taken=True
Pair = Lb2['Pair']
Lines = Pair[2][0]
Toks = getToks(Lb2['Exprs'])
print 'pair',Lines,'toks',Toks
Cells[Name].statetable=(Toks,Lines)
if (not taken):
print 'cell item unknown ',LL2
def workOnTiming(Name,Pin,LLy):
Pairs = getPairs(LLy)
if 'related_pin' in Pairs:
Who = Pairs['related_pin']
if 'timing_type' in Pairs:
Type = Pairs['timing_type']
else:
Type = 'combi'
Tuple = (Pin,Who,Type)
if Tuple not in Cells[Name].arcs:
Cells[Name].arcs.append(Tuple)
def getToks(List):
res=[]
for XX in List:
Tok = string.replace(XX[0],'"','')
if Tok !=',':
res.append(Tok)
return res
knownUselessPinItems = string.split('internal_power timing fall_transition rise_transition cell_rise cell_fall rise_constraint fall_constraint')
def getPair(Key):
if Key in DataBase:
LL2 = DataBase[Key]
Lb2 = match('?Param : ?Val ;',LL2)
if Lb2:
return (string.replace(Lb2['Param'],'"',''),string.replace(Lb2['Val'],'"',''))
return False
def getPairs(List):
res = {}
for Thing in List:
Lparam = getPair(Thing)
LLx = DataBase[Thing]
if Lparam:
res[Lparam[0]] = string.replace(Lparam[1],'"','')
elif (LLx[0][0] in knownUselessPinItems):
pass
else:
print 'error! get pair got %s lparam=%s llx=%s'%(str(Thing),Lparam,LLx)
return res
def getPairs2(List):
res = []
for Thing in List:
Lparam = getPair(Thing)
LLx = DataBase[Thing]
if Lparam:
Tok = string.replace(Lparam[1],'"','')
res.append((Lparam[0],Tok))
elif (LLx[0][0] in knownUselessPinItems):
pass
else:
print 'error! get pair got %s'%str(Thing)
return res
def deal_type(LL):
Lb0 = match('type ( ?Name ) { ?Pairs }',LL)
Type = Lb0['Name']
Types[Type]={}
for Key in Lb0['Pairs']:
LL2 = DataBase[Key]
Lb2 = match('?Param : ?Val ;',LL2)
if Lb2:
# print Lb0['Name'],'type lb2',Lb2['Param'],Lb2['Val']
Types[Type][Lb2['Param']]=Lb2['Val']
else:
print 'bad type thing',LL2
def load_db0(Fname):
global DataBase
File = open(Fname)
DataBase = pickle.load(File)
def match(Pattern,List):
if (len(List)==2)and(List in DataBase):
return match(Pattern,DataBase[List])
Words = string.split(Pattern)
if len(Words)!=len(List): return False
res={}
for ind,Key in enumerate(Words):
Act = List[ind]
if (Key==Act[0]):
pass
elif(Key[0]=='?'):
if len(Act)==2:
res[Key[1:]]=DataBase[Act]
elif len(Act)==4:
res[Key[1:]]=Act[0]
else:
print 'error!'
return 0
else:
return 0
return res
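# Illustrative: match('pin ( ?Pin ) { ?Items }', LL) returns a dict such as
# {'Pin': 'A', 'Items': [...]} when LL has that shape, and 0/False otherwise.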
def dump_dump():
Fout = open('ttt.ttt','w')
for Key in DataBase:
Fout.write('key %s = %s\n'%(Key,DataBase[Key]))
Fout.close()
knownUselessKeys = string.split('''
default_intrinsic_fall default_inout_pin_fall_res default_intrinsic_rise default_slope_rise
default_output_pin_fall_res default_slope_fall default_inout_pin_rise_res default_output_pin_rise_res
delay_model revision date comment time_unit
voltage_unit current_unit leakage_power_unit nom_process
nom_temperature nom_voltage capacitive_load_unit pulling_resistance_unit
default_cell_leakage_power default_fanout_load default_inout_pin_cap default_input_pin_cap
default_output_pin_cap default_max_transition default_leakage_power_density slew_derate_from_library
slew_lower_threshold_pct_fall slew_upper_threshold_pct_fall slew_lower_threshold_pct_rise slew_upper_threshold_pct_rise
input_threshold_pct_fall input_threshold_pct_rise output_threshold_pct_fall output_threshold_pct_rise
k_process_cell_fall k_process_cell_leakage_power k_process_cell_rise k_process_fall_transition
k_process_hold_fall k_process_hold_rise k_process_internal_power k_process_min_pulse_width_high
k_process_min_pulse_width_low k_process_pin_cap k_process_recovery_fall k_process_recovery_rise
k_process_rise_transition k_process_setup_fall k_process_setup_rise k_process_wire_cap
k_process_wire_res k_temp_cell_fall k_temp_cell_rise k_temp_hold_fall
k_temp_hold_rise k_temp_min_pulse_width_high k_temp_min_pulse_width_low k_temp_min_period
k_temp_rise_propagation k_temp_fall_propagation k_temp_rise_transition k_temp_fall_transition
k_temp_recovery_fall k_temp_recovery_rise k_temp_setup_fall k_temp_setup_rise
k_volt_cell_fall k_volt_cell_rise k_volt_hold_fall k_volt_hold_rise
k_volt_min_pulse_width_high k_volt_min_pulse_width_low k_volt_min_period k_volt_rise_propagation
k_volt_fall_propagation k_volt_rise_transition k_volt_fall_transition k_volt_recovery_fall
k_volt_recovery_rise k_volt_setup_fall k_volt_setup_rise operating_conditions
default_operating_conditions wire_load output_voltage input_voltage
input_voltage lu_table_template lu_table_template lu_table_template
power_lut_template library_features technology simulation define_cell_area
define_cell_area wire_load_selection wire_load_selection default_wire_load_selection default_wire_load
default_wire_load_mode in_place_swap_mode default_max_fanout k_volt_cell_leakage_power
k_temp_cell_leakage_power k_volt_internal_power k_temp_internal_power
k_temp_wire_cap k_volt_wire_cap k_temp_wire_res k_volt_wire_res k_temp_pin_cap k_volt_pin_cap
voltage_map
''')
main()
| [
"[email protected]"
] | |
baf5f510f103558e54208fd4851c7324a7084c61 | 2f219acf03442e2aa502cd8fffce02f3c1118298 | /Py2D/Py2D_projectBuilder.py | cce1aec2b30db8faf14bf3a0ec396d22613990c0 | [] | no_license | AFlyingCar/Py2D | 27a5ec03c015de16533978315d654c338d9fa4f7 | c06d95ac0f716598a7be2af0d7cfaaeebd189bbc | refs/heads/master | 2020-12-24T06:04:24.821244 | 2016-07-05T04:00:18 | 2016-07-05T04:00:18 | 30,786,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | #########################
# AFlyingCar #
# 2/19/15 #
# Py2D Project Builder #
#########################
import shutil,errno,os
PY2D_INSTALLATION_PATH = "C:\\Program Files\\Py2D"
# PY2D_INSTALLATION_PATH = ".\\Py2D" # Use to build with lastet build rather than the recommended one
def copyFiles(source,target):
try:
shutil.copytree(source,target)
except OSError as exc:
if exc.errno == errno.ENOTDIR:
shutil.copy(source,target)
else:
raise
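# copyFiles behaves like shutil.copytree but falls back to a plain file copy
# when the source is a single file (copytree raises ENOTDIR in that case).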
def buildProject():
verifyPy2DInstallation()
path = raw_input("Full path to the project: ")
bin_path = os.path.join(path,"bin\\Py2D")
resource_path = os.path.join(path,"resources")
config_path = os.path.join(path,"Settings")
if not os.path.exists(resource_path):
os.makedirs(resource_path)
if not os.path.exists(config_path):
os.makedirs(config_path)
    # Slice off the trailing "Py2D" so __init__.py lands in the bin directory,
    # making it a Python package (assumed intent of the original slice).
    open(bin_path[:-4] + "__init__.py", 'w').write("")
try:
copyFiles(PY2D_INSTALLATION_PATH,bin_path)
except WindowsError as e:
print "Files already copied. Skipping."
def verifyPy2DInstallation():
if not os.path.exists(PY2D_INSTALLATION_PATH):
print "Unable to find valid copy of Py2D. Please check that it properly installed in %s." % PY2D_INSTALLATION_PATH
raise OSError("ERROR - Py2D not installed.")
if __name__ == '__main__':
buildProject() | [
"[email protected]"
] | |
f7b3500ae91f51a525d53781d284efa1a7bfc990 | 72e11a80587342b3f278d4df18406cd4ce7531e8 | /dulwich/porcelain.py | c0646f2a1aeeb047e6eaace836bf41384d626691 | [] | no_license | EnjoyLifeFund/Debian_py36_packages | 740666f290cef73a4f634558ccf3fd4926addeda | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | refs/heads/master | 2021-08-24T02:17:24.349195 | 2017-12-06T06:18:35 | 2017-12-06T06:18:35 | 113,167,612 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,916 | py | # porcelain.py -- Porcelain-like layer on top of Dulwich
# Copyright (C) 2013 Jelmer Vernooij <[email protected]>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Simple wrapper that provides porcelain-like functions on top of Dulwich.
Currently implemented:
* archive
* add
* branch{_create,_delete,_list}
* check-ignore
* checkout
* clone
* commit
* commit-tree
* daemon
* diff-tree
* fetch
* init
* ls-remote
* ls-tree
* pull
* push
* rm
* remote{_add}
* receive-pack
* reset
* rev-list
* tag{_create,_delete,_list}
* upload-pack
* update-server-info
* status
* symbolic-ref
These functions are meant to behave similarly to the git subcommands.
Differences in behaviour are considered bugs.
"""
from collections import namedtuple
from contextlib import (
closing,
contextmanager,
)
from io import BytesIO
import os
import posixpath
import stat
import sys
import time
from dulwich.archive import (
tar_stream,
)
from dulwich.client import (
get_transport_and_path,
)
from dulwich.config import (
StackedConfig,
)
from dulwich.diff_tree import (
CHANGE_ADD,
CHANGE_DELETE,
CHANGE_MODIFY,
CHANGE_RENAME,
CHANGE_COPY,
RENAME_CHANGE_TYPES,
)
from dulwich.errors import (
SendPackError,
UpdateRefsError,
)
from dulwich.ignore import IgnoreFilterManager
from dulwich.index import (
blob_from_path_and_stat,
get_unstaged_changes,
)
from dulwich.object_store import (
tree_lookup_path,
)
from dulwich.objects import (
Commit,
Tag,
format_timezone,
parse_timezone,
pretty_format_tree_entry,
)
from dulwich.objectspec import (
parse_commit,
parse_object,
parse_ref,
parse_reftuples,
parse_tree,
)
from dulwich.pack import (
write_pack_index,
write_pack_objects,
)
from dulwich.patch import write_tree_diff
from dulwich.protocol import (
Protocol,
ZERO_SHA,
)
from dulwich.refs import ANNOTATED_TAG_SUFFIX
from dulwich.repo import (BaseRepo, Repo)
from dulwich.server import (
FileSystemBackend,
TCPGitServer,
ReceivePackHandler,
UploadPackHandler,
update_server_info as server_update_server_info,
)
# Module level tuple definition for status output
GitStatus = namedtuple('GitStatus', 'staged unstaged untracked')
default_bytes_out_stream = getattr(sys.stdout, 'buffer', sys.stdout)
default_bytes_err_stream = getattr(sys.stderr, 'buffer', sys.stderr)
DEFAULT_ENCODING = 'utf-8'
class RemoteExists(Exception):
"""Raised when the remote already exists."""
def open_repo(path_or_repo):
"""Open an argument that can be a repository or a path for a repository."""
if isinstance(path_or_repo, BaseRepo):
return path_or_repo
return Repo(path_or_repo)
@contextmanager
def _noop_context_manager(obj):
"""Context manager that has the same api as closing but does nothing."""
yield obj
def open_repo_closing(path_or_repo):
"""Open an argument that can be a repository or a path for a repository.
returns a context manager that will close the repo on exit if the argument
is a path, else does nothing if the argument is a repo.
"""
if isinstance(path_or_repo, BaseRepo):
return _noop_context_manager(path_or_repo)
return closing(Repo(path_or_repo))
def path_to_tree_path(repopath, path):
"""Convert a path to a path usable in e.g. an index.
    :param repopath: Repository path, used as the base for the relative path
    :param path: A path
    :return: A path formatted for use in e.g. an index
    """
    path = os.path.relpath(path, repopath)
if os.path.sep != '/':
path = path.replace(os.path.sep, '/')
return path.encode(sys.getfilesystemencoding())
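# e.g. on POSIX: path_to_tree_path('/repo', '/repo/sub/a.txt') == b'sub/a.txt'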
def archive(repo, committish=None, outstream=default_bytes_out_stream,
errstream=default_bytes_err_stream):
"""Create an archive.
:param repo: Path of repository for which to generate an archive.
:param committish: Commit SHA1 or ref to use
:param outstream: Output stream (defaults to stdout)
:param errstream: Error stream (defaults to stderr)
"""
if committish is None:
committish = "HEAD"
with open_repo_closing(repo) as repo_obj:
c = repo_obj[committish]
for chunk in tar_stream(
repo_obj.object_store, repo_obj.object_store[c.tree],
c.commit_time):
outstream.write(chunk)
def update_server_info(repo="."):
"""Update server info files for a repository.
:param repo: path to the repository
"""
with open_repo_closing(repo) as r:
server_update_server_info(r)
def symbolic_ref(repo, ref_name, force=False):
"""Set git symbolic ref into HEAD.
:param repo: path to the repository
:param ref_name: short name of the new ref
:param force: force settings without checking if it exists in refs/heads
"""
with open_repo_closing(repo) as repo_obj:
ref_path = b'refs/heads/' + ref_name
if not force and ref_path not in repo_obj.refs.keys():
raise ValueError('fatal: ref `%s` is not a ref' % ref_name)
repo_obj.refs.set_symbolic_ref(b'HEAD', ref_path)
def commit(repo=".", message=None, author=None, committer=None):
"""Create a new commit.
:param repo: Path to repository
:param message: Optional commit message
:param author: Optional author name and email
:param committer: Optional committer name and email
:return: SHA1 of the new commit
"""
# FIXME: Support --all argument
# FIXME: Support --signoff argument
with open_repo_closing(repo) as r:
return r.do_commit(message=message, author=author, committer=committer)
def commit_tree(repo, tree, message=None, author=None, committer=None):
"""Create a new commit object.
:param repo: Path to repository
:param tree: An existing tree object
:param author: Optional author name and email
:param committer: Optional committer name and email
"""
with open_repo_closing(repo) as r:
return r.do_commit(
message=message, tree=tree, committer=committer, author=author)
def init(path=".", bare=False):
"""Create a new git repository.
:param path: Path to repository.
:param bare: Whether to create a bare repository.
:return: A Repo instance
"""
if not os.path.exists(path):
os.mkdir(path)
if bare:
return Repo.init_bare(path)
else:
return Repo.init(path)
def clone(source, target=None, bare=False, checkout=None,
errstream=default_bytes_err_stream, outstream=None,
origin=b"origin"):
"""Clone a local or remote git repository.
:param source: Path or URL for source repository
:param target: Path to target repository (optional)
:param bare: Whether or not to create a bare repository
:param checkout: Whether or not to check-out HEAD after cloning
:param errstream: Optional stream to write progress to
:param outstream: Optional stream to write progress to (deprecated)
:param origin: Name of remote from the repository used to clone
:return: The new repository
"""
if outstream is not None:
import warnings
warnings.warn(
"outstream= has been deprecated in favour of errstream=.",
DeprecationWarning, stacklevel=3)
errstream = outstream
if checkout is None:
checkout = (not bare)
if checkout and bare:
raise ValueError("checkout and bare are incompatible")
config = StackedConfig.default()
client, host_path = get_transport_and_path(source, config=config)
if target is None:
target = host_path.split("/")[-1]
if not os.path.exists(target):
os.mkdir(target)
if bare:
r = Repo.init_bare(target)
else:
r = Repo.init(target)
try:
remote_refs = client.fetch(
host_path, r, determine_wants=r.object_store.determine_wants_all,
progress=errstream.write)
r.refs.import_refs(
b'refs/remotes/' + origin,
{n[len(b'refs/heads/'):]: v for (n, v) in remote_refs.items()
if n.startswith(b'refs/heads/')})
r.refs.import_refs(
b'refs/tags',
{n[len(b'refs/tags/'):]: v for (n, v) in remote_refs.items()
if n.startswith(b'refs/tags/') and
not n.endswith(ANNOTATED_TAG_SUFFIX)})
target_config = r.get_config()
if not isinstance(source, bytes):
source = source.encode(DEFAULT_ENCODING)
target_config.set((b'remote', origin), b'url', source)
target_config.set(
(b'remote', origin), b'fetch',
b'+refs/heads/*:refs/remotes/' + origin + b'/*')
target_config.write_to_path()
if checkout and not bare:
# TODO(jelmer): Support symref capability,
# https://github.com/jelmer/dulwich/issues/485
try:
head = r[remote_refs[b"HEAD"]]
except KeyError:
pass
else:
r[b'HEAD'] = head.id
errstream.write(b'Checking out ' + head.id + b'\n')
r.reset_index(head.tree)
except BaseException:
r.close()
raise
return r
def add(repo=".", paths=None):
"""Add files to the staging area.
:param repo: Repository for the files
:param paths: Paths to add. No value passed stages all modified files.
:return: Tuple with set of added files and ignored files
"""
ignored = set()
with open_repo_closing(repo) as r:
ignore_manager = IgnoreFilterManager.from_repo(r)
if not paths:
paths = list(
get_untracked_paths(os.getcwd(), r.path, r.open_index()))
relpaths = []
if not isinstance(paths, list):
paths = [paths]
for p in paths:
relpath = os.path.relpath(p, r.path)
if relpath.startswith('../'):
raise ValueError('path %r is not in repo' % relpath)
# FIXME: Support patterns, directories.
if ignore_manager.is_ignored(relpath):
ignored.add(relpath)
continue
relpaths.append(relpath)
r.stage(relpaths)
return (relpaths, ignored)
def remove(repo=".", paths=None, cached=False):
"""Remove files from the staging area.
:param repo: Repository for the files
:param paths: Paths to remove
"""
with open_repo_closing(repo) as r:
index = r.open_index()
for p in paths:
full_path = os.path.abspath(p).encode(sys.getfilesystemencoding())
tree_path = path_to_tree_path(r.path, p)
try:
index_sha = index[tree_path].sha
except KeyError:
raise Exception('%s did not match any files' % p)
if not cached:
try:
st = os.lstat(full_path)
except OSError:
pass
else:
try:
blob = blob_from_path_and_stat(full_path, st)
except IOError:
pass
else:
try:
committed_sha = tree_lookup_path(
r.__getitem__, r[r.head()].tree, tree_path)[1]
except KeyError:
committed_sha = None
if blob.id != index_sha and index_sha != committed_sha:
raise Exception(
'file has staged content differing '
'from both the file and head: %s' % p)
if index_sha != committed_sha:
raise Exception(
'file has staged changes: %s' % p)
os.remove(full_path)
del index[tree_path]
index.write()
rm = remove
def commit_decode(commit, contents, default_encoding=DEFAULT_ENCODING):
if commit.encoding is not None:
return contents.decode(commit.encoding, "replace")
return contents.decode(default_encoding, "replace")
def print_commit(commit, decode, outstream=sys.stdout):
"""Write a human-readable commit log entry.
:param commit: A `Commit` object
:param outstream: A stream file to write to
"""
outstream.write("-" * 50 + "\n")
outstream.write("commit: " + commit.id.decode('ascii') + "\n")
if len(commit.parents) > 1:
outstream.write(
"merge: " +
"...".join([c.decode('ascii') for c in commit.parents[1:]]) + "\n")
outstream.write("Author: " + decode(commit.author) + "\n")
if commit.author != commit.committer:
outstream.write("Committer: " + decode(commit.committer) + "\n")
time_tuple = time.gmtime(commit.author_time + commit.author_timezone)
time_str = time.strftime("%a %b %d %Y %H:%M:%S", time_tuple)
timezone_str = format_timezone(commit.author_timezone).decode('ascii')
outstream.write("Date: " + time_str + " " + timezone_str + "\n")
outstream.write("\n")
outstream.write(decode(commit.message) + "\n")
outstream.write("\n")
def print_tag(tag, decode, outstream=sys.stdout):
"""Write a human-readable tag.
:param tag: A `Tag` object
:param decode: Function for decoding bytes to unicode string
:param outstream: A stream to write to
"""
outstream.write("Tagger: " + decode(tag.tagger) + "\n")
outstream.write("Date: " + decode(tag.tag_time) + "\n")
outstream.write("\n")
outstream.write(decode(tag.message) + "\n")
outstream.write("\n")
def show_blob(repo, blob, decode, outstream=sys.stdout):
"""Write a blob to a stream.
:param repo: A `Repo` object
:param blob: A `Blob` object
:param decode: Function for decoding bytes to unicode string
:param outstream: A stream file to write to
"""
outstream.write(decode(blob.data))
def show_commit(repo, commit, decode, outstream=sys.stdout):
"""Show a commit to a stream.
:param repo: A `Repo` object
:param commit: A `Commit` object
:param decode: Function for decoding bytes to unicode string
:param outstream: Stream to write to
"""
print_commit(commit, decode=decode, outstream=outstream)
if commit.parents:
parent_commit = repo[commit.parents[0]]
base_tree = parent_commit.tree
else:
base_tree = None
diffstream = BytesIO()
write_tree_diff(
diffstream,
repo.object_store, base_tree, commit.tree)
diffstream.seek(0)
outstream.write(
diffstream.getvalue().decode(
commit.encoding or DEFAULT_ENCODING, 'replace'))
def show_tree(repo, tree, decode, outstream=sys.stdout):
"""Print a tree to a stream.
:param repo: A `Repo` object
:param tree: A `Tree` object
:param decode: Function for decoding bytes to unicode string
:param outstream: Stream to write to
"""
for n in tree:
outstream.write(decode(n) + "\n")
def show_tag(repo, tag, decode, outstream=sys.stdout):
"""Print a tag to a stream.
:param repo: A `Repo` object
:param tag: A `Tag` object
:param decode: Function for decoding bytes to unicode string
:param outstream: Stream to write to
"""
print_tag(tag, decode, outstream)
    show_object(repo, repo[tag.object[1]], decode, outstream)
def show_object(repo, obj, decode, outstream):
return {
b"tree": show_tree,
b"blob": show_blob,
b"commit": show_commit,
b"tag": show_tag,
}[obj.type_name](repo, obj, decode, outstream)
def print_name_status(changes):
"""Print a simple status summary, listing changed files.
"""
for change in changes:
if not change:
continue
if isinstance(change, list):
change = change[0]
if change.type == CHANGE_ADD:
path1 = change.new.path
path2 = ''
kind = 'A'
elif change.type == CHANGE_DELETE:
path1 = change.old.path
path2 = ''
kind = 'D'
elif change.type == CHANGE_MODIFY:
path1 = change.new.path
path2 = ''
kind = 'M'
elif change.type in RENAME_CHANGE_TYPES:
path1 = change.old.path
path2 = change.new.path
if change.type == CHANGE_RENAME:
kind = 'R'
elif change.type == CHANGE_COPY:
kind = 'C'
yield '%-8s%-20s%-20s' % (kind, path1, path2)
def log(repo=".", paths=None, outstream=sys.stdout, max_entries=None,
reverse=False, name_status=False):
"""Write commit logs.
:param repo: Path to repository
:param paths: Optional set of specific paths to print entries for
:param outstream: Stream to write log output to
:param reverse: Reverse order in which entries are printed
:param name_status: Print name status
:param max_entries: Optional maximum number of entries to display
"""
with open_repo_closing(repo) as r:
walker = r.get_walker(
max_entries=max_entries, paths=paths, reverse=reverse)
for entry in walker:
def decode(x):
return commit_decode(entry.commit, x)
print_commit(entry.commit, decode, outstream)
if name_status:
outstream.writelines(
[l+'\n' for l in print_name_status(entry.changes())])
# TODO(jelmer): better default for encoding?
def show(repo=".", objects=None, outstream=sys.stdout,
default_encoding=DEFAULT_ENCODING):
"""Print the changes in a commit.
:param repo: Path to repository
:param objects: Objects to show (defaults to [HEAD])
:param outstream: Stream to write to
:param default_encoding: Default encoding to use if none is set in the
commit
"""
if objects is None:
objects = ["HEAD"]
if not isinstance(objects, list):
objects = [objects]
with open_repo_closing(repo) as r:
for objectish in objects:
o = parse_object(r, objectish)
if isinstance(o, Commit):
def decode(x):
return commit_decode(o, x, default_encoding)
else:
def decode(x):
return x.decode(default_encoding)
show_object(r, o, decode, outstream)
def diff_tree(repo, old_tree, new_tree, outstream=sys.stdout):
"""Compares the content and mode of blobs found via two tree objects.
:param repo: Path to repository
:param old_tree: Id of old tree
:param new_tree: Id of new tree
:param outstream: Stream to write to
"""
with open_repo_closing(repo) as r:
write_tree_diff(outstream, r.object_store, old_tree, new_tree)
def rev_list(repo, commits, outstream=sys.stdout):
"""Lists commit objects in reverse chronological order.
:param repo: Path to repository
:param commits: Commits over which to iterate
:param outstream: Stream to write to
"""
with open_repo_closing(repo) as r:
for entry in r.get_walker(include=[r[c].id for c in commits]):
outstream.write(entry.commit.id + b"\n")
def tag(*args, **kwargs):
import warnings
warnings.warn("tag has been deprecated in favour of tag_create.",
DeprecationWarning)
return tag_create(*args, **kwargs)
def tag_create(
repo, tag, author=None, message=None, annotated=False,
objectish="HEAD", tag_time=None, tag_timezone=None):
"""Creates a tag in git via dulwich calls:
:param repo: Path to repository
:param tag: tag string
:param author: tag author (optional, if annotated is set)
:param message: tag message (optional)
:param annotated: whether to create an annotated tag
:param objectish: object the tag should point at, defaults to HEAD
:param tag_time: Optional time for annotated tag
:param tag_timezone: Optional timezone for annotated tag
"""
with open_repo_closing(repo) as r:
object = parse_object(r, objectish)
if annotated:
# Create the tag object
tag_obj = Tag()
if author is None:
# TODO(jelmer): Don't use repo private method.
author = r._get_user_identity()
tag_obj.tagger = author
tag_obj.message = message
tag_obj.name = tag
tag_obj.object = (type(object), object.id)
if tag_time is None:
tag_time = int(time.time())
tag_obj.tag_time = tag_time
if tag_timezone is None:
# TODO(jelmer) Use current user timezone rather than UTC
tag_timezone = 0
elif isinstance(tag_timezone, str):
tag_timezone = parse_timezone(tag_timezone)
tag_obj.tag_timezone = tag_timezone
r.object_store.add_object(tag_obj)
tag_id = tag_obj.id
else:
tag_id = object.id
r.refs[b'refs/tags/' + tag] = tag_id
def list_tags(*args, **kwargs):
import warnings
warnings.warn("list_tags has been deprecated in favour of tag_list.",
DeprecationWarning)
return tag_list(*args, **kwargs)
def tag_list(repo, outstream=sys.stdout):
"""List all tags.
:param repo: Path to repository
:param outstream: Stream to write tags to
"""
with open_repo_closing(repo) as r:
tags = sorted(r.refs.as_dict(b"refs/tags"))
return tags
def tag_delete(repo, name):
"""Remove a tag.
:param repo: Path to repository
:param name: Name of tag to remove
"""
with open_repo_closing(repo) as r:
if isinstance(name, bytes):
names = [name]
elif isinstance(name, list):
names = name
else:
raise TypeError("Unexpected tag name type %r" % name)
for name in names:
del r.refs[b"refs/tags/" + name]
def reset(repo, mode, treeish="HEAD"):
"""Reset current HEAD to the specified state.
:param repo: Path to repository
:param mode: Mode ("hard", "soft", "mixed")
:param treeish: Treeish to reset to
"""
if mode != "hard":
raise ValueError("hard is the only mode currently supported")
with open_repo_closing(repo) as r:
tree = parse_tree(r, treeish)
r.reset_index(tree.id)
def push(repo, remote_location, refspecs,
outstream=default_bytes_out_stream,
errstream=default_bytes_err_stream):
"""Remote push with dulwich via dulwich.client
:param repo: Path to repository
:param remote_location: Location of the remote
:param refspecs: Refs to push to remote
:param outstream: A stream file to write output
:param errstream: A stream file to write errors
"""
# Open the repo
with open_repo_closing(repo) as r:
# Get the client and path
client, path = get_transport_and_path(
remote_location, config=r.get_config_stack())
selected_refs = []
def update_refs(refs):
selected_refs.extend(parse_reftuples(r.refs, refs, refspecs))
new_refs = {}
# TODO: Handle selected_refs == {None: None}
for (lh, rh, force) in selected_refs:
if lh is None:
new_refs[rh] = ZERO_SHA
else:
new_refs[rh] = r.refs[lh]
return new_refs
err_encoding = getattr(errstream, 'encoding', None) or DEFAULT_ENCODING
remote_location_bytes = client.get_url(path).encode(err_encoding)
try:
client.send_pack(
path, update_refs, r.object_store.generate_pack_contents,
progress=errstream.write)
errstream.write(
b"Push to " + remote_location_bytes + b" successful.\n")
except (UpdateRefsError, SendPackError) as e:
errstream.write(b"Push to " + remote_location_bytes +
b" failed -> " + e.message.encode(err_encoding) +
b"\n")
def pull(repo, remote_location=None, refspecs=None,
outstream=default_bytes_out_stream,
errstream=default_bytes_err_stream):
"""Pull from remote via dulwich.client
:param repo: Path to repository
:param remote_location: Location of the remote
    :param refspecs: refspecs to fetch
    :param outstream: A stream file to write output to
    :param errstream: A stream file to write errors to
"""
# Open the repo
with open_repo_closing(repo) as r:
if remote_location is None:
# TODO(jelmer): Lookup 'remote' for current branch in config
raise NotImplementedError(
"looking up remote from branch config not supported yet")
if refspecs is None:
refspecs = [b"HEAD"]
selected_refs = []
def determine_wants(remote_refs):
selected_refs.extend(
parse_reftuples(remote_refs, r.refs, refspecs))
return [remote_refs[lh] for (lh, rh, force) in selected_refs]
client, path = get_transport_and_path(
remote_location, config=r.get_config_stack())
remote_refs = client.fetch(
path, r, progress=errstream.write, determine_wants=determine_wants)
for (lh, rh, force) in selected_refs:
r.refs[rh] = remote_refs[lh]
if selected_refs:
r[b'HEAD'] = remote_refs[selected_refs[0][1]]
# Perform 'git checkout .' - syncs staged changes
tree = r[b"HEAD"].tree
r.reset_index(tree=tree)
def status(repo=".", ignored=False):
"""Returns staged, unstaged, and untracked changes relative to the HEAD.
:param repo: Path to repository or repository object
    :param ignored: Whether to include ignored files in `untracked`
:return: GitStatus tuple,
staged - list of staged paths (diff index/HEAD)
unstaged - list of unstaged paths (diff index/working-tree)
untracked - list of untracked, un-ignored & non-.git paths
"""
with open_repo_closing(repo) as r:
# 1. Get status of staged
tracked_changes = get_tree_changes(r)
# 2. Get status of unstaged
index = r.open_index()
unstaged_changes = list(get_unstaged_changes(index, r.path))
ignore_manager = IgnoreFilterManager.from_repo(r)
untracked_paths = get_untracked_paths(r.path, r.path, index)
if ignored:
untracked_changes = list(untracked_paths)
else:
untracked_changes = [
p for p in untracked_paths
if not ignore_manager.is_ignored(p)]
return GitStatus(tracked_changes, unstaged_changes, untracked_changes)
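# Example (repo path is illustrative):
#   st = status('/tmp/demo')
#   st.staged is a dict with 'add'/'delete'/'modify' lists of tree paths;
#   st.unstaged and st.untracked are lists of paths.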
def get_untracked_paths(frompath, basepath, index):
"""Get untracked paths.
    :param frompath: Path to walk
:param basepath: Path to compare to
:param index: Index to check against
"""
# If nothing is specified, add all non-ignored files.
for dirpath, dirnames, filenames in os.walk(frompath):
# Skip .git and below.
if '.git' in dirnames:
dirnames.remove('.git')
if dirpath != basepath:
continue
if '.git' in filenames:
filenames.remove('.git')
if dirpath != basepath:
continue
for filename in filenames:
ap = os.path.join(dirpath, filename)
ip = path_to_tree_path(basepath, ap)
if ip not in index:
yield os.path.relpath(ap, frompath)
def get_tree_changes(repo):
"""Return add/delete/modify changes to tree by comparing index to HEAD.
:param repo: repo path or object
:return: dict with lists for each type of change
"""
with open_repo_closing(repo) as r:
index = r.open_index()
# Compares the Index to the HEAD & determines changes
# Iterate through the changes and report add/delete/modify
# TODO: call out to dulwich.diff_tree somehow.
tracked_changes = {
'add': [],
'delete': [],
'modify': [],
}
try:
tree_id = r[b'HEAD'].tree
except KeyError:
tree_id = None
for change in index.changes_from_tree(r.object_store, tree_id):
if not change[0][0]:
tracked_changes['add'].append(change[0][1])
elif not change[0][1]:
tracked_changes['delete'].append(change[0][0])
elif change[0][0] == change[0][1]:
tracked_changes['modify'].append(change[0][0])
else:
raise AssertionError('git mv ops not yet supported')
return tracked_changes
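# Example (editor's sketch): the returned dict is keyed 'add'/'delete'/
# 'modify', each holding a list of paths (bytes). Repo path is hypothetical.
#
#     changes = get_tree_changes("/tmp/myrepo")
#     for path in changes['modify']:
#         print(path)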
def daemon(path=".", address=None, port=None):
"""Run a daemon serving Git requests over TCP/IP.
:param path: Path to the directory to serve.
:param address: Optional address to listen on (defaults to ::)
:param port: Optional port to listen on (defaults to TCP_GIT_PORT)
"""
# TODO(jelmer): Support git-daemon-export-ok and --export-all.
backend = FileSystemBackend(path)
server = TCPGitServer(backend, address, port)
server.serve_forever()
def web_daemon(path=".", address=None, port=None):
"""Run a daemon serving Git requests over HTTP.
:param path: Path to the directory to serve
:param address: Optional address to listen on (defaults to ::)
:param port: Optional port to listen on (defaults to 80)
"""
from dulwich.web import (
make_wsgi_chain,
make_server,
WSGIRequestHandlerLogger,
WSGIServerLogger)
backend = FileSystemBackend(path)
app = make_wsgi_chain(backend)
server = make_server(address, port, app,
handler_class=WSGIRequestHandlerLogger,
server_class=WSGIServerLogger)
server.serve_forever()
def upload_pack(path=".", inf=None, outf=None):
"""Upload a pack file after negotiating its contents using smart protocol.
:param path: Path to the repository
:param inf: Input stream to communicate with client
:param outf: Output stream to communicate with client
"""
if outf is None:
outf = getattr(sys.stdout, 'buffer', sys.stdout)
if inf is None:
inf = getattr(sys.stdin, 'buffer', sys.stdin)
path = os.path.expanduser(path)
backend = FileSystemBackend(path)
def send_fn(data):
outf.write(data)
outf.flush()
proto = Protocol(inf.read, send_fn)
handler = UploadPackHandler(backend, [path], proto)
# FIXME: Catch exceptions and write a single-line summary to outf.
handler.handle()
return 0
def receive_pack(path=".", inf=None, outf=None):
"""Receive a pack file after negotiating its contents using smart protocol.
:param path: Path to the repository
:param inf: Input stream to communicate with client
:param outf: Output stream to communicate with client
"""
if outf is None:
outf = getattr(sys.stdout, 'buffer', sys.stdout)
if inf is None:
inf = getattr(sys.stdin, 'buffer', sys.stdin)
path = os.path.expanduser(path)
backend = FileSystemBackend(path)
def send_fn(data):
outf.write(data)
outf.flush()
proto = Protocol(inf.read, send_fn)
handler = ReceivePackHandler(backend, [path], proto)
# FIXME: Catch exceptions and write a single-line summary to outf.
handler.handle()
return 0
def branch_delete(repo, name):
"""Delete a branch.
:param repo: Path to the repository
:param name: Name of the branch
"""
with open_repo_closing(repo) as r:
if isinstance(name, bytes):
names = [name]
elif isinstance(name, list):
names = name
else:
raise TypeError("Unexpected branch name type %r" % name)
for name in names:
del r.refs[b"refs/heads/" + name]
def branch_create(repo, name, objectish=None, force=False):
"""Create a branch.
:param repo: Path to the repository
:param name: Name of the new branch
:param objectish: Target object to point new branch at (defaults to HEAD)
:param force: Force creation of branch, even if it already exists
"""
with open_repo_closing(repo) as r:
if objectish is None:
objectish = "HEAD"
object = parse_object(r, objectish)
refname = b"refs/heads/" + name
if refname in r.refs and not force:
raise KeyError("Branch with name %s already exists." % name)
r.refs[refname] = object.id
def branch_list(repo):
"""List all branches.
:param repo: Path to the repository
"""
with open_repo_closing(repo) as r:
return r.refs.keys(base=b"refs/heads/")
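# Example (editor's sketch): creating a branch at HEAD, then listing branch
# names. Branch names are bytes; the repo path is hypothetical.
#
#     branch_create("/tmp/myrepo", b"feature-x")
#     print(branch_list("/tmp/myrepo"))  # e.g. contains b'feature-x'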
def fetch(repo, remote_location, outstream=sys.stdout,
errstream=default_bytes_err_stream):
"""Fetch objects from a remote server.
:param repo: Path to the repository
:param remote_location: String identifying a remote server
:param outstream: Output stream (defaults to stdout)
:param errstream: Error stream (defaults to stderr)
:return: Dictionary with refs on the remote
"""
with open_repo_closing(repo) as r:
client, path = get_transport_and_path(
remote_location, config=r.get_config_stack())
remote_refs = client.fetch(path, r, progress=errstream.write)
return remote_refs
def ls_remote(remote):
"""List the refs in a remote.
:param remote: Remote repository location
:return: Dictionary with remote refs
"""
config = StackedConfig.default()
client, host_path = get_transport_and_path(remote, config=config)
return client.get_refs(host_path)
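# Example (editor's sketch): inspecting a remote's refs without cloning.
# The URL is hypothetical.
#
#     refs = ls_remote("https://example.com/repo.git")
#     print(refs[b"HEAD"])  # SHA that the remote HEAD points at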
def repack(repo):
"""Repack loose files in a repository.
Currently this only packs loose objects.
:param repo: Path to the repository
"""
with open_repo_closing(repo) as r:
r.object_store.pack_loose_objects()
def pack_objects(repo, object_ids, packf, idxf, delta_window_size=None):
"""Pack objects into a file.
:param repo: Path to the repository
:param object_ids: List of object ids to write
:param packf: File-like object to write to
    :param idxf: File-like object to write to (can be None)
    :param delta_window_size: Sliding window size for pack delta compression
    """
with open_repo_closing(repo) as r:
entries, data_sum = write_pack_objects(
packf,
r.object_store.iter_shas((oid, None) for oid in object_ids),
delta_window_size=delta_window_size)
if idxf is not None:
entries = sorted([(k, v[0], v[1]) for (k, v) in entries.items()])
write_pack_index(idxf, entries, data_sum)
def ls_tree(repo, treeish=b"HEAD", outstream=sys.stdout, recursive=False,
name_only=False):
"""List contents of a tree.
:param repo: Path to the repository
    :param treeish: Tree id to list
:param outstream: Output stream (defaults to stdout)
:param recursive: Whether to recursively list files
:param name_only: Only print item name
"""
def list_tree(store, treeid, base):
for (name, mode, sha) in store[treeid].iteritems():
if base:
name = posixpath.join(base, name)
if name_only:
outstream.write(name + b"\n")
else:
outstream.write(pretty_format_tree_entry(name, mode, sha))
            if recursive and stat.S_ISDIR(mode):
list_tree(store, sha, name)
with open_repo_closing(repo) as r:
tree = parse_tree(r, treeish)
list_tree(r.object_store, tree.id, "")
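# Example (editor's sketch): listing names in the HEAD tree. The entries are
# written as bytes, so pass a bytes-capable stream on Python 3.
#
#     import sys
#     out = getattr(sys.stdout, 'buffer', sys.stdout)
#     ls_tree("/tmp/myrepo", recursive=True, name_only=True, outstream=out)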
def remote_add(repo, name, url):
"""Add a remote.
:param repo: Path to the repository
:param name: Remote name
:param url: Remote URL
"""
if not isinstance(name, bytes):
name = name.encode(DEFAULT_ENCODING)
if not isinstance(url, bytes):
url = url.encode(DEFAULT_ENCODING)
with open_repo_closing(repo) as r:
c = r.get_config()
section = (b'remote', name)
if c.has_section(section):
raise RemoteExists(section)
c.set(section, b"url", url)
c.write_to_path()
def check_ignore(repo, paths, no_index=False):
"""Debug gitignore files.
:param repo: Path to the repository
:param paths: List of paths to check for
:param no_index: Don't check index
:return: List of ignored files
"""
with open_repo_closing(repo) as r:
index = r.open_index()
ignore_manager = IgnoreFilterManager.from_repo(r)
for path in paths:
if os.path.isabs(path):
path = os.path.relpath(path, r.path)
if not no_index and path_to_tree_path(r.path, path) in index:
continue
if ignore_manager.is_ignored(path):
yield path
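# Example (editor's sketch): check_ignore is a generator; only paths matched
# by the gitignore rules are yielded. Paths are hypothetical.
#
#     for ignored in check_ignore("/tmp/myrepo", ["build/out.o", "src/main.c"]):
#         print(ignored)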
def update_head(repo, target, detached=False, new_branch=None):
"""Update HEAD to point at a new branch/commit.
Note that this does not actually update the working tree.
:param repo: Path to the repository
    :param detached: Create a detached head
:param target: Branch or committish to switch to
:param new_branch: New branch to create
"""
with open_repo_closing(repo) as r:
if new_branch is not None:
to_set = b"refs/heads/" + new_branch.encode(DEFAULT_ENCODING)
else:
to_set = b"HEAD"
if detached:
# TODO(jelmer): Provide some way so that the actual ref gets
# updated rather than what it points to, so the delete isn't
# necessary.
del r.refs[to_set]
r.refs[to_set] = parse_commit(r, target).id
else:
r.refs.set_symbolic_ref(to_set, parse_ref(r, target))
if new_branch is not None:
r.refs.set_symbolic_ref(b"HEAD", to_set)
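# Example (editor's sketch): pointing HEAD at an existing branch, then
# creating and switching to a new one. Names are hypothetical.
#
#     update_head("/tmp/myrepo", "master")
#     update_head("/tmp/myrepo", "master", new_branch="dev")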
| [
"[email protected]"
] | |
ec2ee291e8a5d13b909ff667fa3c265d94191d2a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_265/ch59_2020_09_06_16_20_08_424667.py | 85dfd925cd472b6628093ea4f0c3327fe9db1126 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | def asteriscos (n):
    y = '*' * n
    return y

n = int(input())
teste = asteriscos(n)
print(teste)
"[email protected]"
] | |
c042d866799b19a4c0a033cdd67920cd11b294f2 | 31698241ee3485d8b053da0d0da203c215a958f9 | /test.py | b087fd7b04be98ca7891c6e9c241bc700c1fa8db | [] | no_license | martin-martin/save-udacity-forum-posts | 06f588d875477e7579c8fafe50b07e2fd16820a2 | 7abcede5c288cfe430b8b47fc1d7e1736bebe774 | refs/heads/master | 2021-06-10T06:39:10.868435 | 2020-03-03T09:00:38 | 2020-03-03T09:00:38 | 146,982,630 | 0 | 0 | null | 2021-06-01T22:35:47 | 2018-09-01T08:45:11 | Python | UTF-8 | Python | false | false | 640 | py | # getting out the thread name to be used as file name
# testing on how to deal with the fact that there are thread *replies*
# that have a different URL structure to *original posts*
li = [
"https://discussions.udacity.com/t/logic-improvisation-needed/191902/2",
"https://discussions.udacity.com/t/so-you-want-to-post-some-code/33561"
]
# with number at the end (=reply)
print(len(li[0].split('/'))) # len = 7
# replies need the 3rd-last item
print(li[0].split('/')[-3])
# without number at the end (=original post)
print(len(li[1].split('/'))) # len = 6
# original posts need the 2nd-last item
print(li[1].split('/')[-2])
| [
"[email protected]"
] | |
3c40e6e237efe724302daafbcb9ecb8d7168cf24 | dc93174785fb64ca91264fa8dee50c0b0ce616c8 | /DeepFried2/criteria/__init__.py | 5316b9014cc0a8dfc8d9251a659b7883978485fe | [
"MIT"
] | permissive | elPistolero/DeepFried2 | 789ee2362f47a65efe6d4f64cf81657777ee12b3 | 56efebede5469de706071a5ba645b2b74b3adf3e | refs/heads/master | 2020-12-25T16:35:39.932731 | 2015-09-18T06:48:14 | 2015-09-18T06:48:14 | 40,975,386 | 0 | 0 | null | 2015-08-18T14:02:46 | 2015-08-18T14:02:45 | null | UTF-8 | Python | false | false | 143 | py | from .ClassNLLCriterion import ClassNLLCriterion
from .BCECriterion import BCECriterion
from .RMSECriterion import RMSECriterion, MSECriterion
| [
"[email protected]"
] | |
ce7496e15e265dd8728a7ca0b81b7d914ea7bd5f | e78f1c5347069cec56c42149a1d4de3103936ee7 | /quantum_gates/find_resonant_interactions.py | 3af82839e1fa720a4e958cb7590b7cd4a0762e8b | [] | no_license | twobackfromtheend/CircularStates | 8882627923bdfce42f72d0a9401206acd7043f47 | ab0471362b444620d48a902ac237caead3b18a8f | refs/heads/master | 2023-08-04T13:20:48.494564 | 2021-09-16T19:23:29 | 2021-09-16T19:23:29 | 323,437,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | import numpy as np
from arc import StarkMapResonances, Rubidium
n = 51
n = 70
n = 90
n = 30
s = 0.5
calculation = StarkMapResonances(
Rubidium(),
[n, n - 1, n - 1 + s, n - 1 + s],
Rubidium(),
[n, n - 1, n - 1 + s, n - 1 + s],
)
n_buffer = 10
calculation.findResonances(
nMin=n - n_buffer, nMax=n + n_buffer, maxL=5000,
eFieldList=np.linspace(0, 100, 200),
# energyRange=[-0.8e9, 4.e9],
energyRange=[-10e9, 10.e9],
progressOutput=True,
)
calculation.showPlot()
| [
"[email protected]"
] | |
2131a1ba50f8f1a9a88ef10152942581cf16b5fa | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/iothub/azure-iot-deviceprovisioning/azure/iot/deviceprovisioning/_configuration.py | 92106ddf94ce39971ca1d644c17886ebe2376f08 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 3,143 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class DeviceProvisioningClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for DeviceProvisioningClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:keyword api_version: Api Version. Default value is "2021-10-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "TokenCredential", **kwargs: Any) -> None:
super(DeviceProvisioningClientConfiguration, self).__init__(**kwargs)
api_version: str = kwargs.pop("api_version", "2021-10-01")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
self.credential = credential
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://azure-devices-provisioning.net/.default"])
kwargs.setdefault("sdk_moniker", "iot-deviceprovisioning/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(
self.credential, *self.credential_scopes, **kwargs
)
| [
"[email protected]"
] | |
418464640708b824c506f3e90e4f9b8c4ff03368 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/program/qiskit/QC/startQiskit_QC67.py | a9d853e26832f658881463c5f7c58a05494423ab | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,013 | py | # qubit number=2
# total number=7
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.y(input_qubit[1]) # number=2
prog.cx(input_qubit[0],input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=3
prog.y(input_qubit[0]) # number=5
prog.y(input_qubit[0]) # number=6
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_QC67.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
09fc949aa59fee950e73b5334a4796b8c4013da1 | 23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9 | /gtwisted/test/twisted_test.py | 75f2164247362286e2d7ba3653753983433f88b7 | [] | no_license | Cuick/traversing | 210fcfb1c780037de59343fffeb4fa4d3f2eae32 | c78982580af7f63c8bff4dcb37005b7f7c682b5b | refs/heads/master | 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | #coding:utf8
'''
Created on 2014年2月21日
@author: lan (www.9miao.com)
'''
# from twisted.core.greactor import GeventReactor
from twisted.internet import reactor
from twisted.internet.protocol import ServerFactory,Protocol
reactor = reactor
class MyProtocol(Protocol):
def connectionMade(self):
pass
# print "connectionMade:",self.transport.sessionno
def dataReceived(self, data):
# print "dataReceived:",data
self.transport.write('HTTP/1.1 200 OK\n\nHello World!!')
self.transport.loseConnection()
def connectionLost(self, reason):
# pass
print "connectionLost", reason
class MyServerFactory(ServerFactory):
def __init__(self):
self.protocol = MyProtocol
from gfirefly.server.logobj import logger
ss = MyServerFactory()
import sys
# log.startLogging(sys.stdout)
reactor.listenTCP(8080, ss)
reactor.callLater(5, logger.info, "asdfasdf")
reactor.run() | [
"[email protected]"
] | |
2ae927007cd3599a2365302b8d151333af023a05 | 9c81c170f03ba925bf3d0682526245c202e384a7 | /tests/unit_tests/databases/dao/dao_tests.py | b792a65336a4e7a5f0df807fae1292ac4bf9de25 | [
"Apache-2.0",
"OFL-1.1"
] | permissive | zcong1993/incubator-superset | 2a08177641eff178dee9db852887ad2d19d70d54 | 269c99293f42089958dc98b5d6e5899509fc3111 | refs/heads/master | 2023-08-17T12:24:59.438120 | 2023-08-17T10:50:24 | 2023-08-17T10:50:24 | 209,522,299 | 0 | 0 | Apache-2.0 | 2023-03-06T08:10:31 | 2019-09-19T10:09:21 | TypeScript | UTF-8 | Python | false | false | 2,188 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections.abc import Iterator
import pytest
from sqlalchemy.orm.session import Session
@pytest.fixture
def session_with_data(session: Session) -> Iterator[Session]:
from superset.connectors.sqla.models import SqlaTable
from superset.databases.ssh_tunnel.models import SSHTunnel
from superset.models.core import Database
engine = session.get_bind()
SqlaTable.metadata.create_all(engine) # pylint: disable=no-member
db = Database(database_name="my_database", sqlalchemy_uri="sqlite://")
sqla_table = SqlaTable(
table_name="my_sqla_table",
columns=[],
metrics=[],
database=db,
)
ssh_tunnel = SSHTunnel(
database_id=db.id,
database=db,
)
session.add(db)
session.add(sqla_table)
session.add(ssh_tunnel)
session.flush()
yield session
session.rollback()
def test_database_get_ssh_tunnel(session_with_data: Session) -> None:
from superset.daos.database import DatabaseDAO
from superset.databases.ssh_tunnel.models import SSHTunnel
result = DatabaseDAO.get_ssh_tunnel(1)
assert result
assert isinstance(result, SSHTunnel)
assert 1 == result.database_id
def test_database_get_ssh_tunnel_not_found(session_with_data: Session) -> None:
from superset.daos.database import DatabaseDAO
result = DatabaseDAO.get_ssh_tunnel(2)
assert result is None
| [
"[email protected]"
] | |
80c36a7e86e77966f54c631cb257fd0ebc75cc31 | 8edd63a42469bf09fcad1c1070995ceda6e49646 | /env/lib/python2.7/site-packages/observations/r/codling.py | 86cc11b0e65757702630c0d2a4ac144e458b4164 | [] | no_license | silky/bell-ppls | fa0b5418f40dab59de48b7220ff30caba5945b56 | 369e7602c810b694a70ac1e875017480c8910ac8 | refs/heads/master | 2020-04-06T08:40:28.588492 | 2018-11-01T06:51:33 | 2018-11-01T06:51:33 | 157,312,221 | 1 | 0 | null | 2018-11-13T03:04:18 | 2018-11-13T03:04:18 | null | UTF-8 | Python | false | false | 2,203 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def codling(path):
"""Dose-mortality data, for fumigation of codling moth with methyl bromide
Data are from trials that studied the mortality response of codling moth
to fumigation with methyl bromide.
A data frame with 99 observations on the following 10 variables.
dose
Injected dose of methyl bromide, in gm per cubic meter
tot
Number of insects in chamber
dead
Number of insects dying
pobs
Proportion dying
cm
Control mortality, i.e., at dose 0
ct
Concentration-time sum
Cultivar
a factor with levels `BRAEBURN` `FUJI` `GRANNY` `Gala`
`ROYAL` `Red Delicious` `Splendour`
gp
a factor which has a different level for each different combination
of `Cultivar`, `year` and `rep` (replicate).
year
a factor with levels `1988` `1989`
numcm
a numeric vector: total number of control insects
Maindonald, J.H.; Waddell, B.C.; Petry, R.J. 2001. Apple cultivar
effects on codling moth (Lepidoptera: Tortricidae) egg mortality
  following fumigation with methyl bromide. Postharvest Biology and
  Technology.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `codling.csv`.
Returns:
Tuple of np.ndarray `x_train` with 99 rows and 10 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'codling.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/codling.csv'
maybe_download_and_extract(path, url,
save_file_name='codling.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
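# Example (editor's sketch): loading the data; the directory is hypothetical
# and the CSV is downloaded there on first use.
#
#     x_train, metadata = codling('~/observations_data')
#     print(x_train.shape)        # expected (99, 10)
#     print(metadata['columns'])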
| [
"[email protected]"
] | |
95b356426471b1afe72a949f674a499ccac4cfc9 | 849e95a72f4f380d6b31573a0a13e9eccd288838 | /data-tool/flows/common/affiliation_queries.py | a8e6b91b8895cf4e643efd69cb521b2eb8a4ba11 | [
"Apache-2.0"
] | permissive | bcgov/lear | d9b27e2b44ba607ca13878357a62a0623d54ddee | d90f11a7b14411b02c07fe97d2c1fc31cd4a9b32 | refs/heads/main | 2023-09-01T11:26:11.058427 | 2023-08-31T20:25:24 | 2023-08-31T20:25:24 | 168,396,249 | 13 | 117 | Apache-2.0 | 2023-09-14T20:52:02 | 2019-01-30T18:49:09 | Python | UTF-8 | Python | false | false | 567 | py | def get_unaffiliated_firms_query(data_load_env: str):
query = f"""
select ap.account_id, ap.corp_num, ap.contact_email, c.admin_email
from affiliation_processing ap
join corporation c on ap.corp_num = c.corp_num
where environment = '{data_load_env}'
-- and processed_status is null
--or processed_status <> 'COMPLETED'
and (processed_status is null or processed_status not in ('COMPLETED', 'FAILED'))
limit 5
;
"""
return query
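# Example (editor's sketch): rendering the query for a hypothetical load
# environment before handing it to a database session.
#
#     print(get_unaffiliated_firms_query('sandbox'))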
| [
"[email protected]"
] | |
37ba9ed4370eaf0e92f4bec9ffa8a8614598bd76 | 9247c6081930c215a1543e95a2567cfa60214c5a | /mlsegment/shape.py | fbec3ac1046ffc61ef8dbf4294a320694d26f67d | [] | no_license | luispedro/mlsegment | 5cec535dbb2476f860ae0ab629b62383e0850143 | 13bf1f2b19aa22f82c9bc07fdf7e44d0fefe79af | refs/heads/master | 2020-04-01T21:12:48.459146 | 2010-12-01T19:46:57 | 2010-12-01T20:39:34 | 1,127,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | # Copyright (C) 2010, Luis Pedro Coelho <[email protected]>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
# License: MIT
from __future__ import division
import numpy as np
from pyslic.features.hullfeatures import hullfeatures
def shape_features(binimg):
size = binimg.sum()
return np.concatenate(([size], hullfeatures(binimg)))
def extract1(img, solution):
labeled, n_regions = solution
for i in xrange(n_regions):
shape = (labeled == (i+1))
yield shape_features(shape)
log_limit = -100
limit = np.exp(log_limit)
def apply(img, solution, shape_model):
values = [shape_model(feats) for feats in extract1(img, solution)]
values = np.array(values)
n = len(values)
return (np.sum(np.log(values[values > limit])) + log_limit * np.sum(values <= limit))/n
shapes = apply
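# Example (editor's sketch): scoring a one-region segmentation with a toy,
# hypothetical shape model (a real model would be fit to training features).
#
#     import numpy as np
#     labeled = np.zeros((64, 64), int)
#     labeled[10:20, 10:20] = 1
#     print(shapes(None, (labeled, 1), lambda feats: 0.5))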
| [
"[email protected]"
] | |
c4f319cc552d88af43f227b1f826513811c0f29f | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/virtual_usb_remote_client_backing_info.py | 14e557038cd02b58c8e04e8e355118e8cb07b301 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VirtualUSBRemoteClientBackingInfo(vim, *args, **kwargs):
'''The virtual remote client USB device backing class.'''
obj = vim.client.factory.create('{urn:vim25}VirtualUSBRemoteClientBackingInfo')
# do some validation checking...
if (len(args) + len(kwargs)) < 2:
        raise IndexError('Expected at least 2 arguments got: %d' % len(args))
required = [ 'hostname', 'deviceName' ]
optional = [ 'useAutoDetect', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| [
"[email protected]"
] | |
f435aa2b121f93f5780e2858c25946185360188a | 09bd584a3eb73ec77693343d135ed664d96b3258 | /server/tests/test_vanilla.py | d874e5410f6400c9d80e978e3f1294ab2a2c35ae | [] | no_license | cameronmaske/pytest-django | ba83c84e74312f47d98f7787f1fb01dda81af825 | 09942d46277bfa6e7a2c71bdafe100a455cf9f2f | refs/heads/master | 2023-03-20T10:17:55.658522 | 2014-06-12T10:39:29 | 2014-06-12T10:39:29 | 20,763,872 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from django.test import TestCase
from base.models import Person, Dog, Tag, SimpleDog
class ModelTestCase(TestCase):
def test_person(self):
person = Person()
print "Vanilla - test_person. Person Count", Person.objects.all()
def test_dog(self):
dog = Dog()
print "Vanilla - test_dog. Person Count", Person.objects.all()
def test_tag(self):
dog = SimpleDog(id=1)
dog.save()
tag = Tag(content_object=dog)
tag.save()
self.assertIsNotNone(tag.content_object)
print "Vanilla - test_tag. Dog Count", SimpleDog.objects.all()
print "Vanilla - test_tag. Person Count", Person.objects.all()
| [
"[email protected]"
] | |
d0b58a0f9cdd16d4048b28b51ba3708e95714a3a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03035/s850980087.py | 9e645bcde5e5ae67ff5e7d0b5ccaf14af566a570 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | a,b=map(int,input().split())
if a>12:
print(b)
elif 12>=a>5:
print(b//2)
else:
print(0) | [
"[email protected]"
] | |
90f50bfebc84cd823817012a773583c73aaa9b9b | 6364bb727b623f06f6998941299c49e7fcb1d437 | /msgraph-cli-extensions/src/groupsonenote/azext_groupsonenote/vendored_sdks/groupsonenote/operations/_group_onenote_page_parent_notebook_section_operations.py | 4a5e423d8719d4d682d4b07cf3fd2220cb6a3db6 | [
"MIT"
] | permissive | kanakanaidu/msgraph-cli | 1d6cd640f4e10f4bdf476d44d12a7c48987b1a97 | b3b87f40148fb691a4c331f523ca91f8a5cc9224 | refs/heads/main | 2022-12-25T08:08:26.716914 | 2020-09-23T14:29:13 | 2020-09-23T14:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,398 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class GroupOnenotePageParentNotebookSectionOperations(object):
"""GroupOnenotePageParentNotebookSectionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~groups_one_note.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_page(
self,
group_id, # type: str
onenote_page_id, # type: str
onenote_section_id, # type: str
orderby=None, # type: Optional[List[Union[str, "models.Enum116"]]]
select=None, # type: Optional[List[Union[str, "models.Enum117"]]]
expand=None, # type: Optional[List[Union[str, "models.Enum118"]]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfOnenotePage3"]
"""Get pages from groups.
Get pages from groups.
:param group_id: key: group-id of group.
:type group_id: str
:param onenote_page_id: key: onenotePage-id of onenotePage.
:type onenote_page_id: str
:param onenote_section_id: key: onenoteSection-id of onenoteSection.
:type onenote_section_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~groups_one_note.models.Enum116]
:param select: Select properties to be returned.
:type select: list[str or ~groups_one_note.models.Enum117]
:param expand: Expand related entities.
:type expand: list[str or ~groups_one_note.models.Enum118]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfOnenotePage3 or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~groups_one_note.models.CollectionOfOnenotePage3]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfOnenotePage3"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_page.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfOnenotePage3', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_page.metadata = {'url': '/groups/{group-id}/onenote/pages/{onenotePage-id}/parentNotebook/sections/{onenoteSection-id}/pages'} # type: ignore
def create_page(
self,
group_id, # type: str
onenote_page_id, # type: str
onenote_section_id, # type: str
body, # type: "models.MicrosoftGraphOnenotePage"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphOnenotePage"
"""Create new navigation property to pages for groups.
Create new navigation property to pages for groups.
:param group_id: key: group-id of group.
:type group_id: str
:param onenote_page_id: key: onenotePage-id of onenotePage.
:type onenote_page_id: str
:param onenote_section_id: key: onenoteSection-id of onenoteSection.
:type onenote_section_id: str
:param body: New navigation property.
:type body: ~groups_one_note.models.MicrosoftGraphOnenotePage
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenotePage, or the result of cls(response)
:rtype: ~groups_one_note.models.MicrosoftGraphOnenotePage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenotePage"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_page.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphOnenotePage')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenotePage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_page.metadata = {'url': '/groups/{group-id}/onenote/pages/{onenotePage-id}/parentNotebook/sections/{onenoteSection-id}/pages'} # type: ignore
def get_page(
self,
group_id, # type: str
onenote_page_id, # type: str
onenote_section_id, # type: str
onenote_page_id1, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum119"]]]
expand=None, # type: Optional[List[Union[str, "models.Enum120"]]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphOnenotePage"
"""Get pages from groups.
Get pages from groups.
:param group_id: key: group-id of group.
:type group_id: str
:param onenote_page_id: key: onenotePage-id of onenotePage.
:type onenote_page_id: str
:param onenote_section_id: key: onenoteSection-id of onenoteSection.
:type onenote_section_id: str
:param onenote_page_id1: key: onenotePage-id of onenotePage.
:type onenote_page_id1: str
:param select: Select properties to be returned.
:type select: list[str or ~groups_one_note.models.Enum119]
:param expand: Expand related entities.
:type expand: list[str or ~groups_one_note.models.Enum120]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenotePage, or the result of cls(response)
:rtype: ~groups_one_note.models.MicrosoftGraphOnenotePage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenotePage"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_page.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenotePage-id1': self._serialize.url("onenote_page_id1", onenote_page_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenotePage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_page.metadata = {'url': '/groups/{group-id}/onenote/pages/{onenotePage-id}/parentNotebook/sections/{onenoteSection-id}/pages/{onenotePage-id1}'} # type: ignore
def update_page(
self,
group_id, # type: str
onenote_page_id, # type: str
onenote_section_id, # type: str
onenote_page_id1, # type: str
body, # type: "models.MicrosoftGraphOnenotePage"
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property pages in groups.
Update the navigation property pages in groups.
:param group_id: key: group-id of group.
:type group_id: str
:param onenote_page_id: key: onenotePage-id of onenotePage.
:type onenote_page_id: str
:param onenote_section_id: key: onenoteSection-id of onenoteSection.
:type onenote_section_id: str
:param onenote_page_id1: key: onenotePage-id of onenotePage.
:type onenote_page_id1: str
:param body: New navigation property values.
:type body: ~groups_one_note.models.MicrosoftGraphOnenotePage
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_page.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenotePage-id1': self._serialize.url("onenote_page_id1", onenote_page_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphOnenotePage')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_page.metadata = {'url': '/groups/{group-id}/onenote/pages/{onenotePage-id}/parentNotebook/sections/{onenoteSection-id}/pages/{onenotePage-id1}'} # type: ignore
def get_parent_notebook(
self,
group_id, # type: str
onenote_page_id, # type: str
onenote_section_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum121"]]]
expand=None, # type: Optional[List[Union[str, "models.Enum122"]]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphNotebook"
"""Get parentNotebook from groups.
Get parentNotebook from groups.
:param group_id: key: group-id of group.
:type group_id: str
:param onenote_page_id: key: onenotePage-id of onenotePage.
:type onenote_page_id: str
:param onenote_section_id: key: onenoteSection-id of onenoteSection.
:type onenote_section_id: str
:param select: Select properties to be returned.
:type select: list[str or ~groups_one_note.models.Enum121]
:param expand: Expand related entities.
:type expand: list[str or ~groups_one_note.models.Enum122]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphNotebook, or the result of cls(response)
:rtype: ~groups_one_note.models.MicrosoftGraphNotebook
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphNotebook"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_parent_notebook.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphNotebook', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_parent_notebook.metadata = {'url': '/groups/{group-id}/onenote/pages/{onenotePage-id}/parentNotebook/sections/{onenoteSection-id}/parentNotebook'} # type: ignore
def update_parent_notebook(
self,
group_id, # type: str
onenote_page_id, # type: str
onenote_section_id, # type: str
id=None, # type: Optional[str]
self_parameter=None, # type: Optional[str]
created_date_time=None, # type: Optional[datetime.datetime]
display_name=None, # type: Optional[str]
last_modified_date_time=None, # type: Optional[datetime.datetime]
application=None, # type: Optional["models.MicrosoftGraphIdentity"]
device=None, # type: Optional["models.MicrosoftGraphIdentity"]
user=None, # type: Optional["models.MicrosoftGraphIdentity"]
microsoft_graph_identity_application=None, # type: Optional["models.MicrosoftGraphIdentity"]
microsoft_graph_identity_device=None, # type: Optional["models.MicrosoftGraphIdentity"]
microsoft_graph_identity_user=None, # type: Optional["models.MicrosoftGraphIdentity"]
is_default=None, # type: Optional[bool]
user_role=None, # type: Optional[Union[str, "models.MicrosoftGraphOnenoteUserRole"]]
is_shared=None, # type: Optional[bool]
sections_url=None, # type: Optional[str]
section_groups_url=None, # type: Optional[str]
sections=None, # type: Optional[List["models.MicrosoftGraphOnenoteSection"]]
section_groups=None, # type: Optional[List["models.MicrosoftGraphSectionGroup"]]
one_note_client_url=None, # type: Optional["models.MicrosoftGraphExternalLink"]
one_note_web_url=None, # type: Optional["models.MicrosoftGraphExternalLink"]
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property parentNotebook in groups.
Update the navigation property parentNotebook in groups.
:param group_id: key: group-id of group.
:type group_id: str
:param onenote_page_id: key: onenotePage-id of onenotePage.
:type onenote_page_id: str
:param onenote_section_id: key: onenoteSection-id of onenoteSection.
:type onenote_section_id: str
:param id: Read-only.
:type id: str
:param self_parameter: The endpoint where you can get details about the page. Read-only.
:type self_parameter: str
:param created_date_time: The date and time when the page was created. The timestamp represents
date and time information using ISO 8601 format and is always in UTC time. For example,
midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'. Read-only.
:type created_date_time: ~datetime.datetime
:param display_name: The name of the notebook.
:type display_name: str
:param last_modified_date_time: The date and time when the notebook was last modified. The
timestamp represents date and time information using ISO 8601 format and is always in UTC time.
For example, midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'. Read-
only.
:type last_modified_date_time: ~datetime.datetime
:param application: identity.
:type application: ~groups_one_note.models.MicrosoftGraphIdentity
:param device: identity.
:type device: ~groups_one_note.models.MicrosoftGraphIdentity
:param user: identity.
:type user: ~groups_one_note.models.MicrosoftGraphIdentity
:param microsoft_graph_identity_application: identity.
:type microsoft_graph_identity_application: ~groups_one_note.models.MicrosoftGraphIdentity
:param microsoft_graph_identity_device: identity.
:type microsoft_graph_identity_device: ~groups_one_note.models.MicrosoftGraphIdentity
:param microsoft_graph_identity_user: identity.
:type microsoft_graph_identity_user: ~groups_one_note.models.MicrosoftGraphIdentity
:param is_default: Indicates whether this is the user's default notebook. Read-only.
:type is_default: bool
:param user_role:
:type user_role: str or ~groups_one_note.models.MicrosoftGraphOnenoteUserRole
:param is_shared: Indicates whether the notebook is shared. If true, the contents of the
notebook can be seen by people other than the owner. Read-only.
:type is_shared: bool
:param sections_url: The URL for the sections navigation property, which returns all the
sections in the notebook. Read-only.
:type sections_url: str
:param section_groups_url: The URL for the sectionGroups navigation property, which returns all
the section groups in the notebook. Read-only.
:type section_groups_url: str
:param sections: The sections in the notebook. Read-only. Nullable.
:type sections: list[~groups_one_note.models.MicrosoftGraphOnenoteSection]
:param section_groups: The section groups in the notebook. Read-only. Nullable.
:type section_groups: list[~groups_one_note.models.MicrosoftGraphSectionGroup]
:param one_note_client_url: externalLink.
:type one_note_client_url: ~groups_one_note.models.MicrosoftGraphExternalLink
:param one_note_web_url: externalLink.
:type one_note_web_url: ~groups_one_note.models.MicrosoftGraphExternalLink
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphNotebook(id=id, self_property=self_parameter, created_date_time=created_date_time, display_name=display_name, last_modified_date_time=last_modified_date_time, application_last_modified_by_application=application, device_last_modified_by_device=device, user_last_modified_by_user=user, application_created_by_application=microsoft_graph_identity_application, device_created_by_device=microsoft_graph_identity_device, user_created_by_user=microsoft_graph_identity_user, is_default=is_default, user_role=user_role, is_shared=is_shared, sections_url=sections_url, section_groups_url=section_groups_url, sections=sections, section_groups=section_groups, one_note_client_url=one_note_client_url, one_note_web_url=one_note_web_url)
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_parent_notebook.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphNotebook')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_parent_notebook.metadata = {'url': '/groups/{group-id}/onenote/pages/{onenotePage-id}/parentNotebook/sections/{onenoteSection-id}/parentNotebook'} # type: ignore
def get_parent_section_group(
self,
group_id, # type: str
onenote_page_id, # type: str
onenote_section_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum123"]]]
expand=None, # type: Optional[List[Union[str, "models.Enum124"]]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphSectionGroup"
"""Get parentSectionGroup from groups.
Get parentSectionGroup from groups.
:param group_id: key: group-id of group.
:type group_id: str
:param onenote_page_id: key: onenotePage-id of onenotePage.
:type onenote_page_id: str
:param onenote_section_id: key: onenoteSection-id of onenoteSection.
:type onenote_section_id: str
:param select: Select properties to be returned.
:type select: list[str or ~groups_one_note.models.Enum123]
:param expand: Expand related entities.
:type expand: list[str or ~groups_one_note.models.Enum124]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphSectionGroup, or the result of cls(response)
:rtype: ~groups_one_note.models.MicrosoftGraphSectionGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphSectionGroup"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_parent_section_group.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphSectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_parent_section_group.metadata = {'url': '/groups/{group-id}/onenote/pages/{onenotePage-id}/parentNotebook/sections/{onenoteSection-id}/parentSectionGroup'} # type: ignore
def update_parent_section_group(
self,
group_id, # type: str
onenote_page_id, # type: str
onenote_section_id, # type: str
id=None, # type: Optional[str]
self_parameter=None, # type: Optional[str]
created_date_time=None, # type: Optional[datetime.datetime]
display_name=None, # type: Optional[str]
last_modified_date_time=None, # type: Optional[datetime.datetime]
application=None, # type: Optional["models.MicrosoftGraphIdentity"]
device=None, # type: Optional["models.MicrosoftGraphIdentity"]
user=None, # type: Optional["models.MicrosoftGraphIdentity"]
microsoft_graph_identity_application=None, # type: Optional["models.MicrosoftGraphIdentity"]
microsoft_graph_identity_device=None, # type: Optional["models.MicrosoftGraphIdentity"]
microsoft_graph_identity_user=None, # type: Optional["models.MicrosoftGraphIdentity"]
sections_url=None, # type: Optional[str]
section_groups_url=None, # type: Optional[str]
parent_notebook=None, # type: Optional["models.MicrosoftGraphNotebook"]
parent_section_group=None, # type: Optional["models.MicrosoftGraphSectionGroup"]
sections=None, # type: Optional[List["models.MicrosoftGraphOnenoteSection"]]
section_groups=None, # type: Optional[List["models.MicrosoftGraphSectionGroup"]]
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property parentSectionGroup in groups.
Update the navigation property parentSectionGroup in groups.
:param group_id: key: group-id of group.
:type group_id: str
:param onenote_page_id: key: onenotePage-id of onenotePage.
:type onenote_page_id: str
:param onenote_section_id: key: onenoteSection-id of onenoteSection.
:type onenote_section_id: str
:param id: Read-only.
:type id: str
:param self_parameter: The endpoint where you can get details about the page. Read-only.
:type self_parameter: str
:param created_date_time: The date and time when the page was created. The timestamp represents
date and time information using ISO 8601 format and is always in UTC time. For example,
midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'. Read-only.
:type created_date_time: ~datetime.datetime
:param display_name: The name of the notebook.
:type display_name: str
:param last_modified_date_time: The date and time when the notebook was last modified. The
timestamp represents date and time information using ISO 8601 format and is always in UTC time.
For example, midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'. Read-
only.
:type last_modified_date_time: ~datetime.datetime
:param application: identity.
:type application: ~groups_one_note.models.MicrosoftGraphIdentity
:param device: identity.
:type device: ~groups_one_note.models.MicrosoftGraphIdentity
:param user: identity.
:type user: ~groups_one_note.models.MicrosoftGraphIdentity
:param microsoft_graph_identity_application: identity.
:type microsoft_graph_identity_application: ~groups_one_note.models.MicrosoftGraphIdentity
:param microsoft_graph_identity_device: identity.
:type microsoft_graph_identity_device: ~groups_one_note.models.MicrosoftGraphIdentity
:param microsoft_graph_identity_user: identity.
:type microsoft_graph_identity_user: ~groups_one_note.models.MicrosoftGraphIdentity
:param sections_url: The URL for the sections navigation property, which returns all the
sections in the section group. Read-only.
:type sections_url: str
:param section_groups_url: The URL for the sectionGroups navigation property, which returns all
the section groups in the section group. Read-only.
:type section_groups_url: str
:param parent_notebook: notebook.
:type parent_notebook: ~groups_one_note.models.MicrosoftGraphNotebook
:param parent_section_group: sectionGroup.
:type parent_section_group: ~groups_one_note.models.MicrosoftGraphSectionGroup
:param sections: The sections in the section group. Read-only. Nullable.
:type sections: list[~groups_one_note.models.MicrosoftGraphOnenoteSection]
:param section_groups: The section groups in the section. Read-only. Nullable.
:type section_groups: list[~groups_one_note.models.MicrosoftGraphSectionGroup]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphSectionGroup(id=id, self_property=self_parameter, created_date_time=created_date_time, display_name=display_name, last_modified_date_time=last_modified_date_time, application_last_modified_by_application=application, device_last_modified_by_device=device, user_last_modified_by_user=user, application_created_by_application=microsoft_graph_identity_application, device_created_by_device=microsoft_graph_identity_device, user_created_by_user=microsoft_graph_identity_user, sections_url=sections_url, section_groups_url=section_groups_url, parent_notebook=parent_notebook, parent_section_group=parent_section_group, sections=sections, section_groups=section_groups)
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_parent_section_group.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphSectionGroup')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_parent_section_group.metadata = {'url': '/groups/{group-id}/onenote/pages/{onenotePage-id}/parentNotebook/sections/{onenoteSection-id}/parentSectionGroup'} # type: ignore
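# A minimal usage sketch for the generated operations above (client
# construction and authentication omitted; the attribute path "client.groups"
# is assumed for illustration and may differ in the generated client):
#
#   client.groups.update_parent_notebook(
#       group_id="<group-id>",
#       onenote_page_id="<page-id>",
#       onenote_section_id="<section-id>",
#       display_name="Renamed notebook")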
| [
"[email protected]"
] | |
9da35b41038706ad71614404f62e1afe9af8e375 | c7a6f8ed434c86b4cdae9c6144b9dd557e594f78 | /ECE364/.PyCharm40/system/python_stubs/348993582/PyQt4/QtGui/QHideEvent.py | cfd848238474f2b4a649fb575cb2b6a102c3dc84 | [] | no_license | ArbalestV/Purdue-Coursework | 75d979bbe72106975812b1d46b7d854e16e8e15e | ee7f86145edb41c17aefcd442fa42353a9e1b5d1 | refs/heads/master | 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | # encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib64/python2.6/site-packages/PyQt4/QtGui.so
# by generator 1.136
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QHideEvent(__PyQt4_QtCore.QEvent):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
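# A minimal sketch of where this event type appears: Qt delivers a QHideEvent
# to QWidget.hideEvent when a widget is hidden (widget subclass illustrative):
#
#   class MyWidget(QtGui.QWidget):
#       def hideEvent(self, event):  # event is a QHideEvent
#           print('widget hidden')
#           QtGui.QWidget.hideEvent(self, event)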
| [
"[email protected]"
] | |
3fa2ec3f7e947fa2481b0da91125eff20d196d3b | b05761d771bb5a85d39d370c649567c1ff3eb089 | /venv/lib/python3.10/site-packages/google/protobuf/internal/symbol_database_test.py | b4fa37fe40064f9e02c9b48bf3a1a5f9a22aae14 | [] | no_license | JawshyJ/Coding_Practice | 88c49cab955eab04609ec1003b6b8c20f103fc06 | eb6b229d41aa49b1545af2120e6bee8e982adb41 | refs/heads/master | 2023-02-19T10:18:04.818542 | 2023-02-06T21:22:58 | 2023-02-06T21:22:58 | 247,788,631 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/fb/52/e7/708e3742aa691c784a82b3c4285c6477288dd5d5aa400d2d7b47c88990 | [
"[email protected]"
] | |
ac64b34973362b7ba0bb20f19fa422398239fe6d | 8613ec7f381a6683ae24b54fb2fb2ac24556ad0b | /boot/hard/divrem.py | c7180bf7bddacb57c97c3650dba1a60803a2326d | [] | no_license | Forest-Y/AtCoder | 787aa3c7dc4d999a71661465349428ba60eb2f16 | f97209da3743026920fb4a89fc0e4d42b3d5e277 | refs/heads/master | 2023-08-25T13:31:46.062197 | 2021-10-29T12:54:24 | 2021-10-29T12:54:24 | 301,642,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | n = int(input())
ans = 0
# if n // m == n % m == q then n = q*m + q = q*(m + 1), so m + 1 must divide n;
# calc(d) tests the candidate m = d - 1 for each divisor-derived value d
def calc(x):
x -= 1
if x <= 0:
return 0
elif n // x == n % x:
return x
return 0
for i in range(1, int(n ** 0.5) + 1):
if n % i == 0:
ans += calc(n // i) + calc(i)
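# worked example: for n = 8 the valid m are 3 (8 = 2*3 + 2) and 7 (8 = 1*7 + 1),
# so the loop above accumulates 3 + 7 and the program prints 10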
print(ans) | [
"[email protected]"
] | |
6f906b95dab3f59bdaf57c7c66e43031b13bd885 | b909406a1f838b2bb9e8eca90fd1bdf412e67286 | /13.微信投票机器人/VoteRobot-master/VoteRobot.py | 6f6fee18970cba741315d9928fbbc95582572440 | [] | no_license | kaishuibaicai/mini-Python-Projects | 2f12a5349e389c73080443443fcd293aae04a521 | e18d8bbecb8baaa345720011e67789b123523457 | refs/heads/master | 2021-09-13T02:55:12.429216 | 2018-04-24T04:44:33 | 2018-04-24T04:44:33 | 104,319,805 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | #encoding=utf-8
import sys
import time
import random
import string
import httplib
import urllib
# Build an opid-style token: the fixed prefix 'owzeBj' followed by
# id_length random letters and digits
def getRandomString(id_length):
charSeq = string.ascii_letters + string.digits
randString = 'owzeBj'
for i in range(id_length):
randString += random.choice(charSeq)
return randString
# Cast a single vote for the specified entry (zpid)
def voteOnce(zpid):
    # httplib expects a bare host (optionally host:port); the request path
    # is supplied separately to conn.request() below
    conn = httplib.HTTPConnection("weixinmp.fjedu.gov.cn")
opid = getRandomString(22)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
postParams = urllib.urlencode({'zpid': zpid, 'opid': opid, 'md_id': 70, 'act': 'zuopin_toupiao'})
conn.request("POST", "/wtg1/mobile/user.php", postParams, headers)
conn.close()
# Vote controller: cast voteNum votes for entry zpid, sleeping a random
# interval between consecutive votes
def voteController(zpid, voteNum):
print '======== Start to vote zpid({0}), Total votes: {1}'.format(zpid, voteNum)
for i in range(voteNum):
voteOnce(zpid)
randomSleepTime = random.randint(1, 4)
print '{0} tickets has been voted, the next ticket will be voted after {1} seconds.'.format(i+1, randomSleepTime)
time.sleep(randomSleepTime)
print '======== Voting Ended!'
if __name__ == '__main__':
# voteOnce(38)
voteController(38, 3)
| [
"[email protected]"
] | |
f5da0d359a2950a5309e2631ef994614b76aaeec | 24f75f7ab2c38a028e99466a26b0957ab3a5424b | /pyscf/hessian/rks.py | 93f37e9c6200c821a8f075b97cb93a9274a7ac4b | [
"BSD-2-Clause"
] | permissive | wwjCMP/pyscf | 2a0e5d13ef42b537939e3fd3d63cb3b9cbd1ef28 | 3526cda9537ae2a4d90adee12c707a02dd66d445 | refs/heads/master | 2021-08-28T12:36:07.882737 | 2017-12-12T08:53:37 | 2017-12-12T08:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,433 | py | #!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
'''
Non-relativistic RKS analytical Hessian
'''
import time
import copy
import tempfile
import numpy
import h5py
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import _vhf
from pyscf.hessian import rhf
from pyscf.dft import numint
from pyscf import dft
USE_XCFUN = True
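# positions of the 2nd/3rd AO derivative components in the array returned by
# eval_ao (component order: 0, x, y, z, xx, xy, xz, yy, yz, zz, xxx, ...)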
XX, XY, XZ = 4, 5, 6
YX, YY, YZ = 5, 7, 8
ZX, ZY, ZZ = 6, 8, 9
XXX, XXY, XXZ, XYY, XYZ, XZZ = 10, 11, 12, 13, 14, 15
YYY, YYZ, YZZ, ZZZ = 16, 17, 18, 19
def hess_elec(hess_mf, mo_energy=None, mo_coeff=None, mo_occ=None,
atmlst=None, max_memory=4000, verbose=None):
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(hess_mf.stdout, hess_mf.verbose)
time0 = (time.clock(), time.time())
mf = hess_mf._scf
mol = hess_mf.mol
if mo_energy is None: mo_energy = mf.mo_energy
if mo_occ is None: mo_occ = mf.mo_occ
if mo_coeff is None: mo_coeff = mf.mo_coeff
if atmlst is None: atmlst = range(mol.natm)
nao, nmo = mo_coeff.shape
nocc = int(mo_occ.sum()) // 2
mocc = mo_coeff[:,:nocc]
dm0 = mf.make_rdm1(mo_coeff, mo_occ)
ni = copy.copy(mf._numint)
if USE_XCFUN:
try:
ni.libxc = dft.xcfun
xctype = ni._xc_type(mf.xc)
except (ImportError, KeyError, NotImplementedError):
ni.libxc = dft.libxc
xctype = ni._xc_type(mf.xc)
else:
xctype = ni._xc_type(mf.xc)
if mf.grids.coords is None:
mf.grids.build(with_non0tab=True)
grids = mf.grids
hyb = ni.libxc.hybrid_coeff(mf.xc)
max_memory = 4000
h1aos = hess_mf.make_h1(mo_coeff, mo_occ, hess_mf.chkfile, atmlst, log)
t1 = log.timer('making H1', *time0)
def fx(mo1):
# *2 for alpha + beta
dm1 = numpy.einsum('xai,pa,qi->xpq', mo1, mo_coeff, mocc*2)
dm1 = dm1 + dm1.transpose(0,2,1)
vindxc = _contract_xc_kernel(mf, mf.xc, dm1, max_memory)
if abs(hyb) > 1e-10:
vj, vk = mf.get_jk(mol, dm1)
veff = vj - hyb * .5 * vk + vindxc
else:
vj = mf.get_j(mol, dm1)
veff = vj + vindxc
v1 = numpy.einsum('xpq,pa,qi->xai', veff, mo_coeff, mocc)
return v1.reshape(v1.shape[0],-1)
mo1s, e1s = hess_mf.solve_mo1(mo_energy, mo_coeff, mo_occ, h1aos,
fx, atmlst, max_memory, log)
t1 = log.timer('solving MO1', *t1)
tmpf = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
with h5py.File(tmpf.name, 'w') as f:
for i0, ia in enumerate(atmlst):
mol.set_rinv_origin(mol.atom_coord(ia))
f['rinv2aa/%d'%ia] = (mol.atom_charge(ia) *
mol.intor('int1e_ipiprinv', comp=9))
f['rinv2ab/%d'%ia] = (mol.atom_charge(ia) *
mol.intor('int1e_iprinvip', comp=9))
h1aa =(mol.intor('int1e_ipipkin', comp=9) +
mol.intor('int1e_ipipnuc', comp=9))
h1ab =(mol.intor('int1e_ipkinip', comp=9) +
mol.intor('int1e_ipnucip', comp=9))
s1aa = mol.intor('int1e_ipipovlp', comp=9)
s1ab = mol.intor('int1e_ipovlpip', comp=9)
s1a =-mol.intor('int1e_ipovlp', comp=3)
# Energy weighted density matrix
dme0 = numpy.einsum('pi,qi,i->pq', mocc, mocc, mo_energy[:nocc]) * 2
int2e_ipip1 = mol._add_suffix('int2e_ipip1')
if abs(hyb) > 1e-10:
vj1, vk1 = _vhf.direct_mapdm(int2e_ipip1, 's2kl',
('lk->s1ij', 'jk->s1il'), dm0, 9,
mol._atm, mol._bas, mol._env)
veff1ii = vj1 - hyb * .5 * vk1
else:
vj1 = _vhf.direct_mapdm(int2e_ipip1, 's2kl', 'lk->s1ij', dm0, 9,
mol._atm, mol._bas, mol._env)
veff1ii = vj1.copy()
vj1[:] = 0
if xctype == 'LDA':
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho = ni.eval_rho2(mol, ao[0], mo_coeff, mo_occ, mask, 'LDA')
vxc = ni.eval_xc(mf.xc, rho, 0, deriv=1)[1]
vrho = vxc[0]
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho)
for i in range(6):
vj1[i] += lib.dot(ao[i+4].T, aow)
aow = aow1 = None
elif xctype == 'GGA':
ao_deriv = 3
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho = ni.eval_rho2(mol, ao[:4], mo_coeff, mo_occ, mask, 'GGA')
vxc = ni.eval_xc(mf.xc, rho, 0, deriv=1)[1]
vrho, vgamma = vxc[:2]
wv = numpy.empty_like(rho)
wv[0] = weight * vrho
wv[1:] = rho[1:] * (weight * vgamma * 2)
aow = numpy.einsum('npi,np->pi', ao[:4], wv)
for i in range(6):
vj1[i] += lib.dot(ao[i+4].T, aow)
aow = numpy.einsum('npi,np->pi', ao[[XXX,XXY,XXZ]], wv[1:4])
vj1[0] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[XXY,XYY,XYZ]], wv[1:4])
vj1[1] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[XXZ,XYZ,XZZ]], wv[1:4])
vj1[2] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[XYY,YYY,YYZ]], wv[1:4])
vj1[3] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[XYZ,YYZ,YZZ]], wv[1:4])
vj1[4] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[XZZ,YZZ,ZZZ]], wv[1:4])
vj1[5] += lib.dot(aow.T, ao[0])
rho = vxc = vrho = vgamma = wv = aow = None
else:
raise NotImplementedError('meta-GGA')
veff1ii += vj1[[0,1,2,1,3,4,2,4,5]]
vj1 = vk1 = None
t1 = log.timer('contracting int2e_ipip1', *t1)
offsetdic = mol.offset_nr_by_atom()
frinv = h5py.File(tmpf.name, 'r')
rinv2aa = frinv['rinv2aa']
rinv2ab = frinv['rinv2ab']
de2 = numpy.zeros((mol.natm,mol.natm,3,3))
for i0, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = offsetdic[ia]
h_2 = rinv2ab[str(ia)] + rinv2aa[str(ia)].value.transpose(0,2,1)
h_2[:,p0:p1] += h1ab[:,p0:p1]
s1ao = numpy.zeros((3,nao,nao))
s1ao[:,p0:p1] += s1a[:,p0:p1]
s1ao[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)
s1oo = numpy.einsum('xpq,pi,qj->xij', s1ao, mocc, mocc)
shls_slice = (shl0, shl1) + (0, mol.nbas)*3
int2e_ip1ip2 = mol._add_suffix('int2e_ip1ip2')
int2e_ipvip1 = mol._add_suffix('int2e_ipvip1')
if abs(hyb) > 1e-10:
vj1, vk1, vk2 = _vhf.direct_bindm(int2e_ip1ip2, 's1',
('ji->s1kl', 'li->s1kj', 'lj->s1ki'),
(dm0[:,p0:p1], dm0[:,p0:p1], dm0), 9,
mol._atm, mol._bas, mol._env,
shls_slice=shls_slice)
veff2 = vj1 * 2 - hyb * .5 * vk1
veff2[:,:,p0:p1] -= hyb * .5 * vk2
t1 = log.timer('contracting int2e_ip1ip2 for atom %d'%ia, *t1)
vj1, vk1 = _vhf.direct_bindm(int2e_ipvip1, 's2kl',
('lk->s1ij', 'li->s1kj'),
(dm0, dm0[:,p0:p1]), 9,
mol._atm, mol._bas, mol._env,
shls_slice=shls_slice)
veff2[:,:,p0:p1] += vj1.transpose(0,2,1)
veff2 -= hyb * .5 * vk1.transpose(0,2,1)
vj1 = vk1 = vk2 = None
t1 = log.timer('contracting int2e_ipvip1 for atom %d'%ia, *t1)
else:
vj1 = _vhf.direct_bindm(int2e_ip1ip2, 's1',
'ji->s1kl', dm0[:,p0:p1], 9,
mol._atm, mol._bas, mol._env,
shls_slice=shls_slice)
veff2 = vj1 * 2
t1 = log.timer('contracting int2e_ip1ip2 for atom %d'%ia, *t1)
vj1 = _vhf.direct_bindm(int2e_ipvip1, 's2kl',
'lk->s1ij', dm0, 9,
mol._atm, mol._bas, mol._env,
shls_slice=shls_slice)
veff2[:,:,p0:p1] += vj1.transpose(0,2,1)
t1 = log.timer('contracting int2e_ipvip1 for atom %d'%ia, *t1)
if xctype == 'LDA':
ao_deriv = 1
vj1[:] = 0
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho = ni.eval_rho2(mol, ao[0], mo_coeff, mo_occ, mask, 'LDA')
vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]
vrho = vxc[0]
frr = fxc[0]
half = lib.dot(ao[0], dm0[:,p0:p1].copy())
# *2 for \nabla|ket> in rho1
rho1 = numpy.einsum('xpi,pi->xp', ao[1:,:,p0:p1], half) * 2
aow = numpy.einsum('pi,xp->xpi', ao[0], weight*frr*rho1)
veff2[0] += lib.dot(ao[1].T, aow[0]) # d/d X_i ~ aow ~ rho1
veff2[1] += lib.dot(ao[2].T, aow[0])
veff2[2] += lib.dot(ao[3].T, aow[0])
veff2[3] += lib.dot(ao[1].T, aow[1])
veff2[4] += lib.dot(ao[2].T, aow[1])
veff2[5] += lib.dot(ao[3].T, aow[1])
veff2[6] += lib.dot(ao[1].T, aow[2])
veff2[7] += lib.dot(ao[2].T, aow[2])
veff2[8] += lib.dot(ao[3].T, aow[2])
aow = numpy.einsum('xpi,p->xpi', ao[1:,:,p0:p1], weight*vrho)
vj1[0] += lib.dot(aow[0].T, ao[1])
vj1[1] += lib.dot(aow[0].T, ao[2])
vj1[2] += lib.dot(aow[0].T, ao[3])
vj1[3] += lib.dot(aow[1].T, ao[1])
vj1[4] += lib.dot(aow[1].T, ao[2])
vj1[5] += lib.dot(aow[1].T, ao[3])
vj1[6] += lib.dot(aow[2].T, ao[1])
vj1[7] += lib.dot(aow[2].T, ao[2])
vj1[8] += lib.dot(aow[2].T, ao[3])
half = aow = None
veff2[:,:,p0:p1] += vj1.transpose(0,2,1)
elif xctype == 'GGA':
def get_wv(rho, rho1, weight, vxc, fxc):
vgamma = vxc[1]
frr, frg, fgg = fxc[:3]
ngrid = weight.size
sigma1 = numpy.einsum('xi,xi->i', rho[1:], rho1[1:])
wv = numpy.empty((4,ngrid))
wv[0] = frr * rho1[0]
wv[0] += frg * sigma1 * 2
wv[1:] = (fgg * sigma1 * 4 + frg * rho1[0] * 2) * rho[1:]
wv[1:] += vgamma * rho1[1:] * 2
wv *= weight
return wv
ao_deriv = 2
vj1[:] = 0
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho = ni.eval_rho2(mol, ao[:4], mo_coeff, mo_occ, mask, 'GGA')
vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]
vrho, vgamma = vxc[:2]
# (d_X \nabla_x mu) nu DM_{mu,nu}
half = lib.dot(ao[0], dm0[:,p0:p1].copy())
rho1X = numpy.einsum('xpi,pi->xp', ao[[1,XX,XY,XZ],:,p0:p1], half)
rho1Y = numpy.einsum('xpi,pi->xp', ao[[2,YX,YY,YZ],:,p0:p1], half)
rho1Z = numpy.einsum('xpi,pi->xp', ao[[3,ZX,ZY,ZZ],:,p0:p1], half)
# (d_X mu) (\nabla_x nu) DM_{mu,nu}
half = lib.dot(ao[1], dm0[:,p0:p1].copy())
rho1X[1] += numpy.einsum('pi,pi->p', ao[1,:,p0:p1], half)
rho1Y[1] += numpy.einsum('pi,pi->p', ao[2,:,p0:p1], half)
rho1Z[1] += numpy.einsum('pi,pi->p', ao[3,:,p0:p1], half)
half = lib.dot(ao[2], dm0[:,p0:p1].copy())
rho1X[2] += numpy.einsum('pi,pi->p', ao[1,:,p0:p1], half)
rho1Y[2] += numpy.einsum('pi,pi->p', ao[2,:,p0:p1], half)
rho1Z[2] += numpy.einsum('pi,pi->p', ao[3,:,p0:p1], half)
half = lib.dot(ao[3], dm0[:,p0:p1].copy())
rho1X[3] += numpy.einsum('pi,pi->p', ao[1,:,p0:p1], half)
rho1Y[3] += numpy.einsum('pi,pi->p', ao[2,:,p0:p1], half)
rho1Z[3] += numpy.einsum('pi,pi->p', ao[3,:,p0:p1], half)
wv = get_wv(rho, rho1X, weight, vxc, fxc) * 2 # ~ vj1*2
aow = numpy.einsum('npi,np->pi', ao[[1,XX,XY,XZ]], wv) # dX
veff2[0] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[2,YX,YY,YZ]], wv) # dY
veff2[1] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[3,ZX,ZY,ZZ]], wv) # dZ
veff2[2] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[1:4], wv[1:4])
veff2[0] += lib.dot(ao[1].T, aow)
veff2[1] += lib.dot(ao[2].T, aow)
veff2[2] += lib.dot(ao[3].T, aow)
wv = get_wv(rho, rho1Y, weight, vxc, fxc) * 2
aow = numpy.einsum('npi,np->pi', ao[[1,XX,XY,XZ]], wv)
veff2[3] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[2,YX,YY,YZ]], wv)
veff2[4] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[3,ZX,ZY,ZZ]], wv)
veff2[5] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[1:4], wv[1:4])
veff2[3] += lib.dot(ao[1].T, aow)
veff2[4] += lib.dot(ao[2].T, aow)
veff2[5] += lib.dot(ao[3].T, aow)
wv = get_wv(rho, rho1Z, weight, vxc, fxc) * 2
aow = numpy.einsum('npi,np->pi', ao[[1,XX,XY,XZ]], wv)
veff2[6] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[2,YX,YY,YZ]], wv)
veff2[7] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[3,ZX,ZY,ZZ]], wv)
veff2[8] += lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[1:4], wv[1:4])
veff2[6] += lib.dot(ao[1].T, aow)
veff2[7] += lib.dot(ao[2].T, aow)
veff2[8] += lib.dot(ao[3].T, aow)
wv = numpy.empty_like(rho)
wv[0] = weight * vrho * .5
wv[1:] = rho[1:] * (weight * vgamma * 2)
aowx = numpy.einsum('npi,np->pi', ao[[1,XX,XY,XZ]], wv)
aowy = numpy.einsum('npi,np->pi', ao[[2,YX,YY,YZ]], wv)
aowz = numpy.einsum('npi,np->pi', ao[[3,ZX,ZY,ZZ]], wv)
ao1 = aowx[:,p0:p1].copy()
ao2 = aowy[:,p0:p1].copy()
ao3 = aowz[:,p0:p1].copy()
vj1[0] += lib.dot(ao1.T, ao[1])
vj1[1] += lib.dot(ao1.T, ao[2])
vj1[2] += lib.dot(ao1.T, ao[3])
vj1[3] += lib.dot(ao2.T, ao[1])
vj1[4] += lib.dot(ao2.T, ao[2])
vj1[5] += lib.dot(ao2.T, ao[3])
vj1[6] += lib.dot(ao3.T, ao[1])
vj1[7] += lib.dot(ao3.T, ao[2])
vj1[8] += lib.dot(ao3.T, ao[3])
ao1 = ao[1,:,p0:p1].copy()
ao2 = ao[2,:,p0:p1].copy()
ao3 = ao[3,:,p0:p1].copy()
vj1[0] += lib.dot(ao1.T, aowx)
vj1[1] += lib.dot(ao1.T, aowy)
vj1[2] += lib.dot(ao1.T, aowz)
vj1[3] += lib.dot(ao2.T, aowx)
vj1[4] += lib.dot(ao2.T, aowy)
vj1[5] += lib.dot(ao2.T, aowz)
vj1[6] += lib.dot(ao3.T, aowx)
vj1[7] += lib.dot(ao3.T, aowy)
vj1[8] += lib.dot(ao3.T, aowz)
veff2[:,:,p0:p1] += vj1.transpose(0,2,1)
else:
raise NotImplementedError('meta-GGA')
for j0, ja in enumerate(atmlst):
q0, q1 = offsetdic[ja][2:]
# *2 for double occupancy, *2 for +c.c.
mo1 = lib.chkfile.load(hess_mf.chkfile, 'scf_mo1/%d'%ja)
h1ao = lib.chkfile.load(hess_mf.chkfile, 'scf_h1ao/%d'%ia)
dm1 = numpy.einsum('ypi,qi->ypq', mo1, mocc)
de = numpy.einsum('xpq,ypq->xy', h1ao, dm1) * 4
dm1 = numpy.einsum('ypi,qi,i->ypq', mo1, mocc, mo_energy[:nocc])
de -= numpy.einsum('xpq,ypq->xy', s1ao, dm1) * 4
de -= numpy.einsum('xpq,ypq->xy', s1oo, e1s[j0]) * 2
de = de.reshape(-1)
v2aa = rinv2aa[str(ja)].value
v2ab = rinv2ab[str(ja)].value
de += numpy.einsum('xpq,pq->x', v2aa[:,p0:p1], dm0[p0:p1])*2
de += numpy.einsum('xpq,pq->x', v2ab[:,p0:p1], dm0[p0:p1])*2
de += numpy.einsum('xpq,pq->x', h_2[:,:,q0:q1], dm0[:,q0:q1])*2
de += numpy.einsum('xpq,pq->x', veff2[:,q0:q1], dm0[q0:q1])*2
de -= numpy.einsum('xpq,pq->x', s1ab[:,p0:p1,q0:q1], dme0[p0:p1,q0:q1])*2
if ia == ja:
de += numpy.einsum('xpq,pq->x', h1aa[:,p0:p1], dm0[p0:p1])*2
de -= numpy.einsum('xpq,pq->x', v2aa, dm0)*2
de -= numpy.einsum('xpq,pq->x', v2ab, dm0)*2
de += numpy.einsum('xpq,pq->x', veff1ii[:,p0:p1], dm0[p0:p1])*2
de -= numpy.einsum('xpq,pq->x', s1aa[:,p0:p1], dme0[p0:p1])*2
de2[i0,j0] = de.reshape(3,3)
frinv.close()
log.timer('RHF hessian', *time0)
return de2
def make_h1(mf, mo_coeff, mo_occ, chkfile=None, atmlst=None, verbose=logger.WARN):
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(mf.stdout, mf.verbose)
mol = mf.mol
if atmlst is None:
atmlst = range(mol.natm)
nao, nmo = mo_coeff.shape
mocc = mo_coeff[:,mo_occ>0]
dm0 = numpy.dot(mocc, mocc.T) * 2
ni = copy.copy(mf._numint)
if USE_XCFUN:
try:
ni.libxc = dft.xcfun
xctype = ni._xc_type(mf.xc)
except (ImportError, KeyError, NotImplementedError):
ni.libxc = dft.libxc
xctype = ni._xc_type(mf.xc)
else:
xctype = ni._xc_type(mf.xc)
grids = mf.grids
hyb = ni.libxc.hybrid_coeff(mf.xc)
max_memory = 4000
h1a =-(mol.intor('int1e_ipkin', comp=3) +
mol.intor('int1e_ipnuc', comp=3))
offsetdic = mol.offset_nr_by_atom()
h1aos = []
for i0, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = offsetdic[ia]
mol.set_rinv_origin(mol.atom_coord(ia))
h1ao = -mol.atom_charge(ia) * mol.intor('int1e_iprinv', comp=3)
h1ao[:,p0:p1] += h1a[:,p0:p1]
h1ao = h1ao + h1ao.transpose(0,2,1)
shls_slice = (shl0, shl1) + (0, mol.nbas)*3
int2e_ip1 = mol._add_suffix('int2e_ip1')
if abs(hyb) > 1e-10:
vj1, vj2, vk1, vk2 = \
_vhf.direct_bindm(int2e_ip1, 's2kl',
('ji->s2kl', 'lk->s1ij', 'li->s1kj', 'jk->s1il'),
(-dm0[:,p0:p1], -dm0, -dm0[:,p0:p1], -dm0),
3, mol._atm, mol._bas, mol._env,
shls_slice=shls_slice)
for i in range(3):
lib.hermi_triu(vj1[i], 1)
veff = vj1 - hyb*.5*vk1
veff[:,p0:p1] += vj2 - hyb*.5*vk2
else:
vj1, vj2 = \
_vhf.direct_bindm(int2e_ip1, 's2kl',
('ji->s2kl', 'lk->s1ij'),
(-dm0[:,p0:p1], -dm0),
3, mol._atm, mol._bas, mol._env,
shls_slice=shls_slice)
for i in range(3):
lib.hermi_triu(vj1[i], 1)
veff = vj1
veff[:,p0:p1] += vj2
if xctype == 'LDA':
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho = ni.eval_rho2(mol, ao[0], mo_coeff, mo_occ, mask, 'LDA')
vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]
vrho = vxc[0]
frr = fxc[0]
half = lib.dot(ao[0], dm0[:,p0:p1].copy())
rho1 = numpy.einsum('xpi,pi->xp', ao[1:,:,p0:p1], half)
aow = numpy.einsum('pi,xp->xpi', ao[0], weight*frr*rho1)
aow1 = numpy.einsum('xpi,p->xpi', ao[1:,:,p0:p1], weight*vrho)
aow[:,:,p0:p1] += aow1
veff[0] += lib.dot(-aow[0].T, ao[0])
veff[1] += lib.dot(-aow[1].T, ao[0])
veff[2] += lib.dot(-aow[2].T, ao[0])
half = aow = aow1 = None
elif xctype == 'GGA':
def get_wv(rho, rho1, weight, vxc, fxc):
vgamma = vxc[1]
frr, frg, fgg = fxc[:3]
ngrid = weight.size
sigma1 = numpy.einsum('xi,xi->i', rho[1:], rho1[1:])
wv = numpy.empty((4,ngrid))
wv[0] = frr * rho1[0]
wv[0] += frg * sigma1 * 2
wv[1:] = (fgg * sigma1 * 4 + frg * rho1[0] * 2) * rho[1:]
wv[1:] += vgamma * rho1[1:] * 2
wv *= weight
return wv
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho = ni.eval_rho2(mol, ao[:4], mo_coeff, mo_occ, mask, 'GGA')
vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]
vrho, vgamma = vxc[:2]
# (d_X \nabla_x mu) nu DM_{mu,nu}
half = lib.dot(ao[0], dm0[:,p0:p1].copy())
rho1X = numpy.einsum('xpi,pi->xp', ao[[1,XX,XY,XZ],:,p0:p1], half)
rho1Y = numpy.einsum('xpi,pi->xp', ao[[2,YX,YY,YZ],:,p0:p1], half)
rho1Z = numpy.einsum('xpi,pi->xp', ao[[3,ZX,ZY,ZZ],:,p0:p1], half)
# (d_X mu) (\nabla_x nu) DM_{mu,nu}
half = lib.dot(ao[1], dm0[:,p0:p1].copy())
rho1X[1] += numpy.einsum('pi,pi->p', ao[1,:,p0:p1], half)
rho1Y[1] += numpy.einsum('pi,pi->p', ao[2,:,p0:p1], half)
rho1Z[1] += numpy.einsum('pi,pi->p', ao[3,:,p0:p1], half)
half = lib.dot(ao[2], dm0[:,p0:p1].copy())
rho1X[2] += numpy.einsum('pi,pi->p', ao[1,:,p0:p1], half)
rho1Y[2] += numpy.einsum('pi,pi->p', ao[2,:,p0:p1], half)
rho1Z[2] += numpy.einsum('pi,pi->p', ao[3,:,p0:p1], half)
half = lib.dot(ao[3], dm0[:,p0:p1].copy())
rho1X[3] += numpy.einsum('pi,pi->p', ao[1,:,p0:p1], half)
rho1Y[3] += numpy.einsum('pi,pi->p', ao[2,:,p0:p1], half)
rho1Z[3] += numpy.einsum('pi,pi->p', ao[3,:,p0:p1], half)
wv = get_wv(rho, rho1X, weight, vxc, fxc)
wv[0] *= .5
aow = numpy.einsum('npi,np->pi', ao[:4], wv)
veff[0] -= lib.transpose_sum(lib.dot(aow.T, ao[0]))
wv = get_wv(rho, rho1Y, weight, vxc, fxc)
wv[0] *= .5
aow = numpy.einsum('npi,np->pi', ao[:4], wv)
veff[1] -= lib.transpose_sum(lib.dot(aow.T, ao[0]))
wv = get_wv(rho, rho1Z, weight, vxc, fxc)
wv[0] *= .5
aow = numpy.einsum('npi,np->pi', ao[:4], wv)
veff[2] -= lib.transpose_sum(lib.dot(aow.T, ao[0]))
wv = numpy.empty_like(rho)
wv[0] = weight * vrho
wv[1:] = rho[1:] * (weight * vgamma * 2)
aow = numpy.einsum('npi,np->pi', ao[:4], wv)
veff[0,p0:p1] -= lib.dot(ao[1,:,p0:p1].T.copy(), aow)
veff[1,p0:p1] -= lib.dot(ao[2,:,p0:p1].T.copy(), aow)
veff[2,p0:p1] -= lib.dot(ao[3,:,p0:p1].T.copy(), aow)
aow = numpy.einsum('npi,np->pi', ao[[XX,XY,XZ],:,p0:p1], wv[1:4])
veff[0,p0:p1] -= lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[YX,YY,YZ],:,p0:p1], wv[1:4])
veff[1,p0:p1] -= lib.dot(aow.T, ao[0])
aow = numpy.einsum('npi,np->pi', ao[[ZX,ZY,ZZ],:,p0:p1], wv[1:4])
veff[2,p0:p1] -= lib.dot(aow.T, ao[0])
else:
raise NotImplementedError('meta-GGA')
veff = veff + veff.transpose(0,2,1)
if chkfile is None:
h1aos.append(h1ao+veff)
else:
key = 'scf_h1ao/%d' % ia
lib.chkfile.save(chkfile, key, h1ao+veff)
if chkfile is None:
return h1aos
else:
return chkfile
def _contract_xc_kernel(mf, xc_code, dms, max_memory=2000):
mol = mf.mol
grids = mf.grids
ni = copy.copy(mf._numint)
if USE_XCFUN:
try:
ni.libxc = dft.xcfun
xctype = ni._xc_type(xc_code)
except (ImportError, KeyError, NotImplementedError):
ni.libxc = dft.libxc
xctype = ni._xc_type(xc_code)
else:
xctype = ni._xc_type(xc_code)
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
nao, nmo = mo_coeff.shape
ndm = len(dms)
shls_slice = (0, mol.nbas)
ao_loc = mol.ao_loc_nr()
dms = numpy.asarray(dms)
dms = (dms + dms.transpose(0,2,1)) * .5
v1ao = numpy.zeros((ndm,nao,nao))
if xctype == 'LDA':
ao_deriv = 0
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho = ni.eval_rho2(mol, ao, mo_coeff, mo_occ, mask, 'LDA')
fxc = ni.eval_xc(xc_code, rho, 0, deriv=2)[2]
frho = fxc[0]
for i, dm in enumerate(dms):
rho1 = ni.eval_rho(mol, ao, dm, mask, xctype)
aow = numpy.einsum('pi,p->pi', ao, weight*frho*rho1)
v1ao[i] += numint._dot_ao_ao(mol, aow, ao, mask, shls_slice, ao_loc)
rho1 = aow = None
elif xctype == 'GGA':
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho = ni.eval_rho2(mol, ao[:4], mo_coeff, mo_occ, mask, 'GGA')
vxc, fxc = ni.eval_xc(xc_code, rho, 0, deriv=2)[1:3]
vgamma = vxc[1]
frr, frg, fgg = fxc[:3]
for i, dm in enumerate(dms):
rho1 = ni.eval_rho(mol, ao, dm, mask, 'GGA')
sigma1 = numpy.einsum('xi,xi->i', rho[1:], rho1[1:])
ngrid = weight.size
wv = numpy.empty((4,ngrid))
wv[0] = frr * rho1[0]
wv[0] += frg * sigma1 * 2
wv[1:] = (fgg * sigma1 * 4 + frg * rho1[0] * 2) * rho[1:]
wv[1:] += vgamma * rho1[1:] * 2
wv[1:] *= 2 # for (\nabla\mu) \nu + \mu (\nabla\nu)
wv *= weight
aow = numpy.einsum('npi,np->pi', ao, wv)
v1ao[i] += numint._dot_ao_ao(mol, aow, ao[0], mask, shls_slice, ao_loc)
else:
raise NotImplementedError('meta-GGA')
for i in range(ndm):
v1ao[i] = (v1ao[i] + v1ao[i].T) * .5
return v1ao
class Hessian(rhf.Hessian):
'''Non-relativistic restricted Hartree-Fock hessian'''
def make_h1(self, mo_coeff, mo_occ, chkfile=None, atmlst=None,
verbose=None):
return make_h1(self._scf, mo_coeff, mo_occ, chkfile, atmlst, verbose)
hess_elec = hess_elec
def prange(start, end, step):
for i in range(start, end, step):
yield i, min(i+step, end)
if __name__ == '__main__':
from pyscf import gto
from pyscf import dft
from pyscf.dft import rks_grad
dft.numint._NumInt.libxc = dft.xcfun
#xc_code = 'lda,vwn'
xc_code = 'blyp'
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
[1 , (1. , 0. , 0.000)],
[1 , (0. , 1. , 0.000)],
[1 , (0. , -1.517 , 1.177)],
[1 , (0. , 1.517 , 1.177)],
]
mol.basis = '631g'
#mol.unit = 'B'
mol.build()
mf = dft.RKS(mol)
mf.grids.level = 4
mf.grids.prune = False
mf.xc = xc_code
mf.conv_tol = 1e-14
mf.kernel()
n3 = mol.natm * 3
h = Hessian(mf)
e2 = h.kernel().transpose(0,2,1,3).reshape(n3,n3)
def grad1(coord, ptr, x, inc):
coord = coord.copy()
mol._env[ptr:ptr+3] = coord + numpy.asarray(x)*inc
mf = dft.RKS(mol).set(conv_tol=1e-14)
mf.grids.level = 4
mf.grids.prune = False
mf.xc = xc_code
e1a = mf.run().apply(rks_grad.Gradients).kernel()
mol._env[ptr:ptr+3] = coord - numpy.asarray(x)*inc
mf = dft.RKS(mol).set(conv_tol=1e-14)
mf.grids.level = 4
mf.grids.prune = False
mf.xc = xc_code
e1b = mf.run().apply(rks_grad.Gradients).kernel()
mol._env[ptr:ptr+3] = coord
return (e1a-e1b)/(2*inc)
e2ref = []
for ia in range(mol.natm):
coord = mol.atom_coord(ia)
ptr = mol._atm[ia,gto.PTR_COORD]
e2ref.append(grad1(coord, ptr, (1,0,0), .5e-3))
e2ref.append(grad1(coord, ptr, (0,1,0), .5e-3))
e2ref.append(grad1(coord, ptr, (0,0,1), .5e-3))
e2ref = numpy.asarray(e2ref).reshape(-1,n3)
print abs(e2ref).sum()
numpy.set_printoptions(2,linewidth=100)
print numpy.linalg.norm(e2-e2ref)
for i in range(n3):
print e2ref[i]-e2[i], abs(e2ref[i]-e2[i]).max()
## partial derivative for C
# e2 = h.hess_elec().transpose(0,2,1,3).reshape(n3,n3)
# g1a = mf.apply(rks_grad.Gradients).grad_elec()
# ia = 0
# coord = mol.atom_coord(ia)
# ptr = mol._atm[ia,gto.PTR_COORD]
# inc = 1e-4
# coord = coord.copy()
# mol._env[ptr:ptr+3] = coord + numpy.asarray((0,1,0))*inc
# mf = dft.RKS(mol)
# mf.grids.level = 4
# mf.grids.prune = False
# mf.conv_tol = 1e-14
# mf.xc = xc_code
# mf.kernel()
# mol._env[ptr:ptr+3] = coord
# g1b = mf.apply(rks_grad.Gradients).grad_elec()
# print (g1b-g1a)/inc
# print e2[1].reshape(-1,3)
## partial derivative for R
# e2 = h.hess_elec().transpose(0,2,1,3).reshape(n3,n3)
# g1a = mf.apply(rks_grad.Gradients).grad_elec()
# ia = 0
# coord = mol.atom_coord(ia)
# ptr = mol._atm[ia,gto.PTR_COORD]
# inc = 1e-4
# coord = coord.copy()
# mol._env[ptr:ptr+3] = coord + numpy.asarray((0,1,0))*inc
# g1b = mf.apply(rks_grad.Gradients).grad_elec()
# print (g1b-g1a)/inc
# print e2[1].reshape(-1,3)
# g1a = mf.apply(rks_grad.Gradients).grad_elec()
# ia = 1
# coord = mol.atom_coord(ia)
# ptr = mol._atm[ia,gto.PTR_COORD]
# inc = 1e-4
# coord = coord.copy()
# mol._env[ptr:ptr+3] = coord + numpy.asarray((0,1,0))*inc
# g1b = mf.apply(rks_grad.Gradients).grad_elec()
# print (g1b-g1a)/inc
# print e2[4].reshape(-1,3)
# h^1
# e2 = h.hess_elec()
# dm0 = mf.make_rdm1()
# g1a = rks_grad.get_veff(rks_grad.Gradients(mf), mol)
# #g1a = mf.get_veff(mol, dm0)
# ia = 0
# coord = mol.atom_coord(ia)
# ptr = mol._atm[ia,gto.PTR_COORD]
# inc = 1e-4
# coord = coord.copy()
# mol._env[ptr:ptr+3] = coord + numpy.asarray((0,1,0))*inc
# mf._eri = None
# g1b = rks_grad.get_veff(rks_grad.Gradients(mf), mol)
# #g1b = mf.get_veff(mol, dm0)
# print (g1b-g1a)/inc
| [
"[email protected]"
] | |
bacb1da7545cd9eba476314710d4b271f50af7c7 | 600283415a6a403b0a12ee8b5b4a3ff5d6aa757a | /templates-demo/venv/bin/pip3 | 50b5431b3c65b767022c0bb2a67f39c43c6248ba | [] | no_license | pixb/flask-demo | 0f2ef9a8bc6315c92e0aec6cac0acfdcf7bc8f03 | a40686eb25df0ca6379bc409ba34ef0425a863f8 | refs/heads/master | 2023-07-25T06:35:08.364038 | 2023-07-11T16:09:49 | 2023-07-11T16:09:49 | 336,691,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | #!/home/pix/dev/code/python/python-web-demo/templates-demo/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | ||
ae47ebf4bd9e9ee2b5a327015ca46141aaec74fd | 609444f644b60898e1ae9f8f52679a83b65a5df6 | /afrl/cmasi/__init__.py | 933720cead057b724c263f88da292372fd9ef63b | [] | no_license | sahabi/Shield_exp | 7bdef30f11edc34bf0811445aaa667cb4814f0e1 | 09d601fa1a346e0f780d5a438b6c5fbd3d4b6deb | refs/heads/master | 2021-01-12T05:41:57.468997 | 2016-12-22T20:16:07 | 2016-12-22T20:16:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | __all__ = [ "SeriesEnum", "AbstractGeometry", "KeyValuePair", "Location3D", "PayloadAction", "PayloadConfiguration", "PayloadState", "VehicleAction", "Task", "SearchTask", "AbstractZone", "EntityConfiguration", "FlightProfile", "AirVehicleConfiguration", "EntityState", "AirVehicleState", "Wedge", "AreaSearchTask", "CameraAction", "CameraConfiguration", "GimballedPayloadState", "CameraState", "Circle", "GimbalAngleAction", "GimbalConfiguration", "GimbalScanAction", "GimbalStareAction", "GimbalState", "GoToWaypointAction", "KeepInZone", "KeepOutZone", "LineSearchTask", "NavigationAction", "LoiterAction", "LoiterTask", "Waypoint", "MissionCommand", "MustFlyTask", "OperatorSignal", "OperatingRegion", "AutomationRequest", "PointSearchTask", "Polygon", "Rectangle", "RemoveTasks", "ServiceStatus", "SessionStatus", "VehicleActionCommand", "VideoStreamAction", "VideoStreamConfiguration", "VideoStreamState", "AutomationResponse", "RemoveZones", "RemoveEntities", "FlightDirectorAction", "WeatherReport", "FollowPathCommand", "PathWaypoint", "StopMovementAction", "WaypointTransfer", "PayloadStowAction", "WavelengthBand", "NavigationMode", "FOVOperationMode", "GimbalPointingMode", "ZoneAvoidanceType", "LoiterType", "LoiterDirection", "ServiceStatusType", "SimulationStatusType", "SpeedType", "TurnType", "CommandStatusType", "AltitudeType", "TravelMode", "WaypointTransferMode", "Play", "Command", "ShieldInfo" ]
SERIES_NAME = "CMASI"
#Series Name turned into a long for quick comparisons.
SERIES_NAME_ID = 4849604199710720000
SERIES_VERSION = 3
| [
"[email protected]"
] | |
4090b855d38ff1df15e8df2efe8df61b15a3b630 | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/Mastering-Machine-Learning-scikit-learn/NumPy-Cookbook/NumPy Cookbook 2nd Edition_CodeBundle/Final Code/0945OS_05_Final Code/ch5code/memmap.py | cfb0133647aece53a7a0d204bfbeaf4a3dd9f4dd | [] | no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 891 | py | import numpy as np
import matplotlib.pyplot as plt
N = 512
NSQUARES = 30
# Initialize
img = np.zeros((N, N), np.uint8)
centers = np.random.random_integers(0, N, size=(NSQUARES, 2))
radii = np.random.randint(0, N/9, size=NSQUARES)
colors = np.random.randint(100, 255, size=NSQUARES)
# Generate squares
for i in xrange(NSQUARES):
xindices = range(centers[i][0] - radii[i], centers[i][0] + radii[i])
xindices = np.clip(xindices, 0, N - 1)
yindices = range(centers[i][1] - radii[i], centers[i][1] + radii[i])
yindices = np.clip(yindices, 0, N - 1)
if len(xindices) == 0 or len(yindices) == 0:
continue
coordinates = np.meshgrid(xindices, yindices)
img[coordinates] = colors[i]
# Load into memory map
img.tofile('random_squares.raw')
img_memmap = np.memmap('random_squares.raw', shape=img.shape)
# Display image
plt.imshow(img_memmap)
plt.axis('off')
plt.show()
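# the raw file can later be mapped back read-only without loading it all into
# memory; the shape must be supplied again since .raw files carry no header:
#
#   img_again = np.memmap('random_squares.raw', dtype=np.uint8, mode='r',
#                         shape=(N, N))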
| [
"[email protected]"
] | |
8eda126b9ddc871d2c35c095ecc5b5f9c3159fc9 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-cloudide/huaweicloudsdkcloudide/v2/model/show_instance_request.py | 5da7cd5b7264aaf5c265c40ff95920dd44048ea5 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowInstanceRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str'
}
attribute_map = {
'instance_id': 'instance_id'
}
def __init__(self, instance_id=None):
"""ShowInstanceRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self.discriminator = None
self.instance_id = instance_id
@property
def instance_id(self):
"""Gets the instance_id of this ShowInstanceRequest.
实例id
:return: The instance_id of this ShowInstanceRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ShowInstanceRequest.
实例id
:param instance_id: The instance_id of this ShowInstanceRequest.
:type: str
"""
self._instance_id = instance_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowInstanceRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
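# A minimal usage sketch (the instance id value is illustrative; this request
# model is typically constructed for you by the generated client operation):
#
#   request = ShowInstanceRequest(instance_id="my-instance-id")
#   print(request.to_str())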
| [
"[email protected]"
] | |
1f55ed528984aa255668b43fc89c149e90036a34 | 51a37b7108f2f69a1377d98f714711af3c32d0df | /src/leetcode/P657.py | f83e958ab590ac9a78e88d3a6b8e2222b20faa3e | [] | no_license | stupidchen/leetcode | 1dd2683ba4b1c0382e9263547d6c623e4979a806 | 72d172ea25777980a49439042dbc39448fcad73d | refs/heads/master | 2022-03-14T21:15:47.263954 | 2022-02-27T15:33:15 | 2022-02-27T15:33:15 | 55,680,865 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | class Solution:
def judgeCircle(self, moves: str) -> bool:
x = y = 0
for move in moves:
if move == 'U':
x -= 1
if move == 'D':
x += 1
if move == 'L':
y -= 1
if move == 'R':
y += 1
if x == y == 0:
return True
return False
if __name__ == '__main__':
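    # "DURDLDRRLL" has three 'D' moves against a single 'U', so this prints False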
print(Solution().judgeCircle("DURDLDRRLL"))
| [
"[email protected]"
] | |
850b481185c8eb69502f7934a5f7e97ad8d38921 | db03d88ddb75cc9b044a193f5d5f7ac438d64e59 | /tests/test_checksum_generator.py | 9f92f8d7e15fb47ad05e573674e171e01d127ac5 | [
"MIT"
] | permissive | edeposit/edeposit.amqp.ltp | e461d00bf74fd54c797d670abdaa3bdccc84e5ce | df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e | refs/heads/master | 2021-01-17T11:30:24.223324 | 2016-03-03T11:10:38 | 2016-03-03T11:10:38 | 21,615,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,029 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import os
import shutil
import pytest
import tempfile
from ltp import checksum_generator as cg
# Variables ===================================================================
DIRNAME = ""
# Functions & objects =========================================================
def create_dir_structure():
dirname = tempfile.mkdtemp()
subdir = dirname + "/xex/"
os.mkdir(subdir)
with open(dirname + "/info.xml", "w") as f:
f.write("hello")
with open(subdir + "/xex.xx", "w") as f:
f.write("this is info file")
with open(dirname + "/somefile.txt", "w") as f:
f.write("somecontent")
with open(subdir + "/somefile.txt", "w") as f:
f.write("somecontent")
return dirname
# Tests =======================================================================
def setup_module(module):
global DIRNAME
DIRNAME = create_dir_structure()
def test_get_required_fn():
assert cg._get_required_fn("./hello", "./") == "/hello"
assert cg._get_required_fn("/home/xex/hello", "/home/xex/") == "/hello"
    with pytest.raises(ValueError):
        cg._get_required_fn("./hello", "/home")
    with pytest.raises(ValueError):
        cg._get_required_fn("/home/xex/hello", "./")
def test_generate_checksums():
checksums = cg.generate_checksums(DIRNAME)
assert checksums == {
'/somefile.txt': '18c0864b36d60f6036bf8eeab5c1fe7d',
'/xex/somefile.txt': '18c0864b36d60f6036bf8eeab5c1fe7d',
'/xex/xex.xx': 'e77b911e47bb73f6d69a70d246489fb0'
}
def test_generate_hashfile():
hashfile = cg.generate_hashfile(DIRNAME)
assert hashfile == """18c0864b36d60f6036bf8eeab5c1fe7d /somefile.txt
18c0864b36d60f6036bf8eeab5c1fe7d /xex/somefile.txt
e77b911e47bb73f6d69a70d246489fb0 /xex/xex.xx
"""
def teardown_module(module):
global DIRNAME
shutil.rmtree(DIRNAME)
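# these tests are normally run from the repository root with pytest
# (assuming pytest and the ltp package are installed):
#
#   pytest tests/test_checksum_generator.py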
| [
"[email protected]"
] | |
b2a188f7f610cf761151ec7b3382ab71ee475b77 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_5/byrtre001/question1.py | 6de3673699da383ae564bde3010d5be128888984 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | py | """program to simulate a simple BBS with one stored message and 2 fixed file
Trevor Byaruhanga
15 april 2014"""
# menu text shown before every prompt
a = ('Welcome to UCT BBS'+'\n'+
     'MENU'+'\n'+
     '(E)nter a message'+'\n'+
     '(V)iew message'+'\n'+
     '(L)ist files'+'\n'+
     '(D)isplay file'+'\n'+
     'e(X)it')
message = ''
print(a)
# prompt the user to choose one of the options in the menu
command = input('Enter your selection:'+'\n').upper()
# keep serving selections until the user exits with X
while command != 'X':
    if command == 'E':
        message = input('Enter the message:'+'\n')
    elif command == 'V':
        if message:
            print('The message is:', message)
        else:
            print('The message is: no message yet')
    elif command == 'L':
        print('List of files: 42.txt, 1015.txt')
    elif command == 'D':
        filename = input('Enter the filename:'+'\n')
        if filename == '42.txt':
            print('The meaning of life is blah blah blah ...')
        elif filename == '1015.txt':
            print('Computer Science class notes ... simplified'+'\n'+
                  'Do all work'+'\n'+
                  'Pass course'+'\n'+
                  'Be happy')
        else:
            print('File not found')
    print(a)
    command = input('Enter your selection:'+'\n').upper()
print('Goodbye!')
| [
"[email protected]"
] | |
cbc1a1a06bf7ee83de0cbc07b4f9b4f47f119827 | 9f4b3edaf1095ed58f5ff2d38d79d27b7e230e92 | /doc/source/python_as_glue.py | 81aaf1b30b0f672c46f77cb95265a1e0cfc8bcee | [
"BSD-3-Clause-LBNL"
] | permissive | zhishang72/TECA | dbd954ec48f5d9ad0643d26f5fbb6daf8dfd9842 | b8bed845e868133e4fbe01f4da40edd4c34cd775 | refs/heads/master | 2020-04-21T04:54:35.853007 | 2019-01-26T18:13:18 | 2019-01-26T18:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | # initialize MPI
from mpi4py import MPI
# bring in TECA
from teca_py_io import *
from teca_py_alg import *
# start the pipeline with the NetCDF CF-2.0 reader
cfr = teca_cf_reader.New()
cfr.set_files_regex('cam5_1_amip_run2\.cam2\.h2\.*')
cfr.set_x_axis_variable('lon')
cfr.set_y_axis_variable('lat')
cfr.set_t_axis_variable('time')
# add L2 norm operator to compute wind speed
l2n = teca_l2_norm.New()
l2n.set_component_0_variable('U850')
l2n.set_component_1_variable('V850')
l2n.set_l2_norm_variable('wind_speed')
l2n.set_input_connection(cfr.get_output_port())
# and vorticity operator to compute wind vorticity
vor = teca_vorticity.New()
vor.set_component_0_variable('U850')
vor.set_component_1_variable('V850')
vor.set_vorticity_variable('wind_vorticity')
vor.set_input_connection(l2n.get_output_port())
# and finally the tropical cyclone detector
tcd = teca_tc_detect.New()
tcd.set_pressure_variable('PSL')
tcd.set_temperature_variable('TMQ')
tcd.set_wind_speed_variable('wind_speed')
tcd.set_vorticity_variable('wind_vorticity')
tcd.set_input_connection(vor.get_output_port())
# now add the map-reduce. the pipeline above this algorithm runs in
# parallel using MPI+threads, with each thread processing one time
# step. the pipeline below this algorithm runs in serial on rank 0
# with 1 thread
mapr = teca_table_reduce.New()
mapr.set_thread_pool_size(2)
mapr.set_first_step(0)
mapr.set_last_step(-1)
mapr.set_input_connection(tcd.get_output_port())
# save the detected storms
twr = teca_table_writer.New()
twr.set_file_name('detections_%t%.csv')
twr.set_input_connection(mapr.get_output_port())
# the commands above connect and configure the pipeline
# this command actually runs it
twr.update()
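# a typical parallel invocation of this script (command line illustrative,
# assuming the TECA python modules are on PYTHONPATH):
#
#   mpiexec -n 4 python python_as_glue.py
#
# the teca_table_reduce stage above spreads the time steps across the MPI
# ranks and threads, so detection throughput scales with the resources given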
| [
"[email protected]"
] | |
db1ad97f218bbfe8114e47d1210c1a9a1bfafd4d | c099611e42319053888a747ea78468224e45a725 | /Polar-slepian/V_20/polarchannelsim_FERvsR_rateless_Det_Iter_delta_300_T8.py | 646a54ffe87882062f4e7a8a464441bd074fcbe9 | [] | no_license | sbsoumya/PolarProject-Code_Res | 118f54593716520c71cdc0e479236ffdc1a94f89 | 12a3b6fb24cf8160a519c74b064fd845066cbe0b | refs/heads/master | 2021-06-27T21:04:41.057937 | 2019-03-22T20:56:44 | 2019-03-22T20:56:44 | 129,615,052 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | #-------------------------------------------------------------------------------
# Name: polarchannelsim_FERvsR_rateless_det_Iterretro.py
# Purpose: FER VS R simulation for given msg_length and varying channel
#
# Author: soumya
#
# Created: 19/08/2017
#----------------------------------------
import numpy as np
import math as ma
import problib as pl
import polarencdec as ec
import polarconstruct as pcon
from datetime import datetime
import json
import polarchannel as pch
from pprint import pprint
import rateless_channel_det as rlc
from timeit import default_timer as timer
#=================================================================simulation
#------------Number of good channels = capacity
Nlist=[1024]
channel_plist=list(np.linspace(0.05,0.45,10))
compound_plist=[0.08349999999999963, 0.10599999999999965, 0.13099999999999967, 0.1594999999999997, 0.19249999999999973]
#[600, 525, 450, 375, 300]
T=8
msg_length=296
deltaG=38
runsim=1
start=timer()
for N in Nlist:
stamp=datetime.now().strftime("%y-%m-%d_%H-%M-%S")
filename="./simresults/polarchannel_FERvsR_rateless_Det_Iter_delta_"+str(msg_length)+"in"+str(N)+"_T"+str(T)+"_"+stamp+".txt"
f1=open(filename,'w')
print filename
print "RATE Vs FER REPORT Rateless Det Iter delta"
print "------------------------------------------"
print "Compound_plist:"
print compound_plist
print "sim ran :"+str(runsim)
print "T:"+str(T)
json.dump( "RATE Vs FER REPORT Rateless Det Iter delta",f1) ;f1.write("\n")
json.dump( "------------------------------------------",f1) ;f1.write("\n")
json.dump( "Compound_plist:",f1) ;f1.write("\n")
json.dump(compound_plist,f1) ;f1.write("\n")
json.dump("sim ran :"+str(runsim),f1) ;f1.write("\n")
json.dump("T:"+str(T),f1);f1.write("\n")
print "N="+str(N)
json.dump( "N="+str(N),f1) ;f1.write("\n")
used_rate=[];
achieved_rate=[]
FER=[];
Iter_problist=[]
for channel_p in channel_plist:
#print "channel_p:"+str(channel_p)
(u_rate,ach_rate,block_error,Iter_probdict)=rlc.send_rateless_det_Iter_retro_delta_sim(N,T,compound_plist,channel_p,msg_length,deltaG,runsim)
used_rate.append(u_rate)
achieved_rate.append(ach_rate)
FER.append(block_error)
Iter_problist.append(Iter_probdict)
block_error_exp=np.log10(FER).tolist()
print channel_plist
print achieved_rate
print block_error_exp
print Iter_problist
json.dump( "Rate vs Block_error=",f1) ;f1.write("\n")
json.dump(channel_plist,f1) ;f1.write("\n")
json.dump(achieved_rate,f1) ;f1.write("\n")
json.dump(block_error_exp,f1) ;f1.write("\n")
json.dump( "Iter Probabilities=",f1) ;f1.write("\n")
json.dump(Iter_problist,f1) ;f1.write("\n")
end = timer()
TC=(end-start)
print "Time taken:"+str(TC)
json.dump("Time taken:"+str(TC) ,f1) ;f1.write("\n")
| [
"[email protected]"
] | |
40c74d123eff6051b5952100a45abef935eac8db | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4030/894004030.py | 124523df12c7828177f53169e08f00b209133396 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 809 | py | from bots.botsconfig import *
from records004030 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'DX',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'G82', MIN: 1, MAX: 1},
{ID: 'N9', MIN: 0, MAX: 99999},
{ID: 'LS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'G83', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'G22', MIN: 0, MAX: 1},
{ID: 'G72', MIN: 0, MAX: 10},
{ID: 'G23', MIN: 0, MAX: 20},
]},
{ID: 'LE', MIN: 1, MAX: 1},
]},
{ID: 'G72', MIN: 0, MAX: 20},
{ID: 'G23', MIN: 0, MAX: 20},
{ID: 'G84', MIN: 1, MAX: 1},
{ID: 'G86', MIN: 1, MAX: 1},
{ID: 'G85', MIN: 1, MAX: 1},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
5779bcdba227fa41b598888395ca6c6cb372d7fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02614/s337550371.py | e941783ee59fd288f4e96377d81f254481c236d0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | h, w, k = map(int, input().split())
c = []
for _ in range(h):
    c.append(list(input()))
ans = 0
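# each set bit of i / j marks a row / column that is painted over; for every
# choice, count the surviving black squares '#' and tally choices leaving k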
for i in range(1 << h):
for j in range(1 << w):
cnt = 0
for n in range(h):
for m in range(w):
if i >> n & 1:
continue
if j >> m & 1:
continue
if c[n][m] == '#':
cnt += 1
if cnt == k:
ans += 1
print(ans) | [
"[email protected]"
] | |
8513012fbdbf8d8b052ea5fdb992e028c5dec60c | d7016f69993570a1c55974582cda899ff70907ec | /sdk/digitaltwins/azure-mgmt-digitaltwins/azure/mgmt/digitaltwins/v2020_03_01_preview/_azure_digital_twins_management_client.py | 7be0cc594bb1102a3a5a1b3abda1105fdbb3df76 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 4,798 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import AzureDigitalTwinsManagementClientConfiguration
from .operations import DigitalTwinsEndpointOperations, DigitalTwinsOperations, Operations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class AzureDigitalTwinsManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Azure Digital Twins Client for managing DigitalTwinsInstance.
:ivar digital_twins: DigitalTwinsOperations operations
:vartype digital_twins:
azure.mgmt.digitaltwins.v2020_03_01_preview.operations.DigitalTwinsOperations
:ivar digital_twins_endpoint: DigitalTwinsEndpointOperations operations
:vartype digital_twins_endpoint:
azure.mgmt.digitaltwins.v2020_03_01_preview.operations.DigitalTwinsEndpointOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.digitaltwins.v2020_03_01_preview.operations.Operations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The subscription identifier. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2020-03-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = AzureDigitalTwinsManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.digital_twins = DigitalTwinsOperations(self._client, self._config, self._serialize, self._deserialize)
self.digital_twins_endpoint = DigitalTwinsEndpointOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self) -> None:
self._client.close()
def __enter__(self) -> "AzureDigitalTwinsManagementClient":
self._client.__enter__()
return self
def __exit__(self, *exc_details) -> None:
self._client.__exit__(*exc_details)
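# Minimal usage sketch for the client defined above; it assumes the
# azure-identity package is available, that client.operations exposes the
# usual list() call, and that the subscription id below is a placeholder.
from azure.identity import DefaultAzureCredential

def example_list_operations():
    client = AzureDigitalTwinsManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    for op in client.operations.list():
        print(op)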

# ---- Python/Python Basics/80.exercise.py (repo: KULDEEPMALIKM41/Practices) ----
# addition of list elements
l=[5,8,9,10,12,44,12]
print('list is : ',l)
d=0
for element in l:
d+=element
print('list addition is : ', d)
print()
#print list element
l=[5,8,9,10,12,44,12]
print('list is : ',l)
e=1
for element in l:
print('element no',e, 'is : ',element)
e+=1
print()
#count number of elements in list
l=[1,2,3,4,5,6,7,8,9,10]
print('list is : ',l)
c=0
for element in l:
c+=1
print('number of element in list : ',c)
print()
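# The three exercises above can be written with built-ins: sum() for the
# total, enumerate() for numbered printing, and len() for the count.
l = [5, 8, 9, 10, 12, 44, 12]
print('list addition is :', sum(l))
for position, element in enumerate(l, start=1):
    print('element no', position, 'is :', element)
print('number of elements in list :', len(l))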
"[email protected]"
] | |
fc67686c7d358fe86c73044273f69669abee17fa | b22b0760b29d24cff24eda9d1c114094fd1a588f | /Python/Easy/1. Two Sum.py | 04d0f39a36f83c43424be8c7e9ed2cf68bb3927e | [] | no_license | MridulGangwar/Leetcode-Solutions | bbbaa06058a7b3e7621fc54050e344c06a256080 | d41b1bbd762030733fa271316f19724d43072cd7 | refs/heads/master | 2022-03-07T12:20:33.485573 | 2022-02-21T07:22:38 | 2022-02-21T07:22:38 | 231,700,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        # map each seen value to its index; single pass, O(n) time
        dic = {}
        for i, num in enumerate(nums):
            if target - num in dic:
                return [dic[target - num], i]
            else:
                dic[num] = i
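# Usage sketch: the method returns the indices of the two numbers that add up
# to the target, relying on the one-pass dictionary scan above.
if __name__ == "__main__":
    assert Solution().twoSum([2, 7, 11, 15], 9) == [0, 1]
    assert Solution().twoSum([3, 2, 4], 6) == [1, 2]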
"[email protected]"
] | |
926a132b2cac32d3d310cfd5c8940261302a4f1b | 602a4e86499841fbae43d84fc92908c533106aea | /core/actions/photoline.py | c9b5f8cb9eb9948ac8e096ec9914ec904d5370bc | [] | no_license | vden/TsoguNG | b187ccf1bef387417ec73467c51458d6f1443239 | f8d5e7ab9d85559aa163c232c9f28a24a2b7c2a4 | refs/heads/master | 2021-01-02T08:52:03.914218 | 2011-04-26T07:01:57 | 2011-04-26T07:01:57 | 1,663,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # -*- coding: utf-8 -*-
from core.portal.register import portalaction
from core.portal.render import render_to_portal
from core.views import get_object_by_url
from core.models import BaseObject
import random
@portalaction(verbose_name=u'Фотокалейдоскоп')  # "Photo kaleidoscope"
@render_to_portal(template='actions/photogallery.html')
def photoline(request):
    # states are stored in Russian: u'опубликовынный' (sic, "published") and u'на главной' ("on front page")
    result = BaseObject.nodes()(types=['News'], sort_fields=['-date_published'], states=[u'опубликовынный',u'на главной']).all()[:100]
all_imgs = []
for x in result:
all_imgs.extend( x.get_images() )
format = request.GET.get("format", "medium")
if format != "small":
format = "medium"
x = y = 160
count = 25
else:
x = y = 80
count = 70
print "EEE", len(all_imgs), count
imgs = random.sample(all_imgs, count)
return { 'imgs': imgs, 'format': format, 'x': x, 'y': y }
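# A table-driven variant of the size/count branch above (a sketch; the
# "small" and "medium" presets mirror the hard-coded values in photoline).
FORMAT_PRESETS = {
    'small': {'side': 80, 'count': 70},
    'medium': {'side': 160, 'count': 25},
}

def preset_for(requested):
    return FORMAT_PRESETS.get(requested, FORMAT_PRESETS['medium'])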

# ---- code/components/classifier/wrapper.py (repo: tuandnvn/ttk) ----
"""
Python wrapper around the MaxEnt Classifier
CLASSES
ClassifierWrapper
"""
import os
from ttk_path import TTK_ROOT
from library.tarsqi_constants import CLASSIFIER
from library.timeMLspec import TLINK, EIID, TID
from library.timeMLspec import RELTYPE, EVENT_INSTANCE_ID, TIME_ID
from library.timeMLspec import RELATED_TO_EVENT_INSTANCE, RELATED_TO_TIME, CONFIDENCE
from components.common_modules.component import ComponentWrapper
from utilities import logger
from components.classifier import vectors, tree_vectors,\
tree_vectors_with_narrative
from docmodel.xml_parser import Parser
from docmodel.xml_parser import XmlDocElement, XmlDocument
from adjacent_tlink_feature_extractor import feature_recollect
from feature_index import Feature_Index_Dict
PARSED_DOCUMENT = 'PARSED_DOC'
class ClassifierWrapper(ComponentWrapper):
"""Wraps the maxent link classifier."""
def __init__(self, tag, xmldoc, tarsqi_instance, auxillary = None):
"""Calls __init__ on the base class and initializes component_name,
DIR_CLASSIFIER, CREATION_EXTENSION, TMP_EXTENSION and
RETRIEVAL_EXTENSION."""
ComponentWrapper.__init__(self, tag, xmldoc, tarsqi_instance)
self.component_name = CLASSIFIER
self.DIR_CLASSIFIER = os.path.join(TTK_ROOT, 'components', 'classifier')
self.CREATION_EXTENSION = 'cla.i.xml'
self.TMP_EXTENSION = 'cla.t.xml'
self.RETRIEVAL_EXTENSION = 'cla.o.xml'
self.DIR_DATA = os.path.join(TTK_ROOT, 'data', 'tmp')
self.DICT_DATA = os.path.join(TTK_ROOT, 'data', 'dict')
# platform = self.document.getopt_platform()
# if platform == 'linux2':
# self.executable = 'mxtest.opt.linux'
# elif platform == 'darwin':
# self.executable = 'mxtest.opt.osx'
self.executable = 'mxtest.opt.linux'
self.auxillary = auxillary
def process_fragments(self):
"""Retrieve the XmlDocument and hand it to the classifier for processing. Processing will
update this slice when tlinks are added."""
os.chdir(self.DIR_CLASSIFIER)
perl = self.tarsqi_instance.getopt_perl()
ee_model = os.path.join('data', 'op.e-e.model')
et_model = os.path.join('data', 'op.e-t.model')
fragment_count = 0
for fragment in self.fragments:
base = fragment[0]
fragment_count += 1
fin = os.path.join(self.DIR_DATA, base+'.'+self.CREATION_EXTENSION)
ftmp = os.path.join(self.DIR_DATA, base+'.'+self.TMP_EXTENSION)
fout = os.path.join(self.DIR_DATA, base+'.'+self.RETRIEVAL_EXTENSION)
ee_vectors = fin + '.EE'
et_vectors = fin + '.ET'
ee_results = ee_vectors + '.REL'
et_results = et_vectors + '.REL'
fragment_doc = Parser().parse_file(open(fin, "r"))
vectors.create_vectors(fragment_doc, ee_vectors, et_vectors)
print 'done create vectors'
commands = [
"./%s -input %s -model %s -output %s > class.log" %
(self.executable, ee_vectors, ee_model, ee_results),
"./%s -input %s -model %s -output %s > class.log" %
(self.executable, et_vectors, et_model, et_results),
"%s collectClassifier.pl %s %s %s" %
(perl, ee_vectors, et_vectors, ftmp) ]
for command in commands:
os.system(command)
print 'done create features'
self._add_tlinks_to_fragment(fin, ftmp, fout)
def process_training_fragments(self):
"""Retrieve the TRAINING XmlDocument and hand it to the feature extracter for processing.
Features file will be used for training"""
os.chdir(self.DIR_CLASSIFIER)
perl = self.tarsqi_instance.getopt_perl()
fragment_count = 0
for fragment in self.fragments:
base = fragment[0]
fragment_count += 1
fin = os.path.join(self.DIR_DATA, base+'.'+self.CREATION_EXTENSION)
ee_vectors = fin + '.EE'
et_vectors = fin + '.ET'
# tt_vectors = fin + '.TT'
ee_train_vectors = fin + '.train.EE'
et_train_vectors = fin + '.train.ET'
# tt_train_vectors = fin + '.train.TT'
fragment_doc = Parser().parse_file(open(fin, "r"))
fragment_doc.set_dct_timex( self.document.get_dct() )
# vectors.create_vectors(fragment_doc, ee_vectors, et_vectors, tt_vectors)
# vectors.create_vectors(fragment_doc, ee_vectors, et_vectors)
"""
Without narrative scheme
"""
dictionary_file = os.path.join( self.DICT_DATA,
'feature_index.dict')
"""
With narrative scheme
"""
# dictionary_file = os.path.join( self.DICT_DATA,
# 'feature_index_with_narrative_scheme.dict' )
feature_index_dict = Feature_Index_Dict()
feature_index_dict.load_from_file(dictionary_file)
"""
Without narrative scheme
"""
tree_vectors.create_vectors(fragment_doc, self.auxillary[PARSED_DOCUMENT],
feature_index_dict, ee_vectors, et_vectors)
"""
With narrative scheme
"""
# tree_vectors_with_narrative.create_vectors(fragment_doc, self.auxillary[PARSED_DOCUMENT],
# feature_index_dict, ee_vectors, et_vectors)
feature_index_dict.dump_to_file(dictionary_file)
print 'done create vectors'
# feature_recollect( self.document, ee_vectors, et_vectors, tt_vectors,
# ee_train_vectors, et_train_vectors, tt_train_vectors)
feature_recollect( self.document, ee_vectors, et_vectors,
ee_train_vectors, et_train_vectors)
print 'done collect training label and features'
print '======================================================'
def _add_tlinks_to_fragment(self, in_fragment, tmp_fragment, out_fragment):
"""Takes the links created by the classifier and merges them into the
input fragment."""
xmldoc1 = Parser().parse_file(open(in_fragment,'r'))
xmldoc2 = Parser().parse_file(open(tmp_fragment,'r'))
for tlink in xmldoc2.get_tags(TLINK):
reltype = tlink.attrs[RELTYPE]
id1 = tlink.attrs.get(EVENT_INSTANCE_ID, None)
if not id1:
id1 = tlink.attrs.get(TIME_ID, None)
if not id1:
logger.warn("Could not find id1 in " + tlink.content)
id2 = tlink.attrs.get(RELATED_TO_EVENT_INSTANCE, None)
if not id2:
id2 = tlink.attrs.get(RELATED_TO_TIME, None)
if not id2:
logger.warn("Could not find id2 in " + tlink.content)
origin = CLASSIFIER + ' ' + tlink.attrs.get(CONFIDENCE,'')
xmldoc1.add_tlink(reltype, id1, id2, origin)
xmldoc1.save_to_file(out_fragment)
def _add_links_to_xmldoc(self, xmldoc, ee_vectors, et_vectors, ee_results, et_results, fout):
"""Insert new tlinks into the xmldco using the vectors and the results
from the classifier."""
for (f1, f2) in ((ee_vectors, ee_results), (et_vectors, et_results)):
vector_file = open(f1)
classifier_file = open(f2)
for line in vector_file:
classifier_line = classifier_file.readline()
attrs = self._parse_vector_string(line)
id1 = self._get_id('0', attrs, line)
id2 = self._get_id('1', attrs, line)
(rel, confidence) = self._parse_classifier_line(classifier_line)[0:2]
origin = CLASSIFIER + ' ' + confidence
xmldoc.add_tlink(rel, id1, id2, origin)
def _parse_vector_string(self, line):
"""Return the attribute dictionaries from the vestor string. """
attrs = {}
for pair in line.split():
if pair.find('-') > -1:
(attr, val) = pair.split('-',1)
attrs[attr] = val
return attrs
def _parse_classifier_line(self, line):
"""Extract relType, confidence correct/incorrect and correct relation
from the classifier result line."""
line = line.strip()
(rel, confidence, judgment, correct_judgement) = line.split()
return (rel, confidence, judgment, correct_judgement)
def _get_id(self, prefix, attrs, line):
"""Get the eiid or tid for the first or second object in the
vector. The prefix is '0' or '1' and determines which object's
id is returned."""
id = attrs.get(prefix+EIID, attrs.get(prefix+TID, None))
if not id:
logger.warn("Could not find id in " + line)
return id
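# Standalone sketch of the result-line handling above: each classifier output
# line carries "relType confidence judgment correct_judgement"; this mirrors
# _parse_classifier_line without the surrounding class.
def parse_classifier_lines(lines):
    for line in lines:
        rel, confidence, judgment, correct = line.strip().split()
        yield {'rel': rel, 'confidence': float(confidence),
               'judgment': judgment, 'correct': correct}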

# ---- tools/perf/core/results_processor/processor_unittest.py (repo: Seshpenguin/chromium, BSD-3-Clause) ----
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for results_processor methods."""
import datetime
import os
import unittest
import mock
from core.results_processor import processor
from core.results_processor import testing
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import date_range
from tracing.value import histogram_set
class ResultsProcessorUnitTests(unittest.TestCase):
def testAddDiagnosticsToHistograms(self):
start_ts = 1500000000
start_iso = datetime.datetime.utcfromtimestamp(start_ts).isoformat() + 'Z'
test_result = testing.TestResult(
'benchmark/story',
output_artifacts={
'trace.html': testing.Artifact('/trace.html', 'gs://trace.html'),
},
start_time=start_iso,
tags=['story_tag:test'],
result_id='3',
)
test_result['_histograms'] = histogram_set.HistogramSet()
test_result['_histograms'].CreateHistogram('a', 'unitless', [0])
processor.AddDiagnosticsToHistograms(
test_result, test_suite_start=start_iso, results_label='label',
test_path_format='telemetry')
hist = test_result['_histograms'].GetFirstHistogram()
self.assertEqual(hist.diagnostics['labels'],
generic_set.GenericSet(['label']))
self.assertEqual(hist.diagnostics['benchmarks'],
generic_set.GenericSet(['benchmark']))
self.assertEqual(hist.diagnostics['benchmarkStart'],
date_range.DateRange(start_ts * 1e3))
self.assertEqual(hist.diagnostics['traceStart'],
date_range.DateRange(start_ts * 1e3))
self.assertEqual(hist.diagnostics['stories'],
generic_set.GenericSet(['story']))
self.assertEqual(hist.diagnostics['storyTags'],
generic_set.GenericSet(['test']))
self.assertEqual(hist.diagnostics['storysetRepeats'],
generic_set.GenericSet([3]))
self.assertEqual(hist.diagnostics['traceUrls'],
generic_set.GenericSet(['gs://trace.html']))
def testUploadArtifacts(self):
test_result = testing.TestResult(
'benchmark/story',
output_artifacts={
'logs': testing.Artifact('/log.log'),
'trace.html': testing.Artifact('/trace.html'),
'screenshot': testing.Artifact('/screenshot.png'),
},
)
with mock.patch('py_utils.cloud_storage.Insert') as cloud_patch:
cloud_patch.return_value = 'gs://url'
processor.UploadArtifacts(test_result, 'bucket', 'run1')
cloud_patch.assert_has_calls([
mock.call('bucket', 'run1/benchmark/story/logs', '/log.log'),
mock.call('bucket', 'run1/benchmark/story/trace.html', '/trace.html'),
mock.call('bucket', 'run1/benchmark/story/screenshot',
'/screenshot.png'),
],
any_order=True,
)
for artifact in test_result['outputArtifacts'].itervalues():
self.assertEqual(artifact['remoteUrl'], 'gs://url')
def testRunIdentifier(self):
with mock.patch('random.randint') as randint_patch:
randint_patch.return_value = 54321
run_identifier = processor.RunIdentifier(
results_label='src@abc + 123',
test_suite_start='2019-10-01T12:00:00.123456Z')
self.assertEqual(run_identifier, 'src_abc_123_20191001T120000_54321')
def testAggregateTraces(self):
test_result = testing.TestResult(
'benchmark/story2',
output_artifacts={
'trace/1.json': testing.Artifact(
os.path.join('test_run', 'story2', 'trace', '1.json')),
'trace/2.json': testing.Artifact(
os.path.join('test_run', 'story2', 'trace', '2.json')),
},
)
serialize_method = 'tracing.trace_data.trace_data.SerializeAsHtml'
with mock.patch(serialize_method) as mock_serialize:
processor.AggregateTraces(test_result)
self.assertEqual(mock_serialize.call_count, 1)
trace_files, file_path = mock_serialize.call_args[0][:2]
self.assertEqual(
set(trace_files),
set([
os.path.join('test_run', 'story2', 'trace', '1.json'),
os.path.join('test_run', 'story2', 'trace', '2.json'),
]),
)
self.assertEqual(
file_path,
os.path.join('test_run', 'story2', 'trace', 'trace.html'),
)
artifacts = test_result['outputArtifacts']
self.assertEqual(len(artifacts), 1)
self.assertEqual(artifacts.keys()[0], 'trace.html')
def testMeasurementToHistogram(self):
hist = processor.MeasurementToHistogram('a', {
'unit': 'sizeInBytes',
'samples': [1, 2, 3],
'description': 'desc',
})
self.assertEqual(hist.name, 'a')
self.assertEqual(hist.unit, 'sizeInBytes')
self.assertEqual(hist.sample_values, [1, 2, 3])
self.assertEqual(hist.description, 'desc')
def testMeasurementToHistogramLegacyUnits(self):
hist = processor.MeasurementToHistogram('a', {
'unit': 'seconds',
'samples': [1, 2, 3],
})
self.assertEqual(hist.name, 'a')
self.assertEqual(hist.unit, 'ms_smallerIsBetter')
self.assertEqual(hist.sample_values, [1000, 2000, 3000])
def testMeasurementToHistogramUnknownUnits(self):
with self.assertRaises(ValueError):
processor.MeasurementToHistogram('a', {'unit': 'yards', 'samples': [9]})
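# Table-driven sketch of the unit-conversion checks above; the expected legacy
# conversion (seconds -> ms, values scaled by 1000) follows
# testMeasurementToHistogramLegacyUnits.
UNIT_CASES = [
    ({'unit': 'sizeInBytes', 'samples': [1, 2, 3]}, 'sizeInBytes', [1, 2, 3]),
    ({'unit': 'seconds', 'samples': [1, 2, 3]}, 'ms_smallerIsBetter', [1000, 2000, 3000]),
]

def check_measurement_cases(cases=UNIT_CASES):
    for measurement, unit, samples in cases:
        hist = processor.MeasurementToHistogram('a', measurement)
        assert hist.unit == unit and hist.sample_values == samples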

# ---- coin_db/okex_kline_history.py (repo: lim1942/coin_helper) ----
from coin_db.base import InstrumentIdMysql
from coin_helper.settings import TZ_HOUR
class OkexKlineHistoryMysql(InstrumentIdMysql):
    # insert statement configuration
insert_fields = ('open','high','low','close','volume','time')
insert_sql = f"""INSERT IGNORE INTO table_name({','.join(insert_fields)}) VALUES({','.join(('%s' for _ in insert_fields))});"""
insert_fields_date = [('time','%Y-%m-%dT%H:%M:%S.%fZ',TZ_HOUR)]
insert_fields_multiple = ('open','high','low','close','volume')
    # query: default ordering column
    query_order_field = 'time'
    # create table: DDL (the `table_name` placeholder is presumably filled in per instrument by the base class)
table_sql = f"""CREATE TABLE If Not Exists `table_name` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`open` bigint(20) NOT NULL,
`high` bigint(20) NOT NULL,
`low` bigint(20) NOT NULL,
`close` bigint(20) NOT NULL,
`volume` bigint(20) NOT NULL,
`time` datetime(6) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `time` (`time`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
"""
@classmethod
def get_value_by_item(cls,item):
item = {'open':item[1],'high':item[2],'low':item[3],'close':item[4],'volume':item[5],'time':item[0]}
return super().get_value_by_item(item)
if __name__ == "__main__":
    OkexKlineHistoryMysql(instrument_id='BTC-USDT').create_tables()
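# Batch-insert sketch matching insert_sql above; it assumes a PyMySQL-style
# connection object and substitutes the concrete table name for `table_name`
# the way the (unshown) base class is presumed to do.
def bulk_insert(conn, table, rows):
    sql = OkexKlineHistoryMysql.insert_sql.replace('table_name', table)
    with conn.cursor() as cur:
        cur.executemany(sql, rows)   # rows: iterables matching insert_fields
    conn.commit()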
"[email protected]"
] | |
676fc4fbab4931a3870271f4f287804749359ed0 | fe33bdb20436a379a17d56b83816d7064cb75d90 | /src/rocon_multimaster/rocon_hub/src/rocon_hub/main.py | f1ce15a41be27d10d3bb1d6af1477ff7a91e47bd | [] | no_license | uml-robotics/catkin_tester | 764744614782acaff46f66f25dbd1650d0fcd5e8 | dfc8bb2026c06d0f97696a726a6773ff8b99496e | refs/heads/master | 2022-10-31T11:48:27.207535 | 2017-11-27T18:09:38 | 2017-11-27T18:09:38 | 111,495,779 | 0 | 1 | null | 2022-10-19T14:49:44 | 2017-11-21T03:45:59 | C | UTF-8 | Python | false | false | 3,280 | py | #!/usr/bin/env python
#
# License: BSD
# https://raw.github.com/robotics-in-concert/rocon_multimaster/license/LICENSE
#
##############################################################################
# Imports
##############################################################################
import sys
# Ros imports
import rospy
import std_srvs.srv as std_srvs
# Local imports
from . import utils
from . import redis_server
from . import ros_parameters
from . import watcher
from . import zeroconf
##############################################################################
# Variables
##############################################################################
redi = None
timeout = 15
##############################################################################
# Shutdown Handlers
##############################################################################
#
# This lets the hub have a controlled shutdown from an external party
# (in our special case of interest, from the conductor).
def ros_service_shutdown(unused_request):
shutdown()
return std_srvs.EmptyResponse()
def shutdown():
global redi
if redi is not None:
rospy.loginfo("Hub : shutting down.")
redi.shutdown()
redi = None
def wait_for_shutdown():
'''
Shutdown hook - we wait here for an external shutdown via ros service
(at which point redi is None)
timing out after a reasonable time if we need to.
'''
global redi
global timeout
count = 0.0
while count < timeout:
if redi is None:
return
else:
count += 0.5
rospy.rostime.wallsleep(0.5) # human time
rospy.logwarn("Hub : timed out waiting for external shutdown by ros service, forcing shutdown now.")
shutdown()
##############################################################################
# Main
##############################################################################
def main():
global redi
global timeout
while not utils.check_master():
rospy.logerr("Unable to communicate with master!")
rospy.rostime.wallsleep(1.0)
if rospy.is_shutdown():
sys.exit(utils.red_string("Unable to communicate with master!"))
rospy.init_node('hub')
param = ros_parameters.load()
# Installation checks - sys exits if the process if not installed.
utils.check_if_executable_available('redis-server')
if param['zeroconf']:
utils.check_if_executable_available('avahi-daemon')
if param['external_shutdown']:
timeout = param['external_shutdown_timeout']
rospy.on_shutdown(wait_for_shutdown)
unused_shutdown_service = rospy.Service('~shutdown', std_srvs.Empty, ros_service_shutdown)
redi = redis_server.RedisServer(param)
redi.start() # sys exits if server connection is unavailable or incorrect version
if param['zeroconf']:
zeroconf.advertise_port_to_avahi(param['port'], param['name']) # sys exits if running avahi-daemon not found
watcher_thread = watcher.WatcherThread('localhost', param['port'])
watcher_thread.start()
rospy.spin()
if not param['external_shutdown']:
# do it here, don't wait for the ros service to get triggered
shutdown()

# ---- service/routes.py (repo: jarty13/lab-flask-tdd, Apache-2.0) ----
# Copyright 2016, 2017 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pet Store Service
Paths:
------
GET /pets - Returns a list all of the Pets
GET /pets/{id} - Returns the Pet with a given id number
POST /pets - creates a new Pet record in the database
PUT /pets/{id} - updates a Pet record in the database
DELETE /pets/{id} - deletes a Pet record in the database
"""
import os
import sys
import logging
from flask import Flask, jsonify, request, url_for, make_response, abort
from flask_api import status # HTTP Status Codes
from werkzeug.exceptions import NotFound
# For this example we'll use SQLAlchemy, a popular ORM that supports a
# variety of backends including SQLite, MySQL, and PostgreSQL
from flask_sqlalchemy import SQLAlchemy
from .models import Pet, DataValidationError
# Import Flask application
from . import app
######################################################################
# Error Handlers
######################################################################
@app.errorhandler(DataValidationError)
def request_validation_error(error):
""" Handles Value Errors from bad data """
return bad_request(error)
@app.errorhandler(status.HTTP_400_BAD_REQUEST)
def bad_request(error):
""" Handles bad reuests with 400_BAD_REQUEST """
app.logger.warning(str(error))
return (
jsonify(
status=status.HTTP_400_BAD_REQUEST, error="Bad Request", message=str(error)
),
status.HTTP_400_BAD_REQUEST,
)
@app.errorhandler(status.HTTP_404_NOT_FOUND)
def not_found(error):
""" Handles resources not found with 404_NOT_FOUND """
app.logger.warning(str(error))
return (
jsonify(
status=status.HTTP_404_NOT_FOUND, error="Not Found", message=str(error)
),
status.HTTP_404_NOT_FOUND,
)
@app.errorhandler(status.HTTP_405_METHOD_NOT_ALLOWED)
def method_not_supported(error):
""" Handles unsuppoted HTTP methods with 405_METHOD_NOT_SUPPORTED """
app.logger.warning(str(error))
return (
jsonify(
status=status.HTTP_405_METHOD_NOT_ALLOWED,
error="Method not Allowed",
message=str(error),
),
status.HTTP_405_METHOD_NOT_ALLOWED,
)
@app.errorhandler(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
def mediatype_not_supported(error):
""" Handles unsuppoted media requests with 415_UNSUPPORTED_MEDIA_TYPE """
app.logger.warning(str(error))
return (
jsonify(
status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
error="Unsupported media type",
message=str(error),
),
status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
)
@app.errorhandler(status.HTTP_500_INTERNAL_SERVER_ERROR)
def internal_server_error(error):
""" Handles unexpected server error with 500_SERVER_ERROR """
app.logger.error(str(error))
return (
jsonify(
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
error="Internal Server Error",
message=str(error),
),
status.HTTP_500_INTERNAL_SERVER_ERROR,
)
######################################################################
# GET INDEX
######################################################################
@app.route("/")
def index():
""" Root URL response """
app.logger.info("Request for Root URL")
return (
jsonify(
name="Pet Demo REST API Service",
version="1.0",
paths=url_for("list_pets", _external=True),
),
status.HTTP_200_OK,
)
######################################################################
# LIST ALL PETS
######################################################################
@app.route("/pets", methods=["GET"])
def list_pets():
""" Returns all of the Pets """
app.logger.info("Request for pet list")
pets = []
category = request.args.get("category")
name = request.args.get("name")
if category:
pets = Pet.find_by_category(category)
elif name:
pets = Pet.find_by_name(name)
else:
pets = Pet.all()
results = [pet.serialize() for pet in pets]
app.logger.info("Returning %d pets", len(results))
return make_response(jsonify(results), status.HTTP_200_OK)
######################################################################
# RETRIEVE A PET
######################################################################
@app.route("/pets/<int:pet_id>", methods=["GET"])
def get_pets(pet_id):
"""
Retrieve a single Pet
This endpoint will return a Pet based on it's id
"""
app.logger.info("Request for pet with id: %s", pet_id)
pet = Pet.find(pet_id)
if not pet:
raise NotFound("Pet with id '{}' was not found.".format(pet_id))
app.logger.info("Returning pet: %s", pet.name)
return make_response(jsonify(pet.serialize()), status.HTTP_200_OK)
######################################################################
# ADD A NEW PET
######################################################################
@app.route("/pets", methods=["POST"])
def create_pets():
"""
Creates a Pet
This endpoint will create a Pet based the data in the body that is posted
"""
app.logger.info("Request to create a pet")
check_content_type("application/json")
pet = Pet()
pet.deserialize(request.get_json())
pet.create()
message = pet.serialize()
location_url = url_for("get_pets", pet_id=pet.id, _external=True)
app.logger.info("Pet with ID [%s] created.", pet.id)
return make_response(
jsonify(message), status.HTTP_201_CREATED, {"Location": location_url}
)
######################################################################
# UPDATE AN EXISTING PET
######################################################################
@app.route("/pets/<int:pet_id>", methods=["PUT"])
def update_pets(pet_id):
"""
Update a Pet
This endpoint will update a Pet based the body that is posted
"""
app.logger.info("Request to update pet with id: %s", pet_id)
check_content_type("application/json")
pet = Pet.find(pet_id)
if not pet:
raise NotFound("Pet with id '{}' was not found.".format(pet_id))
pet.deserialize(request.get_json())
pet.id = pet_id
pet.update()
app.logger.info("Pet with ID [%s] updated.", pet.id)
return make_response(jsonify(pet.serialize()), status.HTTP_200_OK)
######################################################################
# DELETE A PET
######################################################################
@app.route("/pets/<int:pet_id>", methods=["DELETE"])
def delete_pets(pet_id):
"""
Delete a Pet
This endpoint will delete a Pet based the id specified in the path
"""
app.logger.info("Request to delete pet with id: %s", pet_id)
pet = Pet.find(pet_id)
if pet:
pet.delete()
app.logger.info("Pet with ID [%s] delete complete.", pet_id)
return make_response("", status.HTTP_204_NO_CONTENT)
######################################################################
# U T I L I T Y F U N C T I O N S
######################################################################
def check_content_type(media_type):
""" Checks that the media type is correct """
content_type = request.headers.get("Content-Type")
if content_type and content_type == media_type:
return
app.logger.error("Invalid Content-Type: %s", content_type)
abort(415, "Content-Type must be {}".format(media_type))
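# Quick usage sketch with Flask's built-in test client (no server needed);
# the pet fields shown ("name", "category", "available") are assumptions
# about the Pet model, which is defined elsewhere in this service.
def smoke_test():
    client = app.test_client()
    resp = client.post("/pets", json={"name": "fido", "category": "dog",
                                      "available": True})
    assert resp.status_code == 201
    assert client.get(resp.headers["Location"]).status_code == 200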

# ---- .eric6/eric6plugins/vcsGit/Ui_GitRemoteRepositoriesDialog.py (repo: metamarcdw/.dotfiles) ----
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/cypher/.eric6/eric6plugins/vcsGit/GitRemoteRepositoriesDialog.ui'
#
# Created by: PyQt5 UI code generator 5.8
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_GitRemoteRepositoriesDialog(object):
def setupUi(self, GitRemoteRepositoriesDialog):
GitRemoteRepositoriesDialog.setObjectName("GitRemoteRepositoriesDialog")
GitRemoteRepositoriesDialog.resize(600, 450)
self.verticalLayout_2 = QtWidgets.QVBoxLayout(GitRemoteRepositoriesDialog)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.groupBox = QtWidgets.QGroupBox(GitRemoteRepositoriesDialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(3)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setObjectName("groupBox")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName("verticalLayout")
self.repolist = QtWidgets.QTreeWidget(self.groupBox)
self.repolist.setAlternatingRowColors(True)
self.repolist.setRootIsDecorated(False)
self.repolist.setItemsExpandable(False)
self.repolist.setObjectName("repolist")
self.verticalLayout.addWidget(self.repolist)
self.verticalLayout_2.addWidget(self.groupBox)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.addButton = QtWidgets.QPushButton(GitRemoteRepositoriesDialog)
self.addButton.setObjectName("addButton")
self.horizontalLayout.addWidget(self.addButton)
self.renameButton = QtWidgets.QPushButton(GitRemoteRepositoriesDialog)
self.renameButton.setObjectName("renameButton")
self.horizontalLayout.addWidget(self.renameButton)
self.removeButton = QtWidgets.QPushButton(GitRemoteRepositoriesDialog)
self.removeButton.setObjectName("removeButton")
self.horizontalLayout.addWidget(self.removeButton)
self.pruneButton = QtWidgets.QPushButton(GitRemoteRepositoriesDialog)
self.pruneButton.setObjectName("pruneButton")
self.horizontalLayout.addWidget(self.pruneButton)
self.line = QtWidgets.QFrame(GitRemoteRepositoriesDialog)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.horizontalLayout.addWidget(self.line)
self.showInfoButton = QtWidgets.QPushButton(GitRemoteRepositoriesDialog)
self.showInfoButton.setObjectName("showInfoButton")
self.horizontalLayout.addWidget(self.showInfoButton)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.errorGroup = QtWidgets.QGroupBox(GitRemoteRepositoriesDialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.errorGroup.sizePolicy().hasHeightForWidth())
self.errorGroup.setSizePolicy(sizePolicy)
self.errorGroup.setObjectName("errorGroup")
self.vboxlayout = QtWidgets.QVBoxLayout(self.errorGroup)
self.vboxlayout.setObjectName("vboxlayout")
self.errors = QtWidgets.QTextEdit(self.errorGroup)
self.errors.setReadOnly(True)
self.errors.setAcceptRichText(False)
self.errors.setObjectName("errors")
self.vboxlayout.addWidget(self.errors)
self.verticalLayout_2.addWidget(self.errorGroup)
self.inputGroup = QtWidgets.QGroupBox(GitRemoteRepositoriesDialog)
self.inputGroup.setObjectName("inputGroup")
self._2 = QtWidgets.QGridLayout(self.inputGroup)
self._2.setObjectName("_2")
spacerItem2 = QtWidgets.QSpacerItem(327, 29, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self._2.addItem(spacerItem2, 1, 1, 1, 1)
self.sendButton = QtWidgets.QPushButton(self.inputGroup)
self.sendButton.setObjectName("sendButton")
self._2.addWidget(self.sendButton, 1, 2, 1, 1)
self.input = QtWidgets.QLineEdit(self.inputGroup)
self.input.setObjectName("input")
self._2.addWidget(self.input, 0, 0, 1, 3)
self.passwordCheckBox = QtWidgets.QCheckBox(self.inputGroup)
self.passwordCheckBox.setObjectName("passwordCheckBox")
self._2.addWidget(self.passwordCheckBox, 1, 0, 1, 1)
self.verticalLayout_2.addWidget(self.inputGroup)
self.buttonBox = QtWidgets.QDialogButtonBox(GitRemoteRepositoriesDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Close)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout_2.addWidget(self.buttonBox)
self.retranslateUi(GitRemoteRepositoriesDialog)
QtCore.QMetaObject.connectSlotsByName(GitRemoteRepositoriesDialog)
GitRemoteRepositoriesDialog.setTabOrder(self.repolist, self.addButton)
GitRemoteRepositoriesDialog.setTabOrder(self.addButton, self.renameButton)
GitRemoteRepositoriesDialog.setTabOrder(self.renameButton, self.removeButton)
GitRemoteRepositoriesDialog.setTabOrder(self.removeButton, self.pruneButton)
GitRemoteRepositoriesDialog.setTabOrder(self.pruneButton, self.showInfoButton)
GitRemoteRepositoriesDialog.setTabOrder(self.showInfoButton, self.errors)
GitRemoteRepositoriesDialog.setTabOrder(self.errors, self.input)
GitRemoteRepositoriesDialog.setTabOrder(self.input, self.passwordCheckBox)
GitRemoteRepositoriesDialog.setTabOrder(self.passwordCheckBox, self.sendButton)
def retranslateUi(self, GitRemoteRepositoriesDialog):
_translate = QtCore.QCoreApplication.translate
GitRemoteRepositoriesDialog.setWindowTitle(_translate("GitRemoteRepositoriesDialog", "Git Remote Repositories"))
GitRemoteRepositoriesDialog.setWhatsThis(_translate("GitRemoteRepositoriesDialog", "<b>Git Remote Repositories</b>\n"
"<p>This dialog shows the available remote repositories.</p>"))
self.groupBox.setTitle(_translate("GitRemoteRepositoriesDialog", "Remote Repositories"))
self.repolist.headerItem().setText(0, _translate("GitRemoteRepositoriesDialog", "Name"))
self.repolist.headerItem().setText(1, _translate("GitRemoteRepositoriesDialog", "URL"))
self.repolist.headerItem().setText(2, _translate("GitRemoteRepositoriesDialog", "Operation"))
self.addButton.setToolTip(_translate("GitRemoteRepositoriesDialog", "Press to add a remote repository"))
self.addButton.setText(_translate("GitRemoteRepositoriesDialog", "&Add"))
self.renameButton.setToolTip(_translate("GitRemoteRepositoriesDialog", "Press to rename the selected repository"))
self.renameButton.setText(_translate("GitRemoteRepositoriesDialog", "&Rename"))
self.removeButton.setToolTip(_translate("GitRemoteRepositoriesDialog", "Press to remove the selected repository"))
self.removeButton.setText(_translate("GitRemoteRepositoriesDialog", "Re&move"))
self.pruneButton.setToolTip(_translate("GitRemoteRepositoriesDialog", "Press to delete stale tracking branches under the selected repository "))
self.pruneButton.setText(_translate("GitRemoteRepositoriesDialog", "&Prune"))
self.showInfoButton.setToolTip(_translate("GitRemoteRepositoriesDialog", "Press to show information about the selected repository"))
self.showInfoButton.setText(_translate("GitRemoteRepositoriesDialog", "Show &Info..."))
self.errorGroup.setTitle(_translate("GitRemoteRepositoriesDialog", "Errors"))
self.errors.setWhatsThis(_translate("GitRemoteRepositoriesDialog", "<b>Git log errors</b><p>This shows possible error messages of the git log command.</p>"))
self.inputGroup.setTitle(_translate("GitRemoteRepositoriesDialog", "Input"))
self.sendButton.setToolTip(_translate("GitRemoteRepositoriesDialog", "Press to send the input to the git process"))
self.sendButton.setText(_translate("GitRemoteRepositoriesDialog", "&Send"))
self.sendButton.setShortcut(_translate("GitRemoteRepositoriesDialog", "Alt+S"))
self.input.setToolTip(_translate("GitRemoteRepositoriesDialog", "Enter data to be sent to the git process"))
self.passwordCheckBox.setToolTip(_translate("GitRemoteRepositoriesDialog", "Select to switch the input field to password mode"))
self.passwordCheckBox.setText(_translate("GitRemoteRepositoriesDialog", "&Password Mode"))
self.passwordCheckBox.setShortcut(_translate("GitRemoteRepositoriesDialog", "Alt+P"))

# ---- simulation/simulate_ring.py (repo: chuan137/upiv-analysis) ----
#!/usr/bin/env python
import sys, os
sys.path.insert(0, os.path.realpath('../python'))
import cv
import getopt
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as rand
import tifffile.tifffile as tif
from helper import stats
# {{{ Helper Functions
def snr_ind(snr):
return 20 * np.log(snr) / np.log(10)
def draw_circle(imgsize, rad, thickness=2):
w, h = imgsize
xx, yy = np.mgrid[:w, :h]
circle = (xx - w/2)**2 + (yy - h/2)**2
rmin, rmax = rad - thickness/2, rad + thickness/2
return np.logical_and(circle < rmax**2, circle > rmin**2)
def draw_fuzzy_circle(imgsize, rad, thickness):
width = 0.5 * thickness
idx = np.arange(min(imgsize))
kernel = np.exp(-(idx - rad)**2/(2*width**2))
w, h = imgsize
xx, yy = np.mgrid[:w, :h]
circle = np.floor(np.sqrt((xx - w/2)**2 + (yy - h/2)**2))
f = np.vectorize(lambda x: kernel[x])
return f(circle)
def gen_pattern(alpha, beta=0.01):
'''
generate pattern
:param beta: signal threashold, ratio of the peak value
'''
fuzzy_circle = draw_fuzzy_circle(samplesize, rad, thickness)
background = rand.normal(0.0, noise, samplesize)
image = alpha * fuzzy_circle + background
# signal noise ratio
thred = beta * image.max()
circle = draw_circle(samplesize, rad, 0.5*thickness)
signal = image[np.logical_and(image>thred, circle)]
snr = signal.mean() / noise
return image, snr
# }}}
# seperation between noise and circle
alpha = 1.0
# variance of gaussian noise
noise = 1.0
# ring radius
rad = 25
# ring thickness
thickness = 6
# sample size
samplesize = (128, 128)
# full image size
fullsize = (1024, 1024)
# output format
fmt = 'tif'
"""
options: [-a alpha] [-r radius] [-f fullsize] [-t fmt]
"""
# note: the option string must declare every flag handled below (-f and -t were missing)
optlist, args = getopt.getopt(sys.argv[1:], 'a:r:f:t:')
for o, a in optlist:
if o == '-a':
alpha = float(a)
elif o == '-r':
rad = float(a)
elif o == '-f':
fullsize = (float(a), float(a))
elif o == '-t':
fmt = a
else:
assert False, "unknown option"
image = np.zeros(fullsize)
xs, ys = samplesize
snr_ = []
for i in range(fullsize[0]/samplesize[0]):
for j in range(fullsize[1]/samplesize[1]):
sample, snr = gen_pattern(alpha)
snr_.append(snr)
print 'SNR : %.5f,' % snr,
print 'Industrial SNR: %.3f db' % snr_ind(snr)
image[i*xs:(i+1)*xs, j*xs:(j+1)*xs] = sample
snr_ = np.array(snr_)
print "Average SNR %.3f +/- %.3f (%.2f db)" % \
(snr_.mean(), snr_.std(), snr_ind(snr_.mean()))
# plt.imshow(image, cmap='gray')
if fmt == 'tif':
tif.imsave('figs/ring.tif', image.astype(np.float32))
elif fmt == 'png':
plt.savefig('figs/ring.png')
else:
print 'Output format %s not supported' % fmt
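# Small sanity check for the helpers above: a known radius/thickness should
# put the fuzzy-circle peak near the requested radius (values are arbitrary).
def _check_fuzzy_circle():
    img = draw_fuzzy_circle((128, 128), rad=25, thickness=6)
    peak = np.unravel_index(np.argmax(img), img.shape)
    r = np.hypot(peak[0] - 64, peak[1] - 64)
    assert abs(r - 25) <= 3, r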

# ---- 03-数组问题/0026-删除排序数组中的重复项.py (repo: ybm1/LeetCode-Solution-Python) ----
# (path translates to "Array problems / 0026 - Remove Duplicates from Sorted Array")
class Solution:
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
size = len(nums)
        # duplicates can simply be overwritten in place
if size == 0:
return 0
        # index of the last unique element written so far
j = 0
for i in range(1, size):
if nums[i] != nums[j]:
j += 1
nums[j] = nums[i]
return j + 1
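# Usage sketch: the method compacts nums in place and returns the new length.
if __name__ == "__main__":
    nums = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]
    k = Solution().removeDuplicates(nums)
    assert k == 5 and nums[:k] == [0, 1, 2, 3, 4]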

# ---- wdreconcile/engine.py (repo: msaby/openrefine-wikidata, MIT) ----
from config import *
import requests
import itertools
import re
import json
from collections import defaultdict
from .itemstore import ItemStore
from .typematcher import TypeMatcher
from .utils import to_q
from .language import language_fallback
from .propertypath import PropertyFactory
from .wikidatavalue import ItemValue
from .sitelink import SitelinkFetcher
class ReconcileEngine(object):
"""
Main class of the reconciliation system
"""
def __init__(self, redis_client):
self.item_store = ItemStore(redis_client)
self.type_matcher = TypeMatcher(redis_client)
self.pf = PropertyFactory(self.item_store)
self.sitelink_fetcher = self.item_store.sitelink_fetcher
self.property_weight = 0.4
self.validation_threshold_discount_per_property = 5
self.match_score_gap = 10
self.avoid_type = 'Q17442446' # Wikimedia internal stuff
self.p31_property_path = self.pf.parse('P31')
def wikidata_string_search(self, query_string, num_results, default_language):
"""
Use the Wikidata API to search for matching items
"""
r = requests.get(
'https://www.wikidata.org/w/api.php',
{'action':'query',
'format':'json',
'list':'search',
'srnamespace':0,
'srlimit':num_results,
'srsearch':query_string},
headers=headers)
resp = r.json()
search_results = [item['title'] for item in resp.get('query', {}).get('search', [])]
r = requests.get(
'https://www.wikidata.org/w/api.php',
{'action':'wbsearchentities',
'format':'json',
'language': default_language,
'limit':num_results,
'search':query_string},
headers=headers)
resp = r.json()
autocomplete_results = [item['id'] for item in resp.get('search', [])]
return search_results + autocomplete_results
def prepare_property(self, prop):
"""
Converts a property to a SPARQL path
"""
pid = prop['pid']
path = self.pf.parse(pid)
prop['path'] = path
prop['v'] = str(prop.get('v')).strip()
# This indicates whether the property is a unique
# identifier for the resolved items. If so, we can use it
# to fetch matches, without relying on string search.
prop['unique_id'] = path.is_unique_identifier()
return prop
def process_queries(self, queries, default_language='en'):
"""
This contains the backbone of the reconciliation algorithm.
- If unique identifiers are supplied for the queries,
try to use these to find matches by SPARQL
- Otherwise, do a string search for candidates,
filter them and rank them.
"""
# Prepare all properties
for query_id in queries:
queries[query_id]['properties'] = list(map(self.prepare_property,
queries[query_id].get('properties', [])))
# Find primary ids in the queries
unique_id_values = defaultdict(set)
for query in queries.values():
for prop in query['properties']:
v = prop['v']
if prop['unique_id'] and v:
unique_id_values[prop['path']].add(v)
# Find Qids and labels by primary id
unique_id_to_qid = {
path : path.fetch_qids_by_values(values, default_language)
for path, values in unique_id_values.items()
}
# Resolve all sitelinks to qids
possible_sitelinks = [query['query'] for query in queries.values()]
for query in queries.values():
possible_sitelinks += [p['v'] for p in query.get('properties', [])]
# (this is cached in redis)
sitelinks_to_qids = self.sitelink_fetcher.sitelinks_to_qids(
possible_sitelinks)
# Fetch all candidate qids for each query
qids = {}
qids_to_prefetch = set()
for query_id, query in queries.items():
# First, see if any qids can be fetched by primary id
primary_qids_and_labels = []
for prop in query['properties']:
if prop['unique_id']:
primary_qids_and_labels += unique_id_to_qid.get(
prop['path'], {}).get(
prop['v'], [])
if primary_qids_and_labels:
# for now we're throwing away the labels
# returned by the SPARQL query. Ideally we
# could keep them to avoid fetching these items.
qids[query_id] = [qid for qid, _ in primary_qids_and_labels]
qids_to_prefetch |= set(qids[query_id])
continue
# Otherwise, use the text query
if 'query' not in query:
raise ValueError('No "query" provided')
num_results = int(query.get('limit') or default_num_results)
num_results_before_filter = min([2*num_results, wd_api_max_search_results])
# If the text query is actually a QID, just return the QID itself
# (same for sitelinks, but with conversion)
query_as_qid = to_q(query['query'])
query_as_sitelink = SitelinkFetcher.normalize(query['query'])
qid_from_sitelink = None
if query_as_sitelink:
qid_from_sitelink = sitelinks_to_qids.get(query_as_sitelink)
if query_as_qid:
qids[query_id] = [query_as_qid]
elif qid_from_sitelink:
qids[query_id] = [qid_from_sitelink]
else: # otherwise just search for the string with the WD API
qids[query_id] = self.wikidata_string_search(query['query'],
num_results_before_filter, default_language)
qids_to_prefetch |= set(qids[query_id])
# Prefetch all items
self.item_store.get_items(qids_to_prefetch)
# Perform each query
result = {}
for query_id, query in queries.items():
result[query_id] = {
'result':self._rank_items(query,qids[query_id], default_language)
}
return result
def _rank_items(self, query, ids, default_language):
"""
Given a query and candidate qids returned from the search API,
return the list of fleshed-out items from these QIDs, filtered
and ranked.
"""
search_string = query['query']
properties = query.get('properties', [])
target_types = query.get('type') or []
type_strict = query.get('type_strict', 'any')
if type_strict not in ['any','all','should']:
raise ValueError('Invalid type_strict')
if type(target_types) != list:
target_types = [target_types]
discounted_validation_threshold = (validation_threshold -
self.validation_threshold_discount_per_property * len(properties))
# retrieve corresponding items
items = self.item_store.get_items(ids)
# Add the label as "yet another property"
properties_with_label = properties + [{
'pid':'all_labels',
'v':query['query'],
'path':self.pf.make_empty(),
'unique_id':False
}]
scored_items = []
no_type_items = []
types_to_prefetch = set()
for qid, item in items.items():
itemvalue = ItemValue(id=qid)
# Check the type if we have a type constraint
current_types = [val.id for val in self.p31_property_path.step(itemvalue)]
type_found = len(current_types) > 0
if target_types:
good_type = any([
any([
self.type_matcher.is_subclass(typ, target_type)
for typ in current_types
])
for target_type in target_types])
else: # Check if we should ignore this item
good_type = not all([
self.type_matcher.is_subclass(typ, self.avoid_type)
for typ in current_types
])
# If the type is invalid, skip the item.
# If there is no type, we keep the item and will
# reduce the score later on.
if type_found and not good_type:
continue
# Compute per-property score
scored = {}
unique_id_found = False
for prop in properties_with_label:
prop_id = prop['pid']
ref_val = str(prop['v'])
path = prop['path']
maxscore = 0
bestval = None
values = path.step(
ItemValue(id=qid))
for val in values:
curscore = val.match_with_str(ref_val, self.item_store)
if curscore > maxscore or bestval is None:
bestval = val
maxscore = curscore
if prop['unique_id'] and maxscore == 100:
# We found a match for a unique identifier!
unique_id_found = True
weight = (1.0 if prop_id == 'all_labels'
else self.property_weight)
scored[prop_id] = {
'score': maxscore,
'weighted': weight*maxscore,
}
# Compute overall score
sum_scores = sum([
prop['weighted'] for pid, prop in scored.items()
])
properties_non_unique_ids = len([p for p in properties if not p['unique_id']])
total_weight = self.property_weight*properties_non_unique_ids + 1.0
if unique_id_found:
avg = 100 # maximum score for matches by unique identifiers
elif sum_scores:
avg = sum_scores / total_weight
else:
avg = 0
scored['score'] = avg
scored['id'] = qid
scored['name'] = self.item_store.get_label(qid, default_language)
scored['type'] = current_types
types_to_prefetch |= set(scored['type'])
scored['match'] = False # will be changed later
if not type_found and target_types and not unique_id_found:
# Discount the score: we don't want any match
# for these items, but they might be interesting
# as potential matches for the user.
scored['score'] /= 2
no_type_items.append(scored)
else:
scored_items.append(scored)
# Prefetch the labels for the types
self.item_store.get_items(list(types_to_prefetch))
# If no item had the right type, fall back on items with no type.
# These items already have a much lower score, so there will be
# no automatic match.
if not scored_items:
scored_items = no_type_items
# Add the labels to the response
for i in range(len(scored_items)):
scored_items[i]['type'] = [
{'id':id, 'name':self.item_store.get_label(id, default_language)}
for id in scored_items[i]['type']]
ranked_items = sorted(scored_items, key=lambda i: -i.get('score', 0))
if ranked_items:
# Decide if we trust the first match
next_score = ranked_items[1]['score'] if len(scored_items) > 1 else 0
current_score = ranked_items[0]['score']
ranked_items[0]['match'] = (
current_score > discounted_validation_threshold and
current_score > next_score + self.match_score_gap)
max_results = int(query.get('limit') or default_num_results)
return ranked_items[:max_results]
def process_single_query(self, q, default_language='en'):
results = self.process_queries({'q':q}, default_language)
return results['q']
def fetch_values(self, args):
"""
Same as fetch_property_by_batch, but for a single
item (more convenient for testing).
The `flat` parameter can be used to return just
the value, without any JSON.
"""
new_args = args.copy()
qid = args.get('item', '')
new_args['ids'] = qid
results = self.fetch_property_by_batch(new_args)
values = results['values'][0]
if args.get('flat') == 'true':
if values:
return values[0]
else:
return ''
else:
return {'item':qid, 'prop':results['prop'], 'values':values}
def fetch_property_by_batch(self, args):
"""
Endpoint allowing clients to fetch the values associated
to items and a property path.
"""
lang = args.get('lang')
if not lang:
raise ValueError('No lang provided')
prop = args.get('prop')
if not prop:
raise ValueError('No property provided')
path = self.prepare_property({'pid':prop})['path']
fetch_labels = ((args.get('label') or 'true') == 'true')
items = args.get('ids','').split('|')
items = [to_q(item) for item in items]
if None in items:
raise ValueError('Invalid Qid provided')
values = [
path.evaluate(
ItemValue(id=qid),
lang=lang,
fetch_labels=fetch_labels,
) for qid in items ]
return {'prop':prop, 'values':values}
def fetch_properties_by_batch(self, args):
"""
Endpoint allowing clients to fetch multiple properties
(or property paths) on multiple items, simultaneously.
        This complies with OpenRefine's data extension protocol.
"""
lang = args.get('lang')
if not lang:
raise ValueError('No lang provided')
query = args.get('extend', {})
# Qids of the items to fetch
ids = query.get('ids', [])
ids = list(map(to_q, ids))
if None in ids:
raise ValueError('Invalid item id provided')
# Property paths to fetch
props = query.get('properties')
if not props:
raise ValueError("At least one property has to be provided")
paths = {
prop['id']: {
'path': self.prepare_property({'pid':prop['id']})['path'],
'settings': prop.get('settings', {}),
}
for prop in props
}
rows = {}
for qid in ids:
current_row = {}
for pid, prop in paths.items():
current_row[pid] = [
v.as_openrefine_cell(lang, self.item_store)
for v in prop['path'].step(
ItemValue(id=qid),
prop['settings'].get('references') or 'any',
prop['settings'].get('rank') or 'best')
]
try:
limit = int(prop['settings'].get('limit') or 0)
except ValueError:
limit = 0
if limit > 0:
current_row[pid] = current_row[pid][:limit]
if prop['settings'].get('count') == 'on':
current_row[pid] = [{'float':len(current_row[pid])}]
rows[qid] = current_row
# Prefetch property names
self.item_store.get_items(paths.keys())
meta = []
for prop in props:
pid = prop['id']
path = paths[pid]['path']
settings = paths[pid].get('settings') or {}
dct = {
'id':pid,
'name':path.readable_name(lang),
}
if settings:
dct['settings'] = settings
expected_types = path.expected_types()
if expected_types and not settings.get('count') == 'on':
qid = expected_types[0]
dct['type'] = {
'id':qid,
'name':self.item_store.get_label(qid, lang),
}
meta.append(dct)
ret = {
'rows': rows,
'meta': meta,
}
return ret
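    # Sketch of a data-extension call this endpoint accepts (the ids and
    # property ids are illustrative):
    #   self.fetch_properties_by_batch({'lang': 'en',
    #       'extend': {'ids': ['Q42'],
    #                  'properties': [{'id': 'P569',
    #                                  'settings': {'limit': 1}}]}})
    #   # -> {'rows': {'Q42': {'P569': [...]}}, 'meta': [...]}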
| [
"[email protected]"
] | |
69e9b2bd7bbdfc17ad9f50426744716f1198ec52 | de6e03e5385a8c502f144283d7cd1d85037ffee7 | /manage.py | 7beea34a6b0c6717b17e118914685d8c90e10485 | [] | no_license | WersonLu/OnlineCourse | 2c90b239be66643fa190294b0c0075d8e481f0c8 | 73b83b01962470db7ca029ce197f6451f51385f4 | refs/heads/master | 2020-03-07T18:21:11.104522 | 2018-04-01T14:20:36 | 2018-04-01T14:20:57 | 127,635,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "OnLineCourse.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
3ae6f30fa3fe13c41d94fe3ebd8e028f058ef13e | 0734fe314483192e630272bb212aa7817d627628 | /parsl/executors/swift_t.py | 752a5a139692958dc61318bba9ffc75fc4691beb | [
"Apache-2.0"
] | permissive | djf604/parsl | 9798f1043a2196d3b538c8683de6d34d57d8f279 | 118af3a52be1811a3355c79a7adadda5ea66afde | refs/heads/master | 2020-12-02T16:27:10.252111 | 2017-06-29T01:47:09 | 2017-06-29T01:47:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,882 | py | ''' Sample Executor for integration with SwiftT.
This follows the model used by `EMEWS <http://www.mcs.anl.gov/~wozniak/papers/Cancer2_2016.pdf>`_
to some extent.
'''
from concurrent.futures import Future
import logging
import uuid
import threading
import queue
import multiprocessing as mp
from ipyparallel.serialize import pack_apply_message, unpack_apply_message
from ipyparallel.serialize import serialize_object, deserialize_object
from parsl.executors.base import ParslExecutor
logger = logging.getLogger(__name__)
BUFFER_THRESHOLD = 1024*1024
ITEM_THRESHOLD = 1024
def runner(incoming_q, outgoing_q):
''' This is a function that mocks the Swift-T side. It listens on the the incoming_q for tasks
and posts returns on the outgoing_q
Args:
- incoming_q (Queue object) : The queue to listen on
- outgoing_q (Queue object) : Queue to post results on
The messages posted on the incoming_q will be of the form :
{
"task_id" : <uuid.uuid4 string>,
"buffer" : serialized buffer containing the fn, args and kwargs
}
If ``None`` is received, the runner will exit.
Response messages should be of the form:
{
"task_id" : <uuid.uuid4 string>,
"result" : serialized buffer containing result
"exception" : serialized exception object
}
On exiting the runner will post ``None`` to the outgoing_q
'''
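    # Illustrative message pair (the task_id value is made up):
    #   in : {"task_id": "1a2b...", "buffer": <packed fn/args/kwargs>}
    #   out: {"task_id": "1a2b...", "result": <serialized return value>}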
logger.debug("[RUNNER] Starting")
def execute_task(bufs):
''' Deserialize the buf, and execute the task.
Returns the serialized result/exception
'''
all_names = dir(__builtins__)
user_ns = locals()
user_ns.update( {'__builtins__' : {k : getattr(__builtins__, k) for k in all_names} } )
f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)
fname = getattr(f, '__name__', 'f')
prefix = "parsl_"
fname = prefix+"f"
argname = prefix+"args"
kwargname = prefix+"kwargs"
resultname = prefix+"result"
        # NB: resultname is mapped to a placeholder string here; the exec()
        # below rebinds it to the real return value.
        user_ns.update({ fname : f,
                         argname : args,
                         kwargname : kwargs,
                         resultname : resultname })
code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
argname, kwargname)
try:
print("[RUNNER] Executing : {0}".format(code))
exec(code, user_ns, user_ns)
except Exception as e:
logger.warning("Caught errors but will not handled %s", e)
raise e
else :
#print("Done : {0}".format(locals()))
print("[RUNNER] Result : {0}".format(user_ns.get(resultname)))
return user_ns.get(resultname)
while True :
try:
# Blocking wait on the queue
msg = incoming_q.get(block=True, timeout=10)
#logger.debug("[RUNNER] Got message : %s", msg)
except queue.Empty:
# Handle case where no items were on queue
logger.debug("[RUNNER] got nothing")
except IOError as ioerror:
logger.debug("[RUNNER] broken pipe, error: %s", ioerror)
try:
# Attempt to send a stop notification to the management thread
outgoing_q.put(None)
except Exception :
pass
break
except Exception as e:
logger.debug("[RUNNER] caught unknown exception : %s", e)
else:
# Handle received message
if not msg :
# Empty message is a die request
logger.debug("[RUNNER] Received exit request")
outgoing_q.put(None)
break
else:
# Received a valid message, handle it
logger.debug("[RUNNER] Got a valid task : %s", msg["task_id"])
try:
response_obj = execute_task(msg['buffer'])
response = {"task_id" : msg["task_id"],
"result" : serialize_object(response_obj)}
logger.warning("[RUNNER] Returing result : %s",
deserialize_object(response["result"]) )
except Exception as e:
logger.debug("[RUNNER] Caught task exception")
response = {"task_id" : msg["task_id"],
"exception" : serialize_object(e)}
outgoing_q.put(response)
logger.debug("[RUNNER] Terminating")
class TurbineExecutor(ParslExecutor):
''' The Turbine executor. Bypass the Swift/T language and run on top off the Turbine engines
in an MPI environment.
Here's a simple diagram
.. code:: python
| Data | Executor | IPC | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|outgoing_q -|-> Worker_Process
| | | | | | |
Parsl<---Fut-| | | | result exception
^ | | | | | |
| | | Q_mngmnt | | V V
| | | Thread<--|incoming_q<-|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
'''
def _queue_management_worker(self):
''' The queue management worker is responsible for listening to the incoming_q
for task status messages and updating tasks with results/exceptions/updates
It expects the following messages:
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
        We don't support these yet, but they could be added easily as heartbeats.
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
        The None message is a die request.
        '''
while True:
logger.debug("[MTHREAD] Management thread active")
try:
msg = self.incoming_q.get(block=True, timeout=1)
except queue.Empty as e:
# timed out.
pass
except IOError as e:
logger.debug("[MTHREAD] caught broken queue : %s : errno:%s", e, e.errno)
return
except Exception as e:
logger.debug("[MTHREAD] caught unknown exception : %s", e)
pass
else:
if msg is None:
logger.debug("[MTHREAD] Got None")
return
else:
logger.debug("[MTHREAD] Got message : %s", msg)
task_fut = self.tasks[msg['task_id']]
if 'result' in msg:
result, _ = deserialize_object(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
exception, _ = deserialize_object(msg['exception'])
task_fut.set_exception(exception)
if not self.isAlive:
break
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(self, q=None):
''' We do not use this yet
'''
q.put(None)
def _start_queue_management_thread(self):
''' Method to start the management thread as a daemon.
Checks if a thread already exists, then starts it.
Could be used later as a restart if the management thread dies.
'''
logging.debug("In _start %s", "*"*40)
if self._queue_management_thread is None:
logging.debug("Starting management thread ")
self._queue_management_thread = threading.Thread (target=self._queue_management_worker)
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
else:
logging.debug("Management thread already exists, returning")
def shutdown(self):
''' Shutdown method, to kill the threads and workers.
'''
self.isAlive = False
logging.debug("Waking management thread")
self.incoming_q.put(None) # Wake up the thread
self._queue_management_thread.join() # Force join
logging.debug("Exiting thread")
self.worker.join()
return True
def __init__ (self, swift_attribs=None):
''' Initialize the thread pool
Trying to implement the emews model.
Kwargs:
           - swift_attribs : Takes a dict of swift attribs. For future use.
'''
logger.debug("In __init__")
self.mp_manager = mp.Manager()
self.outgoing_q = self.mp_manager.Queue()
self.incoming_q = self.mp_manager.Queue()
self.isAlive = True
self._queue_management_thread = None
self._start_queue_management_thread()
logger.debug("Created management thread : %s", self._queue_management_thread)
self.worker = mp.Process(target=runner, args = (self.outgoing_q, self.incoming_q))
self.worker.start()
logger.debug("Created worker : %s", self.worker)
self.tasks = {}
def submit (self, func, *args, **kwargs):
        ''' Submits work to the outgoing_q; an external process listens on this queue for
            new work. This method is a simple pass-through and behaves like a submit call
            as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- *args (list) : List of arbitrary positional arguments.
Kwargs:
- **kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
'''
task_id = uuid.uuid4()
logger.debug("Before pushing to queue : func:%s func_args:%s", func, args)
self.tasks[task_id] = Future()
        fn_buf = pack_apply_message(func, args, kwargs,
                                    buffer_threshold=BUFFER_THRESHOLD,
                                    item_threshold=ITEM_THRESHOLD)
msg = {"task_id" : task_id,
"buffer" : fn_buf }
# Post task to the the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return self.tasks[task_id]
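    # Typical use, as a sketch (the function and arguments are illustrative):
    #   ex = TurbineExecutor()
    #   fut = ex.submit(pow, 2, 10)
    #   fut.result()   # blocks until the worker posts back {"result": ...}
    #   ex.shutdown()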
def scale_out (self, workers=1):
        ''' Scale out the number of active workers by 1.
        This is not implemented for this executor yet and will raise an error
        if called; it would be nice to have, and can be done.
        Raises:
             NotImplementedError
'''
raise NotImplementedError
def scale_in (self, workers=1):
        ''' Scale in the number of active workers by 1.
        This is not implemented for this executor and will raise an error if called.
        Raises:
             NotImplementedError
'''
raise NotImplementedError
if __name__ == "__main__" :
print("Start")
turb_x = TurbineExecutor()
print("Done")
| [
"[email protected]"
] | |
12361ffd64b16f79d033ac9d96479d6e32771b0f | b4da2201d2df789e28472aeded28720d5269ade5 | /Komodo-Edit-7/lib/mozilla/components/koRubyLanguage.py | 90f5bcb46614b184106069d22e6ae27ba04bb507 | [] | no_license | AeonSaber/first_app | 5ad89d4fb05d7662e2a39ce68176f43f1e618bf0 | 522fdfa6d33419fd49e431766fff85b40d21e78e | refs/heads/master | 2020-06-12T17:22:09.786142 | 2013-09-09T23:57:51 | 2013-09-09T23:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,669 | py | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2012
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
from xpcom import components, nsError, ServerException
from koLintResult import *
from koLintResults import koLintResults
from eollib import eol2eolStr, scimozEOL2eol
import logging
import os, sys, re
import tempfile
import string
import process
import koprocessutils
import scimozindent
import which
# import time #for interal timing only
import sciutils
from koLanguageServiceBase import *
from koLanguageKeywordBase import KoLanguageKeywordBase
log = logging.getLogger("RubyLanguage")
#log.setLevel(logging.DEBUG)
indentlog = logging.getLogger("RubyLanguage.indent")
#indentlog.setLevel(logging.DEBUG)
sci_constants = components.interfaces.ISciMoz
ORD_SPACE = ord(' ')
ORD_TAB = ord('\t')
ORD_CR = ord('\r')
ORD_NL = ord('\n')
ORD_BS = ord('\\')
NUM_LINES_TO_ANALYZE = 100 # Make this configurable via a preference?
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
# Inherit from KoLanguageKeywordBase instead of KoLanguageBase
# so the generic KoLanguageKeywordBase class can share code written
# specifically for Ruby.
class KoRubyLanguage(KoLanguageKeywordBase):
name = "Ruby"
_reg_desc_ = "%s Language" % name
_reg_contractid_ = "@activestate.com/koLanguage?language=%s;1" \
% (name)
_reg_clsid_ = "{3CFC5F18-0288-47e9-BF2D-061329D00FB6}"
_reg_categories_ = [("komodo-language", name)]
_com_interfaces_ = [components.interfaces.koILanguage,
components.interfaces.nsIObserver]
accessKey = 'r'
primary = 1
defaultExtension = ".rb"
shebangPatterns = [ re.compile(ur'\A#!.*ruby.*$', re.IGNORECASE | re.MULTILINE),]
_lineup_chars = u"()[]"
_lineup_open_chars = "(["
_lineup_close_chars = ")]"
styleStdin = sci_constants.SCE_RB_STDIN
styleStdout = sci_constants.SCE_RB_STDOUT
styleStderr = sci_constants.SCE_RB_STDERR
def __init__(self):
KoLanguageKeywordBase.__init__(self)
self._keyword_dedenting_keywords = self._dedent_sliders
self.prefService = components.classes["@activestate.com/koPrefService;1"].\
getService(components.interfaces.koIPrefService)
self._prefs = self.prefService.prefs
self._indent_style = self._prefs.getStringPref("editAutoIndentStyle")
self._handle_keypress = self._indent_style == 'smart'
log.debug("Ruby indent style: %s", self._indent_style)
try:
self._prefs.prefObserverService.addObserver(self, "editAutoIndentStyle", 0)
except Exception, e:
print e
self._keyword_letters = string.ascii_letters
self._style_info.update(
_indent_styles = [sci_constants.SCE_RB_OPERATOR],
_variable_styles = [sci_constants.SCE_RB_IDENTIFIER,
sci_constants.SCE_RB_GLOBAL,
sci_constants.SCE_RB_INSTANCE_VAR,
sci_constants.SCE_RB_CLASS_VAR],
_lineup_close_styles = [sci_constants.SCE_RB_OPERATOR,
sci_constants.SCE_RB_REGEX],
_lineup_styles = [sci_constants.SCE_RB_OPERATOR,
sci_constants.SCE_RB_REGEX],
_multiline_styles = [sci_constants.SCE_RB_STRING,
sci_constants.SCE_RB_CHARACTER,
sci_constants.SCE_RB_REGEX,
sci_constants.SCE_RB_BACKTICKS,
sci_constants.SCE_RB_STRING_Q,
sci_constants.SCE_RB_STRING_QQ,
sci_constants.SCE_RB_STRING_QX,
sci_constants.SCE_RB_STRING_QR,
sci_constants.SCE_RB_STRING_QW],
_keyword_styles = [sci_constants.SCE_RB_WORD],
# These handle things like <<break if test>>
_modified_keyword_styles = [sci_constants.SCE_RB_WORD_DEMOTED],
_default_styles = [sci_constants.SCE_RB_DEFAULT],
_ignorable_styles = [sci_constants.SCE_RB_ERROR,
sci_constants.SCE_RB_COMMENTLINE,
sci_constants.SCE_RB_POD,
sci_constants.SCE_RB_DATASECTION,
sci_constants.SCE_RB_HERE_DELIM,
sci_constants.SCE_RB_HERE_Q,
sci_constants.SCE_RB_HERE_QQ,
sci_constants.SCE_RB_HERE_QX],
_regex_styles = [sci_constants.SCE_RB_DATASECTION,
sci_constants.SCE_RB_REGEX,
sci_constants.SCE_RB_STRING_QR],
# Set this to [] in UDL
_datasection_styles = [sci_constants.SCE_RB_DATASECTION]
)
self.matchingSoftChars["`"] = ("`", self.softchar_accept_matching_backquote)
def observe(self, subject, topic, data):
if topic == "editAutoIndentStyle":
log.debug("**************** observer: subject %s, topic %s, data %s", subject, topic, data)
self._indent_style = self._prefs.getStringPref("editAutoIndentStyle")
self._handle_keypress = self._indent_style == 'smart'
else:
KoLanguageKeywordBase.observe(self, subject, topic, data)
def getVariableStyles(self):
return self._style_info._variable_styles
def get_lexer(self):
if self._lexer is None:
self._lexer = KoLexerLanguageService()
self._lexer.setLexer(sci_constants.SCLEX_RUBY)
self._lexer.setKeywords(0, self.keywords)
self._lexer.supportsFolding = 1
return self._lexer
commentDelimiterInfo = {
"line": [ "#" ],
}
variableIndicators = '$@' # Won't find @@ class variables
namedBlockRE = "^[ \t]*((?:def|module|class)\s+\w+)"
namedBlockDescription = 'Ruby classes and methods'
leading_ws_re = re.compile(r'^(\s*)')
only_optws_word = re.compile(r'(\s*)(\w+)$') # for match, so no ^
optws_word = re.compile(r'\s*(\w+|[<%=/>])') # for match, so no ^
match_blank_or_comment_line = re.compile(r'(\s*)(#.*)?[\r\n]*$') # no match needed
styleBits = 6 # Override KoLanguageBase.styleBits setting of 5
indicatorBits = 2 # Currently (2004-05-25) Same as base class
_sliders = ['else', 'elsif', 'ensure', 'rescue', 'when']
_dedenters = ['break', 'redo', 'next', 'raise', 'retry', 'return']
_limited_openers = ['begin', 'class', 'def', 'for', 'module', 'case',
'if', 'unless', 'until', 'while']
_ending_openers = ['do']
_openers = _limited_openers + _ending_openers
_enders = ['end']
_loop_kwds = ('for', 'while', 'until')
_dedent_sliders = _sliders + _enders
stylingBitsMask = 0
for bit in range(styleBits):
stylingBitsMask <<= 1
stylingBitsMask |= 1
supportsSmartIndent = "keyword" # should be overridden otherwise
_max_lines_in_calc = 100
sample = r"""# Fruit salad recipe
# Let's build a fruit-salad maker
# This snippet just shows various parts of Ruby speech.
module Salad
class Fruit
@@class_var = 0
attr :symbol
def initialize(fruit_list, counts)
@instance_var = {}
fruit_list.zip(counts).each { |a, b| @instance_var[a] = b } # block code
end
def mix()
print "double-quoted string"
print 'single-quoted string'
print `command`
print %Q(Q string)
print %q(q string)
print %r(reg ex)
print %x(backquoted string)
regex = /pattern/isx
@@class_var += 1
end
end
end
fruit_list = %w(guava rambutan longan mangosteen)
counts = [3, 4, 1, 5]
fruits = Salad::Fruit.new(fruit_list, counts)
print <<abc
here document
abc
=begin
rubydoc
block comment
=end
__END__
end section
"""
# Pythonism
# _dedenting_statements = [u'yield', u'raise', u'return', u'break', u'next']
keywords = ["__FILE__", "__LINE__",
"BEGIN", "class", "ensure", "nil", "self", "when",
"END", "def", "false", "not", "super", "while",
"alias", "defined", "for", "or", "then", "yield",
"and", "do", "if", "redo", "true",
"begin", "else", "in", "rescue", "undef",
"break", "elsif", "module", "retry", "unless",
"case", "end", "next", "return", "until"]
def get_interpreter(self):
if self._interpreter is None:
self._interpreter = components.classes["@activestate.com/koAppInfoEx?app=Ruby;1"].getService()
return self._interpreter
def _do_preceded_by_looper(self, tokens, do_style, style_info):
# Don't process 'noise' do words -- if this 'do' is preceded
# by one of 'for' 'while' 'until' do nothing
# tokens ends with the token before the 'do' that triggered this
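        # e.g. the 'do' in "while cond do ... end" is noise (the 'while'
        # already opened the block), whereas the 'do' in "items.each do |x|"
        # opens a new block and should indent.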
check_for_rhtml = do_style == sci_constants.SCE_UDL_SSL_WORD
for tok in reversed(tokens):
(style, text, tok_pos) = tok.explode()
if style in style_info._keyword_styles and text in self._loop_kwds:
return True
elif style in style_info._lineup_styles and text == ";":
# Hit the end of a previous statement on one line
return False
elif (check_for_rhtml
and (style < sci_constants.SCE_UDL_SSL_DEFAULT
or style > sci_constants.SCE_UDL_SSL_VARIABLE)):
return False
return False
def _have_significant_ending_do(self, scimoz, style_info, lineStartPos, initialPos):
tokens = self._get_line_tokens(scimoz, lineStartPos, initialPos, style_info, additional_ignorable_styles=style_info._default_styles)
try:
(style, text, tok_pos) = tokens[-1].explode()
except IndexError:
return False
if not (style in style_info._keyword_styles and text == 'do'):
return False
return not self._do_preceded_by_looper(tokens[:-1], style, style_info)
def _check_for_do(self, ch, scimoz, style_info, initialPos):
initialLine = scimoz.lineFromPosition(initialPos)
lineStartPos = scimoz.positionFromLine(initialLine)
if self._have_significant_ending_do(scimoz, style_info, lineStartPos, initialPos):
self._insert_end_keyword(scimoz, ch, initialLine, lineStartPos)
def _insert_end_keyword(self, scimoz, ch, initialLine, lineStartPos):
# Look to see if we need to follow it with an 'end'
curr_indent = self._getActualIndent(scimoz, initialLine, lineStartPos)
if self._startsNewBlock(scimoz, initialLine, curr_indent):
if ch == "\n" or ch == "\r":
# The character caused a newline to be inserted,
# so we need to insert the 'end' line below the
# newly inserted line.
if scimoz.lineFromPosition(scimoz.length) == initialLine + 1:
new_pos = scimoz.length
curr_indent = eol2eolStr[scimozEOL2eol[scimoz.eOLMode]] + curr_indent
else:
new_pos = scimoz.positionFromLine(initialLine + 2)
elif scimoz.lineFromPosition(scimoz.length) == initialLine:
# Bug 41788:
# If the end of the buffer is on the current line
# prepend another newline sequence to the generated
# text.
new_pos = scimoz.length
curr_indent = eol2eolStr[scimozEOL2eol[scimoz.eOLMode]] + curr_indent
else:
new_pos = scimoz.positionFromLine(initialLine + 1)
generated_line = curr_indent + "end" + eol2eolStr[scimozEOL2eol[scimoz.eOLMode]]
# log.debug("Inserting [%s] at posn %d (line %d)", generated_line, new_pos, scimoz.lineFromPosition(new_pos))
scimoz.insertText(new_pos, generated_line)
def _lineStartIsDefault(self, scimoz, style_info, lineStartPos, currentPos):
style1 = getActualStyle(scimoz, lineStartPos)
if style1 in (list(style_info._default_styles)
+ list(style_info._keyword_styles)):
return True
# Are we in RHTML, and the line-start is TPL or HTML?
style2 = getActualStyle(scimoz, currentPos)
if ((style1 < sci_constants.SCE_UDL_SSL_DEFAULT
or style1 > sci_constants.SCE_UDL_SSL_VARIABLE)
and (sci_constants.SCE_UDL_SSL_DEFAULT <= style2 <= sci_constants.SCE_UDL_SSL_VARIABLE)):
# This breaks if we have [Ruby]%> <%= <kwd>] - tough
return True
return False
def _checkForSlider(self, ch, scimoz, style_info):
# Looking for a reason to adjust indentation on a slider or ender
# Successful shifting requires 14 scimoz calls
# Try to minimize the number of scimoz calls when shifting
# isn't needed
#
# Also, if the triggering character is a newline, and
# we need to change the indentation on the line the newline ends,
# we'll need to adjust the indentation on the following line as well.
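        # e.g. typing the last char of "else" on a line of its own should
        # dedent that line to match its opening "if" before any newline
        # indentation is computed.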
initialPos = currentPos = scimoz.currentPos - 1
new_line_needs_adjusting = False
if ch == "\n" or ch == "\r":
at_EOL = True
# Do we need to deal with some added indentation?
currChar = scimoz.getCharAt(currentPos)
new_line_needs_adjusting = (currChar == ORD_SPACE or currChar == ORD_TAB)
while currChar == ORD_SPACE or currChar == ORD_TAB:
currentPos -= 1
currChar = scimoz.getCharAt(currentPos)
if ch == "\n" and scimoz.getCharAt(currentPos - 1) == ORD_CR:
# Allow for 2-character newline sequences
currentPos -= 1
initialPos = currentPos
else:
at_EOL = False
if (currentPos <= 0 or
getActualStyle(scimoz, currentPos) in style_info._keyword_styles or
getActualStyle(scimoz, currentPos - 1) not in style_info._keyword_styles):
return
initialLine = scimoz.lineFromPosition(currentPos)
lineStartPos = scimoz.positionFromLine(initialLine)
if currentPos == lineStartPos:
# At start of line, no reason to indent
return
lineEndPos = scimoz.getLineEndPosition(initialLine)
log.debug("_checkForSlider, line-start(%d), curr-pos(%d), line-end(%d)" % (lineStartPos, currentPos, lineEndPos))
# 3 calls to verify we have ^[def]...[kwd]<|>[non-kwd]
if (currentPos == lineStartPos or
(not at_EOL and lineEndPos > currentPos + 1)):
return
if not self._lineStartIsDefault(scimoz, style_info, lineStartPos, currentPos):
return self._check_for_do(ch, scimoz, style_info, initialPos)
# Verify we match ^\s*(\w+)$
# Last call to scimoz
line = scimoz.getTextRange(lineStartPos, currentPos)
wmatch = self.only_optws_word.match(line)
if wmatch is None:
return self._check_for_do(ch, scimoz, style_info, initialPos)
leading_ws, leading_kwd = wmatch.group(1, 2)
do_smart_indenting = True
try:
res, res_buf = scimoz.getProperty('smartCloseTags')
if res_buf == "0":
do_smart_indenting = False
except Exception, ex:
log.debug("smartCloseTags: %s", ex)
pass
if leading_kwd not in self._dedent_sliders and do_smart_indenting:
if leading_kwd in self._limited_openers:
self._insert_end_keyword(scimoz, ch, initialLine, lineStartPos)
return
parentLine = self._getParentLine(scimoz, initialLine)
# Replace the indentation on the current line with
# the indentation on the parent.
if parentLine >= initialLine or parentLine < 0:
# Unexpected -- bail out
return
parentActualWS = self._getActualIndent(scimoz, parentLine)
if leading_ws == parentActualWS:
# We aren't changing anything on the current line,
# so we won't have to propagate any change to the next
# line either.
return
scimoz.targetStart = lineStartPos
scimoz.targetEnd = lineStartPos + len(leading_ws)
log.debug("Replacing chars %d:%d with %d chars(%s)" % (lineStartPos, lineStartPos + len(leading_ws), len(parentActualWS), parentActualWS))
scimoz.beginUndoAction()
try:
scimoz.replaceTarget(len(parentActualWS), parentActualWS)
finally:
scimoz.endUndoAction()
# We have to recalc indentation of new line as well,
# as its previous line's indentation changed.
# Refer to line i = line ended with the newline,
# line j = i + 1, the newline
# When we fix it, we need to base the fix on the final indent
# level specified by the line that the newline ended.
# Fortunately, we can use the property that the line consists of
# ^\s*<keyword>$
# and we can therefore characterize exactly what kind of adjustment
# is needed:
#
# _enders: indent(j) = new indentation(i)
# _sliders: indent(j) = new indentation(i) + 1 delta
if new_line_needs_adjusting:
new_line = initialLine + 1
new_line_ActualWS = self._getActualIndent(scimoz, new_line)
if leading_kwd in self._enders:
new_indent_string = parentActualWS
else:
# Start with parentActualWS, and add one delta
indent_amount = scimoz.indent
if not indent_amount:
indent_amount = 8
new_indent_width = indent_amount + \
len(parentActualWS.expandtabs(scimoz.tabWidth))
new_indent_string = scimozindent.makeIndentFromWidth(scimoz, new_indent_width)
if new_indent_string != new_line_ActualWS:
newLineStartPos = scimoz.positionFromLine(new_line)
scimoz.targetStart = newLineStartPos
scimoz.targetEnd = newLineStartPos + len(new_line_ActualWS)
log.debug("Replacing chars %d:%d with %d chars(%s)" % (newLineStartPos, newLineStartPos + len(new_line_ActualWS), len(new_indent_string), new_indent_string))
scimoz.beginUndoAction()
try:
scimoz.replaceTarget(len(new_indent_string), new_indent_string)
newPos = scimoz.getLineEndPosition(new_line)
scimoz.currentPos = newPos
scimoz.selectionStart = newPos
finally:
scimoz.endUndoAction()
# end if we started with a newline
return parentActualWS
# end _checkForSlider
def guessIndentation(self, scimoz, tabWidth, defaultUsesTabs):
return self.guessIndentationByFoldLevels(scimoz, tabWidth, defaultUsesTabs, minIndentLevel=2)
# Hook this handler and then dispatch to baseclass
# Don't process characters unless
# self._prefs.getStringPref('editAutoIndentStyle') is 'smart'
#
def keyPressed(self, ch, scimoz):
return self._keyPressed(ch, scimoz, self._style_info)
def _keyPressed(self, ch, scimoz, style_info):
if self._handle_keypress and ch not in self._keyword_letters:
#before_time = time.clock()
made_change = self._checkForSlider(ch, scimoz, style_info)
#after_time = time.clock()
#log.debug("self._checkForSlider needed %r msecs", 1000 * (after_time - before_time))
# Have the base class do its stuff
KoLanguageKeywordBase._keyPressed(self, ch, scimoz, style_info)
def _is_special_variable(self, scimoz, pos, opStyle):
if pos == 0:
            return False
prevPos = scimoz.positionBefore(pos)
if scimoz.getStyleAt(prevPos) == opStyle and chr(scimoz.getCharAt(prevPos)) == '$':
# In Ruby $" and $` are special.
return True
return False
def softchar_accept_matching_backquote(self, scimoz, pos, style_info, candidate):
if self._is_special_variable(scimoz, pos,
self.isUDL() and scimoz.SCE_UDL_SSL_VARIABLE or scimoz.SCE_RB_GLOBAL):
return None
return KoLanguageKeywordBase.softchar_accept_matching_backquote(self, scimoz, pos, style_info, candidate);
def softchar_accept_matching_double_quote(self, scimoz, pos, style_info, candidate):
"""First verify that we aren't seeing $"
"""
if self._is_special_variable(scimoz, pos,
self.isUDL() and scimoz.SCE_UDL_SSL_VARIABLE or scimoz.SCE_RB_GLOBAL):
return None
return KoLanguageKeywordBase.softchar_accept_matching_double_quote(self, scimoz, pos, style_info, candidate);
def _softchar_accept_match_outside_strings(self, scimoz, pos, style_info, candidate):
"""
See KoLanguageKeywordBase._softchar_accept_match_outside_strings for docs
"""
if pos == 0:
return candidate
prevPos = scimoz.positionBefore(pos)
prevStyle = scimoz.getStyleAt(prevPos)
if (prevStyle == scimoz.getStyleAt(pos) # we're in a string
# Can't have a string immediately following a variable name without
# an operator.
or prevStyle in self.getVariableStyles()
# Needed for embedded expressions, redundant for other languages.
# Bug 79388: puts "... {2 + 2}<|>
# Typing a single-quote at the cursor shouldn't generate a soft char.
or prevStyle in style_info._indent_styles and scimoz.getWCharAt(prevPos) == "}"):
return None
return candidate
def _startsNewBlock(self, scimoz, initialLine, curr_indent):
# Look to see if the next non-code line indents at or
# before the current line
#
# Return boolean: yes: this line does start a new block; no: it doesn't
finalLine = scimoz.lineFromPosition(scimoz.length)
normalized_indent_len = self._getNormalizedIndentLen(scimoz, curr_indent)
for i in xrange(initialLine + 1, finalLine + 1):
nchars_i, line_i = scimoz.getLine(i)
if nchars_i == 0:
continue
wmatch = self.match_blank_or_comment_line.match(line_i)
if wmatch:
# Don't bother checking indentation of blank and comment lines
continue
norm_len_i = self._getNormalizedIndentLen(scimoz, self._getActualIndent(scimoz, i))
if norm_len_i != normalized_indent_len:
# heuristic using indentation
# on a false positive we end up not an 'end
# on a false negative we'll insert an end, and
# they should fix the indentation
return norm_len_i <= normalized_indent_len
wmatch = self.optws_word.match(line_i)
return wmatch and wmatch.group(1) != "end"
# If we're here, it means the current line is the last
# non-comment line in the buffer, and we can add something.
return True
def _getActualIndent(self, scimoz, lineNo, lineStart=None, currentPos=None):
if lineStart is None:
lineStart = scimoz.positionFromLine(lineNo)
lineEnd = scimoz.getLineEndPosition(lineNo)
line = scimoz.getTextRange(lineStart, lineEnd)
ws = self.leading_ws_re.match(line).group(0)
if currentPos and (currentPos - lineStart) < len(ws):
ws2 = ws[0:(currentPos - lineStart)]
else:
ws2 = ws
return ws2
def _getNormalizedIndentLen(self, scimoz, indentStr):
space_str = indentStr.expandtabs(scimoz.tabWidth)
return len(space_str)
    # This is not used yet, but hang on...
    def _getNormalizedIndentStr(self, scimoz, width):
        if width <= 0:
            return ""
        str1 = " " * width
        if not scimoz.getUseTabs(): return str1
        tabLen = scimoz.tabWidth  # property access, matching its use elsewhere in this file
        if tabLen <= 0: return str1
        return str1.replace(' ' * tabLen, "\t")
def _getParentLine(self, scimoz, currentLine):
""" Here are the cases we need to consider for closing an end block:
1. previous line is a header
- the previous line is the current line's header
2. the previous line has a higher folding count than the current line
- this means that the previous line sits at the end of a block.
Return the current line's own header
3. the previous line has the same folding count as the current line,
so they both have the same current line.
The nice part:
Because the lexer does most of the work calculating parents
based on opening and closing keywords, and braces, we can just
use the Scintilla API to return the parent.
We just do a sanity check to make sure the result is valid.
"""
parentLine = scimoz.getFoldParent(currentLine)
if parentLine > currentLine:
return 0
return parentLine
def _getFoldLevel(self, scimoz, line):
# Adapted from koLanguageCommandHandler.py
return scimoz.getFoldLevel(line) & scimoz.SC_FOLDLEVELNUMBERMASK
def _getFoldLevelsFromPos(self, scimoz, currentPos, curr_line=None):
try:
if curr_line is None: curr_line = scimoz.lineFromPosition(currentPos)
curr_fold_level = self._getFoldLevel(scimoz, curr_line)
if curr_line == 0: return (curr_fold_level, curr_fold_level)
prev_line = curr_line - 1
if prev_line == 0:
# Can't go any higher
parent_fold_level = self._getFoldLevel(scimoz, prev_line)
else:
parentLine = scimoz.getFoldParent(prev_line)
                parentLine = scimoz.getFoldParent(prev_line)
                parent_fold_level = self._getFoldLevel(scimoz, parentLine)
            return (curr_fold_level, parent_fold_level)
        except:
            # Fold info may be unavailable; fall through with None.
            return None
# Override computeIndent
def computeIndent(self, scimoz, indentStyle, continueComments):
if continueComments:
return KoLanguageKeywordBase.computeIndent(self, scimoz, indentStyle, continueComments)
return self._computeIndent(scimoz, indentStyle, continueComments, self._style_info)
def _computeIndent(self, scimoz, indentStyle, continueComments, style_info):
# Don't rely on KoLanguageKeywordBase here.
#super_indent = KoLanguageKeywordBase._computeIndent(self, scimoz, indentStyle, continueComments, style_info)
#qlog.debug("super_indent: %r", super_indent)
#if super_indent is not None:
# return super_indent
if continueComments:
#qlog.debug("continueComments: %r", continueComments)
inBlockCommentIndent, inLineCommentIndent = self._inCommentIndent(scimoz, scimoz.currentPos, continueComments, style_info)
if inLineCommentIndent is not None:
return inLineCommentIndent
elif inBlockCommentIndent is not None:
return inBlockCommentIndent
try:
new_indent_string = self._calcIndentLevel(scimoz, scimoz.currentPos, style_info)
#qlog.debug("new_indent_string: %r", new_indent_string)
return new_indent_string
except:
log.warn("Got exception computing _calcIndentLevel", exc_info=1)
return ''
# This function works its way back through the buffer, to a max of
# self._max_lines_in_calc(100) lines
def _calcIndentLevel(self, scimoz, currentPos, style_info):
# Check to see if we're on a continuation first
# Use the language service base
continuationLineIndent = self._continuationLineIndent(scimoz, currentPos)
initialLine = curr_line = scimoz.lineFromPosition(currentPos)
if continuationLineIndent is not None:
lineEndPos = scimoz.getLineEndPosition(curr_line)
log.debug("Ruby _calcIndentLevel detected continuation line[%d:%d->%d] indent of [%s](%d chars)" % (initialLine, currentPos, lineEndPos, continuationLineIndent, len(continuationLineIndent)))
return continuationLineIndent
minimumLine = max(initialLine - NUM_LINES_TO_ANALYZE, 0)
lineStartPos = scimoz.positionFromLine(curr_line)
tokens = self._get_line_tokens(scimoz, lineStartPos, currentPos, style_info, additional_ignorable_styles=style_info._default_styles)
indent_delta = 0
indent_amount = scimoz.indent
if not indent_amount:
indent_amount = 8
keep_walking_back = True
# Allows for moving up through continuation lines
while keep_walking_back and curr_line >= 0:
# Walk the tokens backwards
idx = len(tokens) - 1
# self._dump_tokens(tokens, indentlog)
while idx >= 0:
tok = tokens[idx]
(style, text, tok_pos) = tok.explode()
#indentlog.debug("tok %d: (pos %d, style %d, text [%s])", idx, tok_pos, style, text)
# lineup-open is more important than others
if style in style_info._lineup_styles:
if text in self._lineup_open_chars:
# Allow for keywords to the right of this spo, but an
# excess of 'end's doesn't make sense if we're lining up
# Add one to line up one to the right of the char.
if indent_delta < 0: indent_delta = 0
colPos = scimoz.getColumn(tok_pos) + 1 + indent_delta * indent_amount
# Check for end-of-line
if idx == len(tokens) - 1:
#indentlog.debug("this is the last token(%d)", idx)
lineEndPos = scimoz.getLineEndPosition(scimoz.lineFromPosition(tok_pos))
eolLen = len(eol2eolStr[scimozEOL2eol[scimoz.eOLMode]])
if tok_pos + len(text) == lineEndPos:
# Do a bracketing instead
bracketPos = self._getIndentWidthForLine(scimoz, curr_line) + indent_amount
if colPos > bracketPos:
colPos = bracketPos
return scimozindent.makeIndentFromWidth(scimoz, colPos)
elif text in self._indent_open_chars:
indent_delta += 1
elif text in self._lineup_close_chars + self._indent_close_chars:
# Find the matched part and move back
(new_line, new_pos) = self._calcMatcher(scimoz, tok_pos)
if new_line is None:
# Base it on the indents on the current line
# We have an extra closer, so ignore it and keep going
log.debug("_calcIndentLevel: ignoring closer at (%d:%d) returned None" % (curr_line, tok_pos))
elif new_line > curr_line:
log.debug("_calcIndentLevel: **** match(%d:%d) => greater line of (%d,%d)" % (curr_line, tok_pos, new_line, new_pos))
return self._getActualIndent(scimoz, curr_line, lineStartPos)
elif new_line < minimumLine:
# Can't look any further back, use the indent at this line.
#XXX Include delta's
return self._getActualIndent(scimoz, curr_line, lineStartPos)
# For the next two blocks,
# if the new position is at the start of the line,
# we're done
elif new_line == curr_line:
# Sanity check
if new_pos > tok_pos:
log.debug("_calcIndentLevel: match(%d:%d) => same line, greater pos of (%d)" % (curr_line, tok_pos, new_pos))
return self._getActualIndent(scimoz, curr_line, lineStartPos)
# Move back until idx is < the matched char
new_idx = self._find_token(tokens, idx - 1, new_pos)
idx = new_idx
#log.debug("_calcIndentLevel: closer moved to idx %d, pos %d, line %d" % (idx, new_pos, curr_line))
else:
#log.debug("_calcIndentLevel: moved from (%d:%d) to (%d:%d) with %d deltas" % (curr_line, tok_pos, new_line, new_pos, indent_delta))
lineStartPos = scimoz.positionFromLine(new_line)
curr_line = new_line
if new_pos <= lineStartPos:
# We moved to the start of the line, so we're done
break
tokens = self._get_line_tokens(scimoz, lineStartPos, new_pos, style_info, additional_ignorable_styles=style_info._default_styles)
# self._dump_tokens(tokens)
idx = len(tokens) - 1
new_idx = self._find_token(tokens, idx, new_pos)
idx = new_idx
#log.debug("_calcIndentLevel: diff-line closer moved to idx %d, pos %d, line %d" % (idx, new_pos, curr_line))
# Get the tokens for a new line
# Leave indent_delta alone -- it's still in effect
else:
# It's another line-up, like a reg-ex thing
#XXX do something smarter later
pass
elif style in style_info._keyword_styles:
this_delta = self._calc_keywords_change(tokens, idx, text, style_info)
if this_delta != 0:
log.debug("_calcIndentLevel: line %d: moving from del %d => %d on %s" % (curr_line, indent_delta, indent_delta + this_delta, text))
indent_delta += this_delta
elif (style in style_info._multiline_styles
and idx == 0
and curr_line > 0
and (idx > 1 or not self._lineEndsWithStyle(scimoz, style, curr_line))
and self._lineEndsWithStyle(scimoz, style, curr_line - 1)):
# So we know we're in a string, or at its end, and the string doesn't
# bleed to the next line, and it did bleed from the previous.
new_line, new_pos = self._findStartOfToken(scimoz, style, tok_pos)
# Check to see if the line ends with this style,
# and if it does use the indentation based at that line
if new_line is None or new_line >= curr_line:
log.debug("_calcIndentLevel: can't find start of token while at (%d:%d) => same line, greater pos of (%d)" % (curr_line, tok_pos))
return self._getActualIndent(scimoz, curr_line, lineStartPos)
#log.debug("_calcIndentLevel: string-moved from (%d:%d) to (%d:%d)" % (curr_line, tok_pos, new_line, new_pos))
lineStartPos = scimoz.positionFromLine(new_line)
curr_line = new_line
if new_pos <= lineStartPos:
# We moved to the start of the line, so we're done
#log.debug("_calcIndentLevel: string-moved to start of line")
break
tokens = self._get_line_tokens(scimoz, lineStartPos, new_pos, style_info, additional_ignorable_styles=style_info._default_styles)
# self._dump_tokens(tokens)
idx = len(tokens) - 1
new_idx = self._find_token(tokens, idx, new_pos)
idx = new_idx
#log.debug("_calcIndentLevel: string-moved to idx %d, pos %d, line %d" % (idx, new_pos, curr_line))
else:
# Nothing interesting here
pass
# end switching on the token's style
idx -= 1
# end inner while idx >= 0
if (keep_walking_back and curr_line > 0 and
self._is_continuation_line(scimoz, curr_line - 1, style_info)):
curr_line -= 1
lineStartPos = scimoz.positionFromLine(curr_line)
currentPos = scimoz.getLineEndPosition(curr_line)
tokens = self._get_line_tokens(scimoz, lineStartPos, currentPos, style_info, additional_ignorable_styles=style_info._default_styles)
else:
break
# end outer while
# Now calc something based on indent_delta and the
# current indentation level in effect at whatever line
# we ended up at.
parent_line = None
if indent_delta == 0:
return self._getActualIndent(scimoz, curr_line, lineStartPos, currentPos)
elif indent_delta > 0:
new_indent_amount = self._getIndentWidthForLine(scimoz, curr_line) \
+ indent_delta * indent_amount
#log.debug("_calcIndentLevel: found %d deltas, setting indent to %d" % (indent_delta, new_indent_amount))
return scimozindent.makeIndentFromWidth(scimoz, new_indent_amount)
else:
# This is the interesting one -- we have a bunch of 'end' keywords,
# and we need to move up by fold levels to find the parent that matches
#log.debug("_calcIndentLevel: found %d deltas, checking parents" % (indent_delta,))
while indent_delta < 0:
# parentLine = self._safeGetFoldParent(scimoz, curr_line)
parentLine = scimoz.getFoldParent(curr_line)
if parentLine is None or \
parentLine < minimumLine or \
parentLine >= curr_line:
#log.debug("_calcIndentLevel: parent(%d) => %s" % (curr_line, (parentLine and str(parentLine) or "<None>")))
break
#log.debug("_calcIndentLevel: parent(%d) => %d" % (curr_line, parentLine))
curr_line = parentLine
indent_delta += 1
istr = self._getActualIndent(scimoz, curr_line)
#log.debug("_calcIndentLevel: indent(%d) => [%s](%d)" % (curr_line, istr, len(istr)))
return istr
# end if
def _dump_tokens(self, tokens, log=log):
str2 = "tokens: \n"
for tok in tokens:
str2 += "[%d %s %d]" % (tok.explode())
log.debug(str2)
def _lineEndsWithStyle(self, scimoz, style, curr_line):
# This routine has a flaw:
# If the string ends at the very end of the buffer, and we
# press return, the EOL hasn't been styled yet,
endPos = scimoz.getLineEndPosition(curr_line)
bufferSize = scimoz.length;
if endPos >= bufferSize:
log.debug("_lineEndsWithStyle: endPos = %d, bufferSize = %d, end-style=%d, charNum=%d, setting..." % (endPos, bufferSize, style, scimoz.getCharAt(endPos)))
endPos = bufferSize - 1
endStyle = getActualStyle(scimoz, endPos)
endCharNum = scimoz.getCharAt(endPos)
log.debug("_lineEndsWithStyle: line %d, pos %d, charNum %d, style %d" % (curr_line, endPos, endCharNum, endStyle))
return endStyle == style
def _findStartOfToken(self, scimoz, style, pos):
while pos > 0 and getActualStyle(scimoz, pos) == style:
pos -= 1
return scimoz.lineFromPosition(pos), pos
def _is_continuation_line(self, scimoz, line_no, style_info):
eol_pos = scimoz.getLineEndPosition(line_no) - 1
sol_pos = scimoz.positionFromLine(line_no)
log.debug("_is_continuation_line(%d) over [%d:%d]" % (line_no, sol_pos, eol_pos))
if eol_pos < sol_pos: # A true empty line?
            return False
style = getActualStyle(scimoz, eol_pos)
if style not in style_info._default_styles:
            return False
# What if it's a newline?
chNum = scimoz.getCharAt(eol_pos)
if chNum in [ORD_CR, ORD_NL]:
log.debug("__is_continuation_line: have a newline")
eol_pos -= 1
if eol_pos < sol_pos:
return False
style = getActualStyle(scimoz, eol_pos)
chNum = scimoz.getCharAt(eol_pos)
log.debug("__is_continuation_line(%d) => ch(%d), style(%d)" % (line_no, chNum, style))
if style in style_info._default_styles and chNum == ORD_BS:
return True
return False
def _calcMatcher(self, scimoz, tok_pos):
new_pos = scimoz.braceMatch(tok_pos)
if new_pos is None or new_pos < 0 or new_pos >= tok_pos:
# Contractual agreement (where's the contract?)
return (None, None)
new_line = scimoz.lineFromPosition(new_pos)
return (new_line, new_pos)
def _calc_keywords_change(self, tokens, idx, curr_text, style_info):
if curr_text in self._enders:
return -1
elif curr_text in self._sliders:
# Assume these got pushed back, so we need to re-indent
if idx == 0:
return +1
else:
return 0
elif curr_text in self._dedenters:
#XXX Can we use fold info here? Probably not.
for tok in tokens[idx+1:]:
if tok.style in style_info._modified_keyword_styles:
# Don't dedent if we *might* be modifying the line
return 0
return -1
elif curr_text in self._limited_openers:
return +1
elif curr_text in self._ending_openers:
# Don't add an indent if this is a "noise" keyword
if self._do_preceded_by_looper(tokens[:idx],
tokens[idx].style, style_info):
# We'll count the indent when we reach the 'looper'
return 0
return +1
return 0
# Find the opener that matches the closer, and set the token
# pointer to point to it.
# If we can't find it, point to the token with the smallest
# position, or 0.
# This routine knows that the returned index will be decremented
# immediately.
def _find_token(self, tokens, idx, target_pos):
start_idx = idx
while idx >= 0:
this_pos = tokens[idx].start_pos
if this_pos == target_pos:
return idx
elif this_pos < target_pos:
break
idx -= 1
if idx == start_idx:
return start_idx
else:
# Let the caller try the token we ended up on next iter
return start_idx + 1
class KoRubyCompileLinter:
_com_interfaces_ = [components.interfaces.koILinter]
_reg_desc_ = "Komodo Ruby Linter"
_reg_clsid_ = "{9efff282-9919-4575-8c89-4a1e57512c97}"
_reg_contractid_ = "@activestate.com/koLinter?language=Ruby;1"
_reg_categories_ = [
("category-komodo-linter", 'Ruby'),
]
def __init__(self):
# From the Perl Linter
self.sysUtils = components.classes["@activestate.com/koSysUtils;1"].\
getService(components.interfaces.koISysUtils)
self.infoSvc = components.classes["@activestate.com/koInfoService;1"].\
getService()
self.rubyInfoEx = components.classes["@activestate.com/koAppInfoEx?app=Ruby;1"].\
getService(components.interfaces.koIAppInfoEx)
self._lastErrorSvc = components.classes["@activestate.com/koLastErrorService;1"].\
getService(components.interfaces.koILastErrorService)
self._koVer = self.infoSvc.version
self.warning_re = None
# Delay compiling the Ruby RE's until we need them.
self._word_letters = string.ascii_letters + string.digits + "_"
def _define_res(self):
self.warning_re = re.compile(r'^(?P<file>.*):(?P<line>\d+):\s*warning: (?P<message>.*)')
self.error_re = re.compile(r'^(?P<file>.*):(?P<line>\d+):\s*(?P<message>.*)')
self.caret_re = re.compile(r'^(\s*)\^')
self.leading_res = {}
self.trailing_res = {}
for setup in [['w', r'\w*'], ['o', r'[^\w\s]*']]:
self.leading_res[setup[0]] = re.compile('^(' + setup[1] + ')')
self.trailing_res[setup[0]] = re.compile('(' + setup[1] + ')$')
self.leading_res['s'] = re.compile(r'^(\s*\S?)')
self.trailing_res['s'] = re.compile(r'(\S?\s*)$')
def lint(self, request):
text = request.content.encode(request.encoding.python_encoding_name)
return self.lint_with_text(request, text)
def lint_with_text(self, request, text):
"""Lint the given Ruby file.
Raise an exception and set an error on koLastErrorService if there
is a problem.
"""
prefset = request.koDoc.getEffectivePrefs()
cwd = request.cwd
        # Remove a possible "-d" from the shebang line; that option would tell
        # Ruby to launch the debugger, which we don't want while linting.
# Be careful to handle single-line files correctly.
splitText = text.split("\n", 1)
firstLine = self.RemoveDashDFromShebangLine(splitText[0])
if len(splitText) > 1:
text = "\n".join([firstLine, splitText[1]])
else:
text = firstLine
#print "----------------------------"
#print "Ruby Lint"
#print text
#print "----------------------------"
# Save ruby buffer to a temporary file.
tmpFileName = None
if cwd:
# Try to create the tempfile in the same directory as the ruby
# file so that @INC additions using FindBin::$Bin work as
# expected.
# XXX Would really prefer to build tmpFileName from the name of
# the file being linted but the Linting system does not
# pass the file name through.
tmpFileName = os.path.join(cwd, ".~ko-%s-rubylint~" % self._koVer)
try:
fout = open(tmpFileName, 'wb')
fout.write(text)
fout.close()
except (OSError, IOError), ex:
tmpFileName = None
if not tmpFileName:
# Fallback to using a tmp dir if cannot write in cwd.
try:
tmpFileName = str(tempfile.mktemp())
except OSError, ex:
# Sometimes get this error but don't know why:
# OSError: [Errno 13] Permission denied: 'C:\\DOCUME~1\\trentm\\LOCALS~1\\Temp\\~1324-test'
errmsg = "error determining temporary filename for "\
"Ruby content: %s" % ex
self._lastErrorSvc.setLastError(3, errmsg)
raise ServerException(nsError.NS_ERROR_UNEXPECTED)
fout = open(tmpFileName, 'wb')
fout.write(text)
fout.close()
lines = []
try:
rubyExe = self.rubyInfoEx.getExecutableFromDocument(request.koDoc)
if not rubyExe:
rubyExe = self.sysUtils.Which("ruby")
if not rubyExe:
errmsg = "Could not find a suitable Ruby interpreter for linting."
self._lastErrorSvc.setLastError(1, errmsg)
raise ServerException(nsError.NS_ERROR_NOT_AVAILABLE)
option = '-' + prefset.getStringPref("ruby_lintOption")
# Note that ruby -c doesn't actually consult paths. It doesn't
# actually load required modules, since it doesn't need to process
# them the way Perl does to determine how to interpret code after 'use'
rubyExtraPaths = prefset.getStringPref("rubyExtraPaths")
if rubyExtraPaths:
if sys.platform.startswith("win"):
rubyExtraPaths = string.replace(rubyExtraPaths, '\\', '/')
rubyExtraPaths = [x for x in rubyExtraPaths.split(os.pathsep) if x.strip()]
rubyExtraPaths.insert(0, '.')
else:
rubyExtraPaths = ['.']
argv = [rubyExe]
for incpath in rubyExtraPaths:
argv += ['-I', incpath]
argv += [option, tmpFileName]
cwd = cwd or None # convert '' to None (cwd=='' for new files)
env = koprocessutils.getUserEnv()
p = process.ProcessOpen(argv, cwd=cwd, env=env, stdin=None)
stdout, stderr = p.communicate()
lines = stderr.splitlines(1)
finally:
try:
os.unlink(tmpFileName)
except:
log.warn("Got exception trying to delete temp file %s", tmpFileName, exc_info=1)
pass
results = koLintResults()
try:
results = self._parseRubyResults(results, lines, tmpFileName, text)
except:
errmsg = "Exception in Ruby linting while parsing results"
self._lastErrorSvc.setLastError(1, errmsg)
log.exception(errmsg)
#print "----------------------------"
return results
# ruby error line format
# Errors:
# filename:lineNo: <message>
# source-code-text
# ^ at start of problem in text, usually start of a token
# So find the token, and squiggle it
#
# Warnings:
# filename:line_no: warning: <message>
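    # A made-up example of the error block this parser consumes:
    #   /tmp/.~ko-1.2-rubylint~:3: syntax error, unexpected tIDENTIFIER
    #   puts foo bar
    #            ^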
def _add_if_new(self, results, result, reported, err_hash):
k = "%s:%s" % (err_hash['file'], err_hash['line'])
if not reported.has_key(k):
results.addResult(result)
reported[k] = None
def _parseRubyResults(self, results, lines, tmpFileName, text):
# caller does this: results = koLintResults()
# So that if an exception is thrown the caller will
# get a good default value.
if not lines or len(lines) == 0:
return results
if not self.warning_re:
self._define_res()
datalines = re.split('\r\n|\r|\n',text)
reported = {}
i = 0
numLines = len(lines)
while i < numLines:
line = lines[i]
#print line
ge = gw = None
gw = self.warning_re.match(line)
if gw:
err = gw.groupdict()
else:
ge = self.error_re.match(line)
if not ge:
log.debug("Couldn't match an error or warning with line %d:<%s>", i, line)
i = i + 1
continue
err = ge.groupdict()
# Common stuff here
# Assume there is no caret
result = KoLintResult()
result.lineStart = result.lineEnd = int(err['line'])
result.columnStart = 1
result.columnEnd = len(datalines[result.lineEnd-1]) + 1
result.description = err['message']
if gw:
result.severity = result.SEV_WARNING
self._add_if_new(results, result, reported, err)
i = i + 1
continue
result.severity = result.SEV_ERROR
            # An error is normally followed by the offending source line and
            # a caret line; see if the line two below holds the caret.
if i < numLines - 2:
caret_match = self.caret_re.match(lines[i + 2])
if not caret_match:
i = i + 1
continue
try:
caret_posn = len(caret_match.group(0))
if caret_posn > 0: caret_posn -= 1
fpart, rest = lines[i + 1][:caret_posn], lines[i + 1][caret_posn + 1:]
caret_char = lines[i + 1][caret_posn]
if caret_char in " \t":
re_key = 's'
elif caret_char in self._word_letters:
re_key = 'w'
else:
re_key = 'o'
# white-space: just span white-space, and next
# Span to the surrounding non-white-space chars
ws_left_len = len(self.trailing_res[re_key].search(fpart).group(0))
ws_right_len = len(self.leading_res[re_key].search(rest).group(0))
col_left = caret_posn - ws_left_len
col_right = caret_posn + ws_right_len
if col_left < 0: col_left = 0
result.columnStart = col_left + 1
result.columnEnd = col_right + 1
except:
log.warn("Got exception computing _parseRubyResults", exc_info=1)
pass
i += 3
else:
i += 1
self._add_if_new(results, result, reported, err)
return results
def RemoveDashDFromShebangLine(self, line):
"""Remove a possible -d ruby option from the given shebang line.
Return the resultant string.
Note that this is probably not going to handle esoteric uses of quoting
and escaping on the shebang line.
>>> from KoRubyLanguage import RemoveDashDFromShebangLine as rd
>>> rd("foo")
'foo'
>>> rd("#!ruby -d")
'#!ruby '
>>> rd("#!ruby -d:foo")
'#!ruby '
>>> rd("#!ruby -cd")
'#!ruby -c'
>>> rd("#!ruby -0")
'#!ruby -0'
>>> rd("#!ruby -01d")
'#!ruby -01'
>>> rd("#!ruby -Mmymodule")
'#!ruby -Mmymodule'
>>> rd("#!ruby -dMmymodule")
'#!ruby -Mmymodule'
>>> rd("#!ruby -Vd")
'#!ruby -V'
>>> rd("#!ruby -V:d")
'#!ruby -V:d'
>>> rd("#!/bin/sh -- # -*- ruby -*- -p")
'#!/bin/sh -- # -*- ruby -*- -p'
>>> rd("#!/bin/sh -- # -*- ruby -*- -pd")
'#!/bin/sh -- # -*- ruby -*- -p'
"""
# ensure this is a shebang line
if not line.startswith("#!"):
return line
# The Ruby command-line options are so much like Perl's
# that we can probably get away with barebones.
result = ""
remainder = line
# parsing only begins from where "ruby" is first mentioned
splitter = re.compile("(ruby)", re.I)
try:
before, ruby, after = re.split(splitter, remainder, 1)
except ValueError:
# there was no "ruby" in shebang line
return line
else:
result += before + ruby
remainder = after
# the remainder are ruby arguments
tokens = re.split("(-\*|- |\s+)", remainder)
while len(tokens) > 0:
token = tokens[0]
if token == "":
tokens = tokens[1:]
elif token in ("-*", "- "):
# "-*" and "- " are ignored for Emacs-style mode lines
result += token
tokens = tokens[1:]
elif re.match("^\s+$", token):
# skip whitespace
result += token
tokens = tokens[1:]
elif token == "--":
# option processing stops at "--"
result += "".join(tokens)
tokens = []
elif token.startswith("-"):
# parse an option group
# See "ruby -h". Those options with arguments (some of them
# optional) must have 'd' in those arguments preserved.
stripped = "-"
token = token[1:]
while len(token) > 0:
ch = token[0]
if ch in ('0', 'l'):
# -0[octal]
# -l[octal]
stripped += ch
token = token[1:]
while len(token) > 0:
ch = token[0]
if ch in "01234567":
stripped += ch
token = token[1:]
else:
break
elif ch == 'd':
# -d[:debugger]
if len(token) > 1 and token[1] == ":":
# drop the "d:foo"
token = ""
else:
# drop the 'd'
token = token[1:]
elif ch in ('D', 'F', 'i', 'I', 'm', 'M', 'x'):
# -D[number/list]
# -F/pattern/
# -i[extension]
# -Idirectory
# -[mM][-]module
# -x[directory]
stripped += token
token = ""
elif ch == 'V':
# -V[:variable]
if len(token) > 1 and token[1] == ":":
stripped += token
token = ""
else:
stripped += ch
token = token[1:]
else:
stripped += ch
token = token[1:]
if stripped != "-":
result += stripped
tokens = tokens[1:]
else:
# this is a non-option group token, skip it
result += token
tokens = tokens[1:]
remainder = ""
return result
| [
"[email protected]"
] | |
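The trickiest part of the lint parser above is the caret handling: Ruby prints a caret line underneath the offending source line, and the parser converts the caret offset into a 1-based column span widened over the surrounding word or whitespace run. A minimal standalone sketch of the core idea (the regex and the widening are simplified here; this is not the Komodo implementation itself):

import re

def caret_to_columns(caret_line):
    # Length of the leading run up to and including the caret character.
    caret_posn = len(re.match(r"[ \t]*\^?", caret_line).group(0))
    if caret_posn > 0:
        caret_posn -= 1
    # Highlight just the character under the caret; the parser above widens
    # this span before converting to 1-based columns.
    return caret_posn + 1, caret_posn + 2

print(caret_to_columns("      ^"))  # => (7, 8)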
f50d4463bf0b30fc4676896b773f4ee663cfafde | e5eec1428da1d24d3e9b86f5723c51cd2ca636cd | /implement/백준/로봇 시뮬레이션.py | 9be5407197c46650cd9bb30b77ca890fafa6f60f | [] | no_license | jamwomsoo/Algorithm_prac | 3c36c381f59277721517d331a8f1640399d80c1d | 8393f3cc2f950214c47f3cf0b2c1271791f115d0 | refs/heads/master | 2023-06-09T06:49:14.739255 | 2021-06-18T06:41:01 | 2021-06-18T06:41:01 | 325,227,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | import sys
def find_location(num):
global a,b
#print(num)
for y in range(b):
for x in range(a):
# print(board[y][x])
if board[y][x]:
if board[y][x][0] == num:
return y,x
return None
direction = [(0,-1),(1,0),(0,1),(-1,0)]
a,b = map(int, input().split())
n,m = map(int, input().split())
board =[[[] for _ in range(a)] for _ in range(b)]
# for i in range(b):
# for j in range(a):
# print(board[i][j], end = " ")
# print()
command = []
for i in range(n):
x,y,d = map(str, input().split())
if d == 'N': d = 0
elif d == 'E': d = 1
elif d == 'S': d = 2
else: d = 3
#print("robot_location",b-(int(y)),int(x) - 1)
board[b-int(y)][int(x) - 1] = [i+1,d]
# for i in range(b):
# for j in range(a):
# print(board[i][j], end = " ")
# print()
for i in range(m):
num, com, cnt = map(str, input().split())
y,x = find_location(int(num))
d = board[y][x][1]
board[y][x] = []
for i in range(int(cnt)):
if com == 'F':
x+=direction[d][0]
y+=direction[d][1]
#print(y,x)
if not (0<=x< a and 0<= y <b):
print("Robot {0} crashes into the wall".format(num))
sys.exit()
if board[y][x]:
print("Robot {0} crashes into robot {1}".format(num,board[y][x][0]))
sys.exit()
elif com == 'L':
d-=1
if d<0: d = 3
elif com == 'R':
d+=1
if d>3: d = 0
board[y][x] = [int(num),d]
print("OK") | [
"[email protected]"
] | |
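find_location() in the script above rescans the whole a x b board for every command, so each lookup costs O(a*b). A common alternative for this problem is to key robot state by robot number; the sketch below reuses the direction table and the bounds a and b from the script above and is not the author's code:

# Hypothetical refactor: O(1) lookups via two dicts instead of a board scan.
positions = {}   # robot number -> [row, col, direction index]
occupied = {}    # (row, col) -> robot number

def move_forward(num):
    y, x, d = positions[num]
    dx, dy = direction[d]  # same N/E/S/W table as in the script above
    ny, nx = y + dy, x + dx
    if not (0 <= nx < a and 0 <= ny < b):
        print("Robot {0} crashes into the wall".format(num))
        sys.exit()
    if (ny, nx) in occupied:
        print("Robot {0} crashes into robot {1}".format(num, occupied[(ny, nx)]))
        sys.exit()
    del occupied[(y, x)]
    occupied[(ny, nx)] = num
    positions[num] = [ny, nx, d]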
41f3e4504258bf7eb315ccb9fa76996f1a5dafeb | f42608d292c5784f59d554337e2826d398d8391f | /base_structure/app.py | 6bb1662cfab3f3de20a53e3d3d4606af506412d7 | [
"Unlicense"
] | permissive | thinkAmi-sandbox/werkzeug-sample | e4e0000b0b1ee0c72acc36113b125765b185ce39 | fbf778ba8a83d5c91de1a5baa619087b0ab46199 | refs/heads/master | 2020-03-29T19:45:47.006365 | 2018-10-15T13:41:54 | 2018-10-15T13:41:54 | 150,279,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,492 | py | import pathlib
from werkzeug._compat import text_type
from werkzeug.exceptions import abort, InternalServerError
from werkzeug.utils import redirect
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import SharedDataMiddleware
class MyInternalServerError(InternalServerError):
def get_body(self, environ=None):
        # text_type() is protected, so it's debatable whether we should be using it...
return text_type(
u'<!DOCTYPE html>'
u'<title>My Internal Server Error</title>'
u'<h1>Oh, my internal server error!</h1>'
)
class Application:
def dispatch_request(self, request):
"""
        Note that requests for favicon.ico come in here as well.
        In production, static files would be served by Nginx, so this may not be an issue.
"""
body = []
try:
            # Get the request path
body.append(f'request.path: {request.base_url}')
# => http://localhost:5000/
            # Get the environment variables
            # Both WSGI and CGI environment variables appear to be available; the type is dict
body.append(f'environ: {type(request.environ)} / {request.environ}')
# => <class 'dict'> / {'wsgi.version': (1, 0), ... , 'REQUEST_METHOD': 'GET', ...
            # Get the HTTP request method
body.append(f'HTTP method: {request.method}')
# => GET
            # Get the query string
body.append(f'Query String: {request.args}')
            # => [GET] with $ curl http://localhost:5000?foo=bar
# ImmutableMultiDict([('foo', 'bar')])
            # => [POST] with $ curl -w '\n' -X POST 'localhost:5000/?ham=spam' --data 'foo=1&bar=2'
# ImmutableMultiDict([('ham', 'spam')])
            # Get the POST data
body.append(f'Form: {request.form}')
            # => [GET] with $ curl http://localhost:5000?foo=bar
# ImmutableMultiDict([])
            # => [POST] with $ curl -w '\n' -X POST 'localhost:5000/?ham=spam' --data 'foo=1&bar=2'
# ImmutableMultiDict([('foo', '1'), ('bar', '2')])
            # request.values gives access to both the query string and the form values
body.append(f'request.values: {request.values}')
            # => [GET] with $ curl http://localhost:5000?foo=bar
# CombinedMultiDict([ImmutableMultiDict([('foo', 'bar')]),
# ImmutableMultiDict([])
# ])
            # => [POST] with $ curl -w '\n' -X POST 'localhost:5000/?ham=spam' --data 'foo=1&bar=2'
# CombinedMultiDict([ImmutableMultiDict([('ham', 'spam')]),
# ImmutableMultiDict([('foo', '1'), ('bar', '2')])
# ])
            # Print the HTTP request headers
for k, v in request.headers.items():
body.append(f'Request header: key:{k} / value: {v}')
# => Request header: key:Host / value: localhost:5000 ...
            # Get the client IP address
            # Note the difference between access_route and remote_addr
body.append(f'access_route: {request.access_route}')
# => access_route: ImmutableList(['127.0.0.1'])
body.append(f'remote_addr: {request.remote_addr}')
# => remote_addr: 127.0.0.1
            # Get the cookie values sent with the request
counter = request.cookies.get('counter', 0)
msg = '\n'.join(body)
response = Response(msg)
            # Even when no new cookie is set, the previous cookie values are sent again on the next request
if 'one_time' not in request.cookies:
response.set_cookie('one_time', 'x')
            # Delete a cookie
if 'delete_cookie' in request.args:
response.delete_cookie('one_time')
# => Set-Cookie: one_time=; Expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
            # A cookie that is always set
response.set_cookie('counter', str(int(counter) + 1))
            # Set separate attributes under the same cookie key
response.set_cookie('same_cookie', '1st', httponly=True)
response.set_cookie('same_cookie', '2nd', secure=True)
            # Set custom HTTP headers
response.headers.add('X-headers-add', 'using add')
response.headers.add_header('X-headers-add_header', 'using add_header')
response.headers['X-headers-key'] = 'using key'
# => X-headers-add: using add
# X-headers-add_header: using add_header
# X-headers-key: using key
            # Override the content_type
response.content_type = 'application/json'
            # Redirect
if 'redirect' in request.args:
return redirect('https://www.google.co.jp')
            # HTTP 500 error
if '500' in request.args:
abort(500)
except InternalServerError as e:
            # Swap in the custom error page
return MyInternalServerError()
return response
def wsgi_app(self, environ, start_response):
request = Request(environ)
response = self.dispatch_request(request)
return response(environ, start_response)
def __call__(self, environ, start_response):
"""WSGIアプリを直接dispatchすることで、wsgi_app()をWSGIミドルウェアっぽく使える"""
print('!!! app !!!')
return self.wsgi_app(environ, start_response)
def create_app(with_static=True):
application = Application()
    # This is where the WSGI middleware is hooked in
if with_static:
application.wsgi_app = SharedDataMiddleware(
application.wsgi_app,
{'/favicon.ico': str(pathlib.Path('./favicon.ico'))}
)
return application
if __name__ == '__main__':
from werkzeug.serving import run_simple
app = create_app()
    # Pass 0.0.0.0 as the first argument so the app can be reached from outside (same as Flask)
# https://qiita.com/tomboyboy/items/122dfdb41188176e45b5
run_simple('0.0.0.0', 5000, app, use_debugger=True, use_reloader=True)
# run_simple('127.0.0.1', 5000, a, use_debugger=True, use_reloader=True)
| [
"[email protected]"
] | |
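Because create_app() returns a plain WSGI callable, the application above can be exercised without starting a server by using werkzeug's test client. A small sketch (assuming create_app is importable from the module above; the API shown exists in the 0.x werkzeug line that the werkzeug._compat import implies):

from werkzeug.test import Client
from werkzeug.wrappers import Response

client = Client(create_app(with_static=False), Response)  # skip the favicon middleware in tests

resp = client.get('/?foo=bar')
print(resp.status_code)                    # 200
print(resp.headers.get('X-headers-add'))   # 'using add'
print(resp.headers.getlist('Set-Cookie'))  # counter, one_time, same_cookie cookies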
03da8fde9a8aff510e7b931f33942b88ea3adc4d | 1105414add7c27eb201a0941e5bc86eb2f09378f | /journey5/cputype.py | 1cd845d82e52ffb0bf5afda36aa8b46e72d2220a | [
"MIT"
] | permissive | parrisma/AI-Intuition | d083204267c351bc85c796a79ce43b8ff9d58022 | 3b081696b1d226815e029cbb536fac5e4d3de9a7 | refs/heads/master | 2021-07-25T21:07:11.455443 | 2020-06-06T20:44:17 | 2020-06-06T20:44:17 | 193,102,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | from typing import List
from enum import Enum, unique
from copy import deepcopy
@unique
class CPUType(Enum):
GPU = 'GPU'
GENERAL = 'CPU'
BATCH = 'BAT'
def __str__(self):
return self.value
def __add__(self, other):
if isinstance(other, self.__class__):
return self.value + other.value
else:
            raise ValueError('Cannot add: ' + self.__class__.__name__ + ' with: ' + other.__class__.__name__)
@classmethod
def cpu_types(cls) -> List['CPUType']:
return deepcopy([cls.GPU,
cls.GENERAL,
cls.BATCH
]
)
| [
"[email protected]"
] | |
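A short usage sketch for the enum above, assuming the missing raise in __add__ is restored as edited:

print(CPUType.GPU)                  # GPU  (__str__ returns the value)
print(CPUType.GPU + CPUType.BATCH)  # GPUBAT  (string concatenation via __add__)
print(CPUType.cpu_types())          # a deep-copied list of all three members
try:
    CPUType.GPU + 'not-a-cpu-type'
except ValueError as e:
    print(e)                        # Cannot add: CPUType with: str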
ec2a61260d952cbdbaf2e4abdfc61955db8ec40e | d138deda43e36f6c79c5e3a9ef1cc62c6a92e881 | /python/paddle/distributed/fleet/data_generator/data_generator.py | 669d2ea24a0c788bbe1c0cff38a843bd96e29016 | [
"Apache-2.0"
] | permissive | seiriosPlus/Paddle | 51afd6f5c85c3ce41dd72953ee659d1539c19f90 | 9602a182b2a4979247c09df1ec283fc39cb4a981 | refs/heads/develop | 2021-08-16T16:05:10.848535 | 2020-12-27T15:15:19 | 2020-12-27T15:15:19 | 123,257,829 | 2 | 0 | Apache-2.0 | 2019-12-10T08:22:01 | 2018-02-28T08:57:42 | C++ | UTF-8 | Python | false | false | 14,586 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
class DataGenerator(object):
"""
DataGenerator is a general Base class for user to inherit
A user who wants to define his/her own python processing logic
with paddle.distributed.InMemoryDataset/QueueDataset should
inherit this class.
"""
def __init__(self):
self._proto_info = None
self.batch_size_ = 32
def set_batch(self, batch_size):
'''
Set batch size of current DataGenerator
This is necessary only if a user wants to define generator_batch
Example:
.. code-block:: python
import paddle.distributed.fleet.data_generator as dg
class MyData(dg.DataGenerator):
def generate_sample(self, line):
def local_iter():
int_words = [int(x) for x in line.split()]
yield ("words", int_words)
return local_iter
def generate_batch(self, samples):
def local_iter():
for s in samples:
yield ("words", s[1].extend([s[1][0]]))
mydata = MyData()
mydata.set_batch(128)
'''
self.batch_size_ = batch_size
def run_from_memory(self):
'''
        This function generates data from memory. It is usually used for
        debugging and benchmarking.
Example:
.. code-block:: python
import paddle.distributed.fleet.data_generator as dg
class MyData(dg.DataGenerator):
def generate_sample(self, line):
def local_iter():
yield ("words", [1, 2, 3, 4])
return local_iter
mydata = MyData()
mydata.run_from_memory()
'''
batch_samples = []
line_iter = self.generate_sample(None)
for user_parsed_line in line_iter():
            if user_parsed_line is None:
continue
batch_samples.append(user_parsed_line)
if len(batch_samples) == self.batch_size_:
batch_iter = self.generate_batch(batch_samples)
for sample in batch_iter():
sys.stdout.write(self._gen_str(sample))
batch_samples = []
if len(batch_samples) > 0:
batch_iter = self.generate_batch(batch_samples)
for sample in batch_iter():
sys.stdout.write(self._gen_str(sample))
def run_from_stdin(self):
'''
This function reads the data row from stdin, parses it with the
process function, and further parses the return value of the
        process function with the _gen_str function. The parsed data will
        be written to stdout and the corresponding protofile will be
generated.
Example:
.. code-block:: python
import paddle.distributed.fleet.data_generator as dg
class MyData(dg.DataGenerator):
def generate_sample(self, line):
def local_iter():
int_words = [int(x) for x in line.split()]
yield ("words", [int_words])
return local_iter
mydata = MyData()
mydata.run_from_stdin()
'''
batch_samples = []
for line in sys.stdin:
line_iter = self.generate_sample(line)
for user_parsed_line in line_iter():
                if user_parsed_line is None:
continue
batch_samples.append(user_parsed_line)
if len(batch_samples) == self.batch_size_:
batch_iter = self.generate_batch(batch_samples)
for sample in batch_iter():
sys.stdout.write(self._gen_str(sample))
batch_samples = []
if len(batch_samples) > 0:
batch_iter = self.generate_batch(batch_samples)
for sample in batch_iter():
sys.stdout.write(self._gen_str(sample))
def _gen_str(self, line):
'''
Further processing the output of the process() function rewritten by
user, outputting data that can be directly read by the datafeed,and
updating proto_info information.
Args:
line(str): the output of the process() function rewritten by user.
Returns:
Return a string data that can be read directly by the datafeed.
'''
raise NotImplementedError(
"pls use MultiSlotDataGenerator or PairWiseDataGenerator")
def generate_sample(self, line):
'''
This function needs to be overridden by the user to process the
original data row into a list or tuple.
Args:
line(str): the original data row
Returns:
Returns the data processed by the user.
The data format is list or tuple:
[(name, [feasign, ...]), ...]
or ((name, [feasign, ...]), ...)
For example:
[("words", [1926, 08, 17]), ("label", [1])]
or (("words", [1926, 08, 17]), ("label", [1]))
Note:
The type of feasigns must be in int or float. Once the float
element appears in the feasign, the type of that slot will be
processed into a float.
Example:
.. code-block:: python
import paddle.distributed.fleet.data_generator as dg
class MyData(dg.DataGenerator):
def generate_sample(self, line):
def local_iter():
int_words = [int(x) for x in line.split()]
yield ("words", [int_words])
return local_iter
'''
raise NotImplementedError(
"Please rewrite this function to return a list or tuple: " +
"[(name, [feasign, ...]), ...] or ((name, [feasign, ...]), ...)")
def generate_batch(self, samples):
'''
This function needs to be overridden by the user to process the
generated samples from generate_sample(self, str) function
It is usually used as batch processing when a user wants to
do preprocessing on a batch of samples, e.g. padding according to
the max length of a sample in the batch
Args:
samples(list tuple): generated sample from generate_sample
Returns:
a python generator, the same format as return value of generate_sample
Example:
.. code-block:: python
import paddle.distributed.fleet.data_generator as dg
class MyData(dg.DataGenerator):
def generate_sample(self, line):
def local_iter():
int_words = [int(x) for x in line.split()]
yield ("words", int_words)
return local_iter
def generate_batch(self, samples):
def local_iter():
for s in samples:
yield ("words", s[1].extend([s[1][0]]))
mydata = MyData()
mydata.set_batch(128)
'''
def local_iter():
for sample in samples:
yield sample
return local_iter
# TODO: guru4elephant
# add more generalized DataGenerator that can adapt user-defined slot
# for example, [(name, float_list), (name, str_list), (name, int_list)]
class MultiSlotStringDataGenerator(DataGenerator):
def _gen_str(self, line):
'''
Further processing the output of the process() function rewritten by
user, outputting data that can be directly read by the MultiSlotDataFeed,
and updating proto_info information.
The input line will be in this format:
>>> [(name, [str(feasign), ...]), ...]
>>> or ((name, [str(feasign), ...]), ...)
The output will be in this format:
>>> [ids_num id1 id2 ...] ...
For example, if the input is like this:
>>> [("words", ["1926", "08", "17"]), ("label", ["1"])]
>>> or (("words", ["1926", "08", "17"]), ("label", ["1"]))
the output will be:
>>> 3 1234 2345 3456 1 1
Args:
line(str): the output of the process() function rewritten by user.
Returns:
Return a string data that can be read directly by the MultiSlotDataFeed.
'''
if not isinstance(line, list) and not isinstance(line, tuple):
raise ValueError(
"the output of process() must be in list or tuple type"
"Examples: [('words', ['1926', '08', '17']), ('label', ['1'])]")
output = ""
for index, item in enumerate(line):
name, elements = item
if output:
output += " "
out_str = []
out_str.append(str(len(elements)))
out_str.extend(elements)
output += " ".join(out_str)
return output + "\n"
class MultiSlotDataGenerator(DataGenerator):
def _gen_str(self, line):
'''
Further processing the output of the process() function rewritten by
user, outputting data that can be directly read by the MultiSlotDataFeed,
and updating proto_info information.
The input line will be in this format:
>>> [(name, [feasign, ...]), ...]
>>> or ((name, [feasign, ...]), ...)
The output will be in this format:
>>> [ids_num id1 id2 ...] ...
The proto_info will be in this format:
>>> [(name, type), ...]
For example, if the input is like this:
>>> [("words", [1926, 08, 17]), ("label", [1])]
>>> or (("words", [1926, 08, 17]), ("label", [1]))
the output will be:
>>> 3 1234 2345 3456 1 1
the proto_info will be:
>>> [("words", "uint64"), ("label", "uint64")]
Args:
line(str): the output of the process() function rewritten by user.
Returns:
Return a string data that can be read directly by the MultiSlotDataFeed.
'''
if not isinstance(line, list) and not isinstance(line, tuple):
raise ValueError(
"the output of process() must be in list or tuple type"
"Example: [('words', [1926, 08, 17]), ('label', [1])]")
output = ""
if self._proto_info is None:
self._proto_info = []
for item in line:
name, elements = item
if not isinstance(name, str):
raise ValueError("name%s must be in str type" % type(name))
if not isinstance(elements, list):
raise ValueError("elements%s must be in list type" %
type(elements))
if not elements:
raise ValueError(
"the elements of each field can not be empty, you need padding it in process()."
)
self._proto_info.append((name, "uint64"))
if output:
output += " "
output += str(len(elements))
for elem in elements:
if isinstance(elem, float):
self._proto_info[-1] = (name, "float")
                    elif not isinstance(elem, int):  # Python 2's 'long' no longer exists
raise ValueError(
"the type of element%s must be in int or float" %
type(elem))
output += " " + str(elem)
else:
if len(line) != len(self._proto_info):
raise ValueError(
"the complete field set of two given line are inconsistent.")
for index, item in enumerate(line):
name, elements = item
if not isinstance(name, str):
raise ValueError("name%s must be in str type" % type(name))
if not isinstance(elements, list):
raise ValueError("elements%s must be in list type" %
type(elements))
if not elements:
raise ValueError(
"the elements of each field can not be empty, you need padding it in process()."
)
if name != self._proto_info[index][0]:
raise ValueError(
"the field name of two given line are not match: require<%s>, get<%s>."
% (self._proto_info[index][0], name))
if output:
output += " "
output += str(len(elements))
for elem in elements:
if self._proto_info[index][1] != "float":
if isinstance(elem, float):
self._proto_info[index] = (name, "float")
                        elif not isinstance(elem, int):  # Python 2's 'long' no longer exists
raise ValueError(
"the type of element%s must be in int or float"
% type(elem))
output += " " + str(elem)
return output + "\n"
| [
"[email protected]"
] | |
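To make the _gen_str contract above concrete, here is a minimal sketch of a user-defined generator and the line it emits (the class name is illustrative; the import path follows the docstrings above):

import paddle.distributed.fleet.data_generator as dg

class WordLabelGen(dg.MultiSlotDataGenerator):
    def generate_sample(self, line):
        def local_iter():
            # one sample: a "words" slot with three ids and a "label" slot with one
            yield ("words", [1926, 8, 17]), ("label", [1])
        return local_iter

# For every input line, run_from_stdin() writes to stdout:
#   3 1926 8 17 1 1
# i.e. [ids_num id1 id2 ...] per slot, space separated and newline terminated.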
2a68a5ba29718b61fc2e53486891a3f1d861179d | 3a39e879fb2901207afcfc238b169ddefa104055 | /Chapter05/Docs/headercontract/headercontract/settings.py | 88caffbe94df2a4de120cbbaece0608c79c3236a | [] | no_license | Synapses/Web_Scraping_with_Python | cb32ddd468250b9f11ad16d3576d0920693e708c | 3bb8cd47d0e1e182bb8ee800d32e24f45bf13ab0 | refs/heads/master | 2023-03-15T09:19:02.754593 | 2020-06-16T02:17:11 | 2020-06-16T02:17:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,160 | py | # -*- coding: utf-8 -*-
# Scrapy settings for headercontract project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'headercontract'
SPIDER_MODULES = ['headercontract.spiders']
NEWSPIDER_MODULE = 'headercontract.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'headercontract (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'headercontract.middlewares.HeadercontractSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'headercontract.middlewares.HeadercontractDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'headercontract.pipelines.HeadercontractPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
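The template above ships with everything commented out; a typical polite-crawl configuration (illustrative values, not project requirements) enables only a handful of these settings:

DOWNLOAD_DELAY = 1.5                # pause between requests to the same website
CONCURRENT_REQUESTS_PER_DOMAIN = 4  # per-domain concurrency cap

AUTOTHROTTLE_ENABLED = True         # adapt the delay to observed latencies
AUTOTHROTTLE_START_DELAY = 5
AUTOTHROTTLE_MAX_DELAY = 30

HTTPCACHE_ENABLED = True            # cache responses locally while developing
HTTPCACHE_EXPIRATION_SECS = 3600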
fd48e51c8eab549b95ed79cc075a60d840f595ef | 65dd982b7791b11b4f6e02b8c46300098d9b5bb3 | /neutron-2014.2.2-gcloud/neutron/db/portqos_db.py | 91e3edb81874a111692dc538727c9926b83a4b00 | [
"Apache-2.0"
] | permissive | xiongmeng1108/openstack_gcloud | 83f58b97e333d86d141493b262d3c2261fd823ac | d5d3e4f8d113a626f3da811b8e48742d35550413 | refs/heads/master | 2021-01-10T01:21:13.911165 | 2016-03-25T08:21:14 | 2016-03-25T08:21:14 | 54,700,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,058 | py | __author__ = 'luoyb'
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.extensions import gcloud_qos
LOG = logging.getLogger(__name__)
class PortQos(model_base.BASEV2):
"""
define qos
"""
__tablename__ = 'gcloud_portqoss'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
nullable=False,primary_key=True)
ingress = sa.Column(sa.BIGINT, nullable=True)
outgress = sa.Column(sa.BIGINT, nullable=True)
# Add a relationship to the Port model in order to instruct SQLAlchemy to
# eagerly load extra_port_qos
ports = orm.relationship(
models_v2.Port,
backref=orm.backref("qos", lazy='joined',uselist=False,cascade='delete'))
class PortQosMixin(object):
"""Mixin class to add extra options to the Qos file
and associate them to a port.
"""
def _extend_port_dict_qos(self,res,port):
port_qos=port.get('qos')
res['qos']=self._make_port_qos_dict(port_qos)
return res
def _get_qos(self,context,port_id):
port_qos = context.session.query(PortQos).filter_by(port_id=port_id).first()
return self._make_port_qos_dict(port_qos)
def _create_or_update_qos(self,context,id,qos):
if not qos:
raise "qos is null"
qos= qos['qos']
if id:
qos['port_id']=id
port= context.session.query(models_v2.Port).filter_by(id = qos['port_id']).first()
if not port:
raise gcloud_qos.QosPortNotFound(id = qos['port_id'])
port_qos=None
with context.session.begin(subtransactions=True):
port_qos = context.session.query(PortQos).filter_by(port_id = qos['port_id']).first()
if port_qos:
port_qos.update(qos)
else:
port_qos = PortQos(
port_id=qos['port_id'],
ingress=qos.get('ingress'),
outgress=qos.get('outgress'))
context.session.add(port_qos)
return self._make_port_qos_dict(port_qos)
def _make_port_qos_dict(self,port_qos):
res={}
if port_qos:
res = {"port_id": port_qos["port_id"],
'ingress': port_qos['ingress'],
"outgress": port_qos["outgress"]
}
return res
def update_qos(self, context,id,qos):
return self._create_or_update_qos(context,id,qos)
def create_qos(self, context, qos):
return self._create_or_update_qos(context=context,id=None,qos=qos)
def get_qos(self, context, id, fields=None):
return self._get_qos(context,port_id=id)
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_extend_port_dict_qos'])
| [
"[email protected]"
] | |
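A sketch of how a plugin would typically consume the mixin above (the plugin class and method names are illustrative, not taken from the gcloud tree):

class GcloudQosPlugin(PortQosMixin):
    def update_port_qos(self, context, port_id, ingress, outgress):
        # Upserts a gcloud_portqoss row; ports then expose a joined 'qos'
        # attribute via the _extend_port_dict_qos hook registered above.
        return self.update_qos(
            context, port_id,
            {'qos': {'ingress': ingress, 'outgress': outgress}})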
10445f9a71f13523fca7627d75cae341995b9c9f | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/feature/vt/gui/util/AbstractVTMatchTableModel.pyi | 9bb8b66ed7016dae1a867a3badabf5b66a85a76a | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 60,357 | pyi | from typing import List
import docking.widgets.table
import docking.widgets.table.threaded
import ghidra.docking.settings
import ghidra.feature.vt.api.main
import ghidra.feature.vt.gui.filters
import ghidra.feature.vt.gui.provider.markuptable
import ghidra.feature.vt.gui.provider.matchtable
import ghidra.feature.vt.gui.util
import ghidra.framework.plugintool
import ghidra.program.model.address
import ghidra.program.model.listing
import ghidra.program.model.symbol
import ghidra.program.util
import ghidra.util.table
import ghidra.util.table.column
import ghidra.util.table.field
import ghidra.util.task
import java.lang
import java.util
import java.util.function
import javax.swing.event
import javax.swing.table
class AbstractVTMatchTableModel(ghidra.util.table.AddressBasedTableModel):
class SessionNumberTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> int: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class SourceLengthTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> int: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class SourceAddressTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> ghidra.feature.vt.gui.provider.markuptable.DisplayableListingAddress: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
@property
def columnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
class SourceAddressComparator(object, java.util.Comparator):
def __init__(self): ...
@overload
def compare(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.feature.vt.api.main.VTMatch) -> int: ...
@overload
def compare(self, __a0: object, __a1: object) -> int: ...
@overload
@staticmethod
def comparing(__a0: java.util.function.Function) -> java.util.Comparator: ...
@overload
@staticmethod
def comparing(__a0: java.util.function.Function, __a1: java.util.Comparator) -> java.util.Comparator: ...
@staticmethod
def comparingDouble(__a0: java.util.function.ToDoubleFunction) -> java.util.Comparator: ...
@staticmethod
def comparingInt(__a0: java.util.function.ToIntFunction) -> java.util.Comparator: ...
@staticmethod
def comparingLong(__a0: java.util.function.ToLongFunction) -> java.util.Comparator: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
@staticmethod
def naturalOrder() -> java.util.Comparator: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
@staticmethod
def nullsFirst(__a0: java.util.Comparator) -> java.util.Comparator: ...
@staticmethod
def nullsLast(__a0: java.util.Comparator) -> java.util.Comparator: ...
@staticmethod
def reverseOrder() -> java.util.Comparator: ...
def reversed(self) -> java.util.Comparator: ...
@overload
def thenComparing(self, __a0: java.util.Comparator) -> java.util.Comparator: ...
@overload
def thenComparing(self, __a0: java.util.function.Function) -> java.util.Comparator: ...
@overload
def thenComparing(self, __a0: java.util.function.Function, __a1: java.util.Comparator) -> java.util.Comparator: ...
def thenComparingDouble(self, __a0: java.util.function.ToDoubleFunction) -> java.util.Comparator: ...
def thenComparingInt(self, __a0: java.util.function.ToIntFunction) -> java.util.Comparator: ...
def thenComparingLong(self, __a0: java.util.function.ToLongFunction) -> java.util.Comparator: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
class ConfidenceScoreTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> ghidra.feature.vt.api.main.VTScore: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnDescription(self) -> unicode: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
@property
def columnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
class AppliedMarkupStatusTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> ghidra.feature.vt.api.main.VTMatch: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
@property
def columnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
class LengthDeltaTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> int: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class StatusTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> ghidra.feature.vt.gui.util.MungedAssocationAndMarkupItemStatus: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class DestinationLabelTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> ghidra.feature.vt.gui.provider.matchtable.DisplayableLabel: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
@property
def columnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
class SourceNamespaceTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> unicode: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class DestinationNamespaceTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> unicode: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class SourceLabelSourceTypeTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> unicode: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class TagTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> unicode: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class DestinationAddressComparator(object, java.util.Comparator):
def __init__(self): ...
@overload
def compare(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.feature.vt.api.main.VTMatch) -> int: ...
@overload
def compare(self, __a0: object, __a1: object) -> int: ...
@overload
@staticmethod
def comparing(__a0: java.util.function.Function) -> java.util.Comparator: ...
@overload
@staticmethod
def comparing(__a0: java.util.function.Function, __a1: java.util.Comparator) -> java.util.Comparator: ...
@staticmethod
def comparingDouble(__a0: java.util.function.ToDoubleFunction) -> java.util.Comparator: ...
@staticmethod
def comparingInt(__a0: java.util.function.ToIntFunction) -> java.util.Comparator: ...
@staticmethod
def comparingLong(__a0: java.util.function.ToLongFunction) -> java.util.Comparator: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
@staticmethod
def naturalOrder() -> java.util.Comparator: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
@staticmethod
def nullsFirst(__a0: java.util.Comparator) -> java.util.Comparator: ...
@staticmethod
def nullsLast(__a0: java.util.Comparator) -> java.util.Comparator: ...
@staticmethod
def reverseOrder() -> java.util.Comparator: ...
def reversed(self) -> java.util.Comparator: ...
@overload
def thenComparing(self, __a0: java.util.Comparator) -> java.util.Comparator: ...
@overload
def thenComparing(self, __a0: java.util.function.Function) -> java.util.Comparator: ...
@overload
def thenComparing(self, __a0: java.util.function.Function, __a1: java.util.Comparator) -> java.util.Comparator: ...
def thenComparingDouble(self, __a0: java.util.function.ToDoubleFunction) -> java.util.Comparator: ...
def thenComparingInt(self, __a0: java.util.function.ToIntFunction) -> java.util.Comparator: ...
def thenComparingLong(self, __a0: java.util.function.ToLongFunction) -> java.util.Comparator: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
class ScoreTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> ghidra.feature.vt.api.main.VTScore: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnDescription(self) -> unicode: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
@property
def columnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
class DestinationLabelSourceTypeTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> unicode: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class DestinationLengthTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> int: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class MatchTypeTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> unicode: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnDescription(self) -> unicode: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class MultipleDestinationLabelsTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> List[ghidra.program.model.symbol.Symbol]: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
@property
def columnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
class RelatedMatchCountColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> int: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnDescription(self) -> unicode: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class AlgorithmTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> unicode: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
class MultipleSourceLabelsTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> List[ghidra.program.model.symbol.Symbol]: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
@property
def columnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
class DestinationAddressTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> ghidra.feature.vt.gui.provider.markuptable.DisplayableListingAddress: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
@property
def columnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
class AppliedMarkupStatusBatteryTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> ghidra.feature.vt.api.main.VTMatch: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
@property
def columnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
class SourceLabelTableColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> ghidra.feature.vt.gui.provider.matchtable.DisplayableLabel: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
@property
def columnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
class ImpliedMatchCountColumn(ghidra.util.table.field.AbstractProgramBasedDynamicTableColumn):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColumnClass(self) -> java.lang.Class: ...
def getColumnDescription(self) -> unicode: ...
def getColumnDisplayName(self, __a0: ghidra.docking.settings.Settings) -> unicode: ...
def getColumnName(self) -> unicode: ...
def getColumnPreferredWidth(self) -> int: ...
def getColumnRenderer(self) -> ghidra.util.table.column.GColumnRenderer: ...
def getComparator(self) -> java.util.Comparator: ...
def getMaxLines(self, __a0: ghidra.docking.settings.Settings) -> int: ...
def getSettingsDefinitions(self) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getSupportedRowType(self) -> java.lang.Class: ...
def getUniqueIdentifier(self) -> unicode: ...
@overload
def getValue(self, __a0: ghidra.feature.vt.api.main.VTMatch, __a1: ghidra.docking.settings.Settings, __a2: ghidra.program.model.listing.Program, __a3: ghidra.framework.plugintool.ServiceProvider) -> int: ...
@overload
def getValue(self, __a0: object, __a1: ghidra.docking.settings.Settings, __a2: object, __a3: ghidra.framework.plugintool.ServiceProvider) -> object: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def columnDescription(self) -> unicode: ...
@property
def columnName(self) -> unicode: ...
@property
def columnPreferredWidth(self) -> int: ...
def __init__(self, __a0: unicode, __a1: ghidra.feature.vt.gui.plugin.VTController): ...
def addFilter(self, __a0: ghidra.feature.vt.gui.filters.Filter) -> None: ...
def addInitialLoadListener(self, __a0: docking.widgets.table.threaded.ThreadedTableModelListener) -> None: ...
def addObject(self, __a0: object) -> None: ...
def addSortListener(self, __a0: docking.widgets.table.SortListener) -> None: ...
def addTableModelListener(self, __a0: javax.swing.event.TableModelListener) -> None: ...
def addThreadedTableModelListener(self, __a0: docking.widgets.table.threaded.ThreadedTableModelListener) -> None: ...
def cancelAllUpdates(self) -> None: ...
def clearData(self) -> None: ...
def dispose(self) -> None: ...
def equals(self, __a0: object) -> bool: ...
def findColumn(self, __a0: unicode) -> int: ...
def fireTableCellUpdated(self, __a0: int, __a1: int) -> None: ...
def fireTableChanged(self, __a0: javax.swing.event.TableModelEvent) -> None: ...
def fireTableDataChanged(self) -> None: ...
def fireTableRowsDeleted(self, __a0: int, __a1: int) -> None: ...
def fireTableRowsInserted(self, __a0: int, __a1: int) -> None: ...
def fireTableRowsUpdated(self, __a0: int, __a1: int) -> None: ...
def fireTableStructureChanged(self) -> None: ...
@staticmethod
def from(__a0: javax.swing.table.TableModel) -> docking.widgets.table.VariableColumnTableModel: ...
def getAddress(self, __a0: int) -> ghidra.program.model.address.Address: ...
def getClass(self) -> java.lang.Class: ...
def getColumn(self, __a0: int) -> docking.widgets.table.DynamicTableColumn: ...
def getColumnClass(self, __a0: int) -> java.lang.Class: ...
def getColumnCount(self) -> int: ...
def getColumnDescription(self, __a0: int) -> unicode: ...
def getColumnDisplayName(self, __a0: int) -> unicode: ...
@overload
def getColumnIndex(self, __a0: docking.widgets.table.DynamicTableColumn) -> int: ...
@overload
def getColumnIndex(self, __a0: java.lang.Class) -> int: ...
def getColumnName(self, __a0: int) -> unicode: ...
def getColumnSettings(self, __a0: int) -> ghidra.docking.settings.Settings: ...
def getColumnSettingsDefinitions(self, __a0: int) -> List[ghidra.docking.settings.SettingsDefinition]: ...
def getColumnValueForRow(self, __a0: object, __a1: int) -> object: ...
def getDataSource(self) -> object: ...
def getDefaultColumnCount(self) -> int: ...
def getLastSelectedObjects(self) -> List[object]: ...
def getListeners(self, __a0: java.lang.Class) -> List[java.util.EventListener]: ...
def getMaxLines(self, __a0: int) -> int: ...
def getModelData(self) -> List[object]: ...
def getModelIndex(self, __a0: object) -> int: ...
def getModelRow(self, __a0: int) -> int: ...
def getName(self) -> unicode: ...
def getPendingSortState(self) -> docking.widgets.table.TableSortState: ...
def getPreferredColumnWidth(self, __a0: int) -> int: ...
def getPrimarySortColumnIndex(self) -> int: ...
def getProgram(self) -> ghidra.program.model.listing.Program: ...
def getProgramLocation(self, __a0: int, __a1: int) -> ghidra.program.util.ProgramLocation: ...
def getProgramSelection(self, __a0: List[int]) -> ghidra.program.util.ProgramSelection: ...
def getRenderer(self, __a0: int) -> javax.swing.table.TableCellRenderer: ...
def getRowCount(self) -> int: ...
def getRowIndex(self, __a0: object) -> int: ...
def getRowObject(self, __a0: int) -> object: ...
def getRowObjects(self, __a0: List[int]) -> List[object]: ...
def getTableFilter(self) -> docking.widgets.table.TableFilter: ...
def getTableModelListeners(self) -> List[javax.swing.event.TableModelListener]: ...
def getTableSortState(self) -> docking.widgets.table.TableSortState: ...
def getUnfilteredData(self) -> List[object]: ...
def getUnfilteredRowCount(self) -> int: ...
def getUniqueIdentifier(self, __a0: int) -> unicode: ...
def getValueAt(self, __a0: int, __a1: int) -> object: ...
def getViewIndex(self, __a0: object) -> int: ...
def getViewRow(self, __a0: int) -> int: ...
def hasFilter(self) -> bool: ...
def hashCode(self) -> int: ...
def isBusy(self) -> bool: ...
def isCellEditable(self, __a0: int, __a1: int) -> bool: ...
def isDefaultColumn(self, __a0: int) -> bool: ...
def isFiltered(self) -> bool: ...
def isLoadIncrementally(self) -> bool: ...
def isSortPending(self) -> bool: ...
def isSortable(self, __a0: int) -> bool: ...
def isSorted(self) -> bool: ...
def isVisibleByDefault(self, __a0: int) -> bool: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def reFilter(self) -> None: ...
def reSort(self) -> None: ...
def refresh(self) -> None: ...
def reload(self) -> None: ...
def removeObject(self, __a0: object) -> None: ...
def removeTableModelListener(self, __a0: javax.swing.event.TableModelListener) -> None: ...
def removeThreadedTableModelListener(self, __a0: docking.widgets.table.threaded.ThreadedTableModelListener) -> None: ...
def sessionChanged(self) -> None: ...
def setAllColumnSettings(self, __a0: List[ghidra.docking.settings.Settings]) -> None: ...
def setIncrementalTaskMonitor(self, __a0: ghidra.util.task.TaskMonitor) -> None: ...
def setLastSelectedObjects(self, __a0: List[object]) -> None: ...
def setProgram(self, __a0: ghidra.program.model.listing.Program) -> None: ...
def setTableFilter(self, __a0: docking.widgets.table.TableFilter) -> None: ...
def setTableSortState(self, __a0: docking.widgets.table.TableSortState) -> None: ...
def setValueAt(self, __a0: object, __a1: int, __a2: int) -> None: ...
def stateChanged(self, __a0: javax.swing.event.ChangeEvent) -> None: ...
def toString(self) -> unicode: ...
@staticmethod
def unwrap(__a0: javax.swing.table.TableModel) -> javax.swing.table.TableModel: ...
def updateFilter(self) -> None: ...
def updateObject(self, __a0: object) -> None: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"[email protected]"
] | |
6bc465b047b317b874564567d86dd750b92a8daf | b1255aedc430b128582d42cabfe675ac3a3321f1 | /src/promnesia/sources/browser.py | 176c05d8a1770d0487733568cf822e8e0b0c8c00 | [
"MIT"
] | permissive | alexriabtsev/promnesia | 855e3b54d3f17b3c9bf7ce985360e8d4f259c5da | 5055198170bdf57135679181d87450a979a05a2b | refs/heads/master | 2022-11-13T04:58:00.565517 | 2020-07-11T19:46:23 | 2020-07-11T19:46:23 | 278,930,706 | 0 | 0 | MIT | 2020-07-11T19:44:18 | 2020-07-11T19:44:17 | null | UTF-8 | Python | false | false | 2,714 | py | import csv
import sqlite3
from datetime import datetime
from subprocess import check_output
from typing import Dict, Iterator, List, NamedTuple, Optional, Set
from urllib.parse import unquote
import pytz
from sqlalchemy import Column, MetaData, Table, create_engine # type: ignore
from ..common import Loc, PathIsh, Visit, get_logger, Second
def browser_extract(histfile: PathIsh, cols, row_handler) -> Iterator[Visit]:
logger = get_logger()
    logger.debug(f'extracting history from {histfile}')
# TODO fuck. why doesn't that work???
# engine = create_engine('sqlite:///{histfile}', echo=True)
# meta = MetaData()
# visits = Table('visits', meta, autoload=True, autoload_with=engine)
# TODO contextmanager
conn = sqlite3.connect(str(histfile))
for row in conn.execute(f"SELECT {', '.join(cols)} FROM visits"):
pv = row_handler(*row)
yield pv
    logger.debug('done extracting')
def _firefox(cols, histfile: PathIsh) -> Iterator[Visit]:
def row_handler(url, ts):
# ok, looks like it's unix epoch
# https://stackoverflow.com/a/19430099/706389
dt = datetime.fromtimestamp(int(ts) / 1_000_000, pytz.utc)
url = unquote(url) # firefox urls are all quoted
return Visit(
url=url,
dt=dt,
locator=Loc.file(histfile),
)
yield from browser_extract(
histfile=histfile,
cols=cols,
row_handler=row_handler,
)
def firefox_phone(histfile: PathIsh) -> Iterator[Visit]:
yield from _firefox(cols=('url', 'date'), histfile=histfile)
def firefox(histfile: PathIsh) -> Iterator[Visit]:
yield from _firefox(cols=('url', 'visit_date'), histfile=histfile)
# should be utc? https://stackoverflow.com/a/26226771/706389
# yep, tested it and looks like utc
def chrome_time_to_utc(chrome_time: int) -> datetime:
epoch = (chrome_time / 1_000_000) - 11644473600
return datetime.fromtimestamp(epoch, pytz.utc)
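# Quick sanity check (illustrative, not part of the original module): Chrome
# timestamps count microseconds since 1601-01-01 UTC, so the Unix epoch sits
# exactly 11644473600 seconds later:
#   chrome_time_to_utc(11644473600 * 1_000_000)
#   -> datetime.datetime(1970, 1, 1, 0, 0, tzinfo=<UTC>)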
# TODO could use sqlite3 module I guess... but it's quick enough to extract as it is
def chrome(histfile: PathIsh) -> Iterator[Visit]:
def row_handler(url, ts, durs):
dt = chrome_time_to_utc(int(ts))
url = unquote(url) # chrome urls are all quoted # TODO not sure if we want it here?
dd = int(durs)
dur: Optional[Second]
if dd == 0:
dur = None
else:
dur = dd // 1_000_000
return Visit(
url=url,
dt=dt,
locator=Loc.file(histfile),
duration=dur,
)
yield from browser_extract(
histfile=histfile,
cols=('url', 'visit_time', 'visit_duration'),
row_handler=row_handler,
)
| [
"[email protected]"
] | |
26e8ab9c4c3bad31527986883deb13a25f8efdd7 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/216/usersdata/267/113208/submittedfiles/av2_p3_civil.py | ade1f16d5c34a108d034f19287aa5130054916e2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | # -*- coding: utf-8 -*-
import numpy as np
n=int(input('Board dimension: '))
print()
a=np.zeros((n,n))
x=int(input('Row number where the rook is located: '))
if x>n:
    while x>n:
        x=int(input('INVALID VALUE. Row number where the rook is located: '))
print()
y=int(input('Column number where the rook is located: '))
if y>n:
    while y>n:
        y=int(input('INVALID VALUE. Column number where the rook is located: '))
print()
for i in range(0,n,1):
    for j in range(0,n,1):
        a[i,j]=int(input('Enter the value at position %d%d: '%(i+1,j+1)))
somaL=0
for i in range(0,n,1):
somaL=somaL+a[x-1,i]
somaC=0
for i in range(0,n,1):
somaC=somaC+a[i,y-1]
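# The weight computed below is the sum of the rook's row plus the sum of its
# column, counting the rook's own square only once (it appears in both sums).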
peso=somaL+somaC-a[x-1,y-1]
print(peso) | [
"[email protected]"
] | |
7e1fa4523b5fdefe795f17c9d34f9c193eec062f | 12967293f285decb1568bd56af38b1df4e5c533d | /.eggs/botocore-1.10.9-py2.7.egg/botocore/__init__.py | 71839036f4abf9dc262ff861a79879d59ff94b71 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | martbhell/git-bigstore | 36cd16276379833fbade252a77c73cf3644aa30f | 960e9ea64d4d5646af3ce411adf46f3236b64d7e | refs/heads/master | 2020-05-16T17:51:52.011171 | 2019-03-12T20:54:42 | 2019-03-12T20:54:42 | 183,206,409 | 0 | 0 | Apache-2.0 | 2019-04-24T10:29:48 | 2019-04-24T10:29:47 | null | UTF-8 | Python | false | false | 3,884 | py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
__version__ = '1.10.9'
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_number_cap_regex = re.compile('([a-z])([0-9]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acronym
# name is pluralized, e.g. GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
_partial_renames = {
'ipv-6': 'ipv6',
'ipv_6': 'ipv6',
}
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED(object):
def __copy__(self):
return self
def __deepcopy__(self, memodict):
return self
UNSIGNED = UNSIGNED()
def xform_name(name, sep='_', _xform_cache=_xform_cache,
partial_renames=_partial_renames):
"""Convert camel case to a "pythonic" name.
If the name contains the ``sep`` character, then it is
returned unchanged.
"""
if sep in name:
# If the sep is in the name, assume that it's already
# transformed and return the string unchanged.
return name
key = (name, sep)
if key not in _xform_cache:
if _special_case_transform.search(name) is not None:
is_special = _special_case_transform.search(name)
matched = is_special.group()
# Replace something like ARNs, ACLs with _arns, _acls.
name = name[:-len(matched)] + sep + matched.lower()
s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
s2 = _number_cap_regex.sub(r'\1' + sep + r'\2', s1)
transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s2).lower()
# Do partial renames
for old, new in partial_renames.items():
if old in transformed:
transformed = transformed.replace(old, new)
_xform_cache[key] = transformed
return _xform_cache[key]
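# A few illustrative conversions (worked out from the rules above; these
# calls are examples, not part of the original module):
#   xform_name('DescribeInstances')   -> 'describe_instances'
#   xform_name('ListWebACLs')         -> 'list_web_acls'         (pluralized acronym)
#   xform_name('AssignIpv6Addresses') -> 'assign_ipv6_addresses' (partial rename ipv_6 -> ipv6)
#   xform_name('already_done')        -> 'already_done'          (sep present, unchanged)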
| [
"[email protected]"
] | |
b1761ebb3d2a30a0f0fc45062e83e5c7d20d0f93 | f35254b599e251249b460b8edf4303e7009024d4 | /rl/bc_utils/init_tensor.py | b404e044c7d8ad18c4310e3469abb6aa1c58fdc7 | [
"MIT"
] | permissive | bcrafton/icsrl-deep-learning | 7e3a3f970bb8a3331d709d1a841f83bf15e6a39e | e3616982d1dda5f978d61d6591c91cb0da76ab02 | refs/heads/master | 2020-09-14T00:35:47.543984 | 2019-11-22T22:04:33 | 2019-11-22T22:04:33 | 222,955,022 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,101 | py |
import numpy as np
#######################################
def init_matrix(size, init, std=None):
input_size, output_size = size
if init == 'zero':
weights = np.zeros(shape=(input_size, output_size))
elif init == 'sqrt_fan_in':
sqrt_fan_in = np.sqrt(input_size)
weights = np.random.uniform(low=-1.0/sqrt_fan_in, high=1.0/sqrt_fan_in, size=(input_size, output_size))
elif init == 'glorot_uniform':
limit = np.sqrt(6. / (input_size + output_size))
weights = np.random.uniform(low=-limit, high=limit, size=(input_size, output_size))
elif init == 'glorot_normal':
scale = np.sqrt(2. / (input_size + output_size))
weights = np.random.normal(loc=0.0, scale=scale, size=(input_size, output_size))
elif init == 'alexnet':
weights = np.random.normal(loc=0.0, scale=0.01, size=(input_size, output_size))
elif init == 'normal':
scale = std
weights = np.random.normal(loc=0.0, scale=scale, size=(input_size, output_size))
else:
weights = np.random.normal(loc=0.0, scale=1.0, size=(input_size, output_size))
return weights
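# Minimal usage sketch (illustrative, not in the original file): a
# Glorot-uniform init for a 784 -> 128 dense layer samples weights from
# U(-limit, limit) with limit = sqrt(6 / (784 + 128)):
#   w = init_matrix((784, 128), 'glorot_uniform')
#   assert w.shape == (784, 128)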
#######################################
def init_filters(size, init, std=None):
fh, fw, fin, fout = size
if init == 'zero':
weights = np.zeros(shape=(fh, fw, fin, fout))
elif init == 'sqrt_fan_in':
assert (False)
elif init == 'glorot_uniform':
limit = np.sqrt(6. / (fh*fw*fin + fh*fw*fout))
weights = np.random.uniform(low=-limit, high=limit, size=(fh, fw, fin, fout))
elif init == 'glorot_normal':
scale = np.sqrt(2. / (fh*fw*fin + fh*fw*fout))
weights = np.random.normal(loc=0.0, scale=scale, size=(fh, fw, fin, fout))
elif init == 'alexnet':
weights = np.random.normal(loc=0.0, scale=0.01, size=(fh, fw, fin, fout))
elif init == 'normal':
scale = std
weights = np.random.normal(loc=0.0, scale=scale, size=(fh, fw, fin, fout))
else:
assert (False)
return weights
#######################################
def init_local_filters(size, init, std=None):
h, w, fh, fw, fin, fout = size
if init == 'zero':
weights = np.zeros(shape=(h, w, fh*fw*fin, fout))
elif init == 'sqrt_fan_in':
assert (False)
elif init == 'glorot_uniform':
limit = np.sqrt(6. / (fh*fw*fin + fh*fw*fout))
weights = np.random.uniform(low=-limit, high=limit, size=(h, w, fh*fw*fin, fout))
elif init == 'glorot_normal':
scale = np.sqrt(2. / (fh*fw*fin + fh*fw*fout))
weights = np.random.normal(loc=0.0, scale=scale, size=(h, w, fh*fw*fin, fout))
elif init == 'alexnet':
weights = np.random.normal(loc=0.0, scale=0.01, size=(h, w, fh*fw*fin, fout))
elif init == 'normal':
scale = std
weights = np.random.normal(loc=0.0, scale=scale, size=(h, w, fh*fw*fin, fout))
else:
assert (False)
return weights
#######################################
| [
"[email protected]"
] | |
d18d566858011ece3ccc89dce925a0a7d1b87b83 | 4f394592f56b38e3fb1b971b2a10391ca86f4533 | /scripts/Migrate.py | a7f3ffd1af3a6292283a6c710d2c4480f3316f22 | [] | no_license | JKOK005/analytics-ilmuone | aa605868af3d5d9c5dc2b412a88093406ad7a806 | 96c541d68246a38ac7ee1678b6b162fadb727141 | refs/heads/master | 2021-06-25T19:42:33.547603 | 2017-08-18T08:09:23 | 2017-08-18T08:09:23 | 100,269,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | import os
from Excel import Excel
from ParseExcel import ExcelParser
from GenEnvironDb import DbConnector
if __name__ == "__main__":
parser = ExcelParser(os.path.join("..", "data"))
with DbConnector("environ", "ilumone", "ilumone") as db:
# Migrate countries data
try:
excel = Excel().setFileName("environ.xlsx").setSheetName("Metadata - Countries").SkipRows(0)
res = parser.read(excel)
db.fillCountriesChunk(res['Country Code'], res['Country Name'], res['Region'], res['IncomeGroup'], res['SpecialNotes'], 100)
except Exception as e:
pass
# Migrate indicators data
try:
excel = Excel().setFileName("environ.xlsx").setSheetName("Metadata - Indicators").SkipRows(0)
res = parser.read(excel)
db.fillIndicatorsChunk(res['INDICATOR_CODE'], res['INDICATOR_NAME'], res['SOURCE_NOTE'], 100)
except Exception as e:
pass
# Migrate historical data
excel = Excel().setFileName("environ.xlsx").setSheetName("Data").SkipRows(3)
res = parser.read(excel)
db.fillHDChunk(res['Country Code'], res['Indicator Code'], res.ix[:, '1960':'2015'], 100)
| [
"[email protected]"
] | |
9d077b206add1d0b332089d9e49a0b4944b8dd09 | e05f8d36c70336a8714cc260c02fe85ecee2e62e | /subject/api/glare/versions.py | fe4ada2edb37be074931d0afc9cbe7347e0b0dee | [
"Apache-2.0"
] | permissive | laoyigrace/subject | eafa442b5d9ebf83c78a01ce3bb5d088d08d620d | e6ed989fdc250917a19788112b22322b73b3550f | refs/heads/master | 2021-01-11T00:06:54.790751 | 2016-10-24T02:13:32 | 2016-10-24T02:13:32 | 70,754,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,125 | py | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import http_client
import webob.dec
from subject.common import wsgi
from subject import i18n
_ = i18n._
versions_opts = [
# Note: Since both subject-api and glare-api have the same name for the
# option public_endpoint, oslo.config generator throws a DuplicateError
# exception during the conf file generation incase of differing help
# texts. Hence we have to have identical help texts for subject-api and
# glare-api's public_endpoint if not for changing the conf opt name.
cfg.StrOpt('public_endpoint',
help=_("""
Public url endpoint to use for Glance/Glare versions response.
This is the public url endpoint that will appear in the Glance/Glare
"versions" response. If no value is specified, the endpoint that is
displayed in the version's response is that of the host running the
API service. Change the endpoint to represent the proxy URL if the
API service is running behind a proxy. If the service is running
behind a load balancer, add the load balancer's URL for this value.
Possible values:
* None
* Proxy URL
* Load balancer URL
Related options:
* None
""")),
]
CONF = cfg.CONF
CONF.register_opts(versions_opts)
class Controller(object):
"""A wsgi controller that reports which API versions are supported."""
def index(self, req, explicit=False):
"""Respond to a request for all OpenStack API versions."""
def build_version_object(version, path, status):
url = CONF.public_endpoint or req.host_url
return {
'id': 'v%s' % version,
'status': status,
'links': [
{
'rel': 'self',
'href': '%s/%s/' % (url, path),
},
],
}
version_objs = [build_version_object(0.1, 'v0.1', 'EXPERIMENTAL')]
status = explicit and http_client.OK or http_client.MULTIPLE_CHOICES
response = webob.Response(request=req,
status=status,
content_type='application/json')
response.body = jsonutils.dump_as_bytes(dict(versions=version_objs))
return response
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
return self.index(req)
def create_resource(conf):
return wsgi.Resource(Controller())
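# Illustrative response body (assuming public_endpoint is unset and the
# request host is http://localhost:9494; host and port are made up):
#
# {"versions": [{"id": "v0.1",
#                "status": "EXPERIMENTAL",
#                "links": [{"rel": "self",
#                           "href": "http://localhost:9494/v0.1/"}]}]}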
| [
"[email protected]"
] | |
7cb340dcc964a7393ee90ccb58547d832899668f | c9eddcc8ae129d52ac08e59039a349827d78ac39 | /doc/examples/icatexport.py | fe77037fe58aaba97f51505fed77bc2ca9f69d5a | [
"Apache-2.0"
] | permissive | ahmad-tarbeya/python-icat | 48335cedc256992e811f903616a593aadb45f194 | a54f76551c74eacddffe19f8f217e81cf221d551 | refs/heads/master | 2021-01-20T06:43:09.083208 | 2017-04-25T13:42:46 | 2017-04-25T13:43:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,017 | py | #! /usr/bin/python
#
# Export the content of the ICAT to a file or to stdout.
#
# Use the export feature from ICAT server: make the appropriate call
# to the ICAT RESTful interface to get the ICAT content and store the
# result to a file. Try to keep the command line interface as close
# as possible to the one from icatdump.py.
#
import sys
import os
import json
import re
import logging
import requests
import icat
import icat.config
from icat.exception import translateError
logging.basicConfig(level=logging.INFO)
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)
config = icat.config.Config()
config.add_variable('resturl', ("--resturl",),
dict(help="URL to the ICAT RESTful interface"),
default=True)
config.add_variable('file', ("-o", "--outputfile"),
dict(help="output file name or '-' for stdout"),
default='-')
# The format argument makes in fact little sense, as there is no
# choice.  It's here for compatibility with the command line interface
# of icatdump.py only.
config.add_variable('format', ("-f", "--format"),
dict(help="output file format", choices=["ICAT"]),
default='ICAT')
# Additional arguments that icatdump.py does not provide:
config.add_variable('query', ("--query",),
dict(help="query string to select the content"),
optional=True)
config.add_variable('attributes', ("--attributes",),
dict(help="attributes to include in the output",
choices=["ALL", "USER"]),
default='USER')
conf = config.getconfig()
client = icat.Client(conf.url, **conf.client_kwargs)
if client.apiversion < '4.3.99':
raise RuntimeError("Sorry, ICAT version %s is too old, need 4.4.0 or newer."
% client.apiversion)
client.login(conf.auth, conf.credentials)
if conf.resturl is True:
# As a default, derive the RESTful URL from the URL of the SOAP service.
conf.resturl = re.sub(r'(?<=/)ICATService/.*', 'icat', conf.url)
if not conf.resturl.endswith("/"):
conf.resturl += "/"
args = {"sessionId": client.sessionId, "attributes":conf.attributes}
if conf.query:
args['query'] = conf.query
parameters = {"json":json.dumps(args)}
request = requests.get(conf.resturl + "port", params=parameters,
stream=True, verify=conf.checkCert)
if request.status_code == requests.codes.ok:
if conf.file == "-":
# Need to reopen stdout in binary mode.
with os.fdopen(os.dup(sys.stdout.fileno()), 'wb') as f:
for chunk in request.iter_content(8192):
f.write(chunk)
else:
with open(conf.file, 'wb') as f:
for chunk in request.iter_content(8192):
f.write(chunk)
else:
try:
raise translateError(request.json(), status=request.status_code)
except (ValueError, TypeError):
request.raise_for_status()
| [
"[email protected]"
] | |
fc55aa9acf06aba5562b4ef3ac124136d7ad9207 | 8c07b4bbffac461f3dbdca5102736dded30b073a | /Problem Solving/Algorithms/Implementation/66_matrix_layer_rotation.py | 1987ae2895a6b57663a705210ed810d0ea9a92e4 | [] | no_license | yang4978/Hackerrank_for_Python | c3c36c4d68daadbf694a387abb6f32b7172604a1 | 86db5b6b59d090fccccbe51389c5282217549569 | refs/heads/master | 2020-05-09T20:06:45.181158 | 2019-08-11T15:15:51 | 2019-08-11T15:15:51 | 181,394,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,407 | py | #https://www.hackerrank.com/challenges/matrix-rotation-algo/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the matrixRotation function below.
def matrixRotation(matrix, r):
#dict_matrix = {}
rows = len(matrix)
columns = len(matrix[0])
for layer in range(min(rows,columns)//2):
layer_r = r%(rows*2+columns*2-4-8*layer)
while(layer_r):
temp = matrix[layer][columns-layer-1]
for j in range(columns-layer-2,layer-1,-1):
temp,matrix[layer][j] = matrix[layer][j],temp
for i in range(layer+1,rows-layer):
temp,matrix[i][layer] = matrix[i][layer],temp
for j in range(layer+1,columns-layer):
temp,matrix[rows-1-layer][j] = matrix[rows-1-layer][j],temp
for i in range(rows-layer-2,layer-1,-1):
temp,matrix[i][columns-1-layer] = matrix[i][columns-1-layer],temp
#matrix[layer][columns-layer-1] = temp
# if(i>layer):
# dict_matrix[i,layer] = matrix[i][layer]
# if(i<rows-layer-1):
# dict_matrix[i,columns-1-layer] = matrix[i][columns-1-layer]
# for j in range(layer,columns-layer):
# if(j<columns-layer-1):
# dict_matrix[layer,j] = matrix[layer][j]
# if(j>layer):
# dict_matrix[rows-1-layer,j] = matrix[rows-1-layer][j]
# for i in range(layer,rows-layer):
# if(i>layer):
# matrix[i][layer] = dict_matrix[i-1,layer]
# if(i<rows-layer-1):
# matrix[i][columns-1-layer] = dict_matrix[i+1,columns-1-layer]
# for j in range(layer,columns-layer):
# if(j<columns-layer-1):
# matrix[layer][j] = dict_matrix[layer,j+1]
# if(j>layer):
# matrix[rows-1-layer][j] = dict_matrix[rows-1-layer,j-1]
layer_r -= 1
for i in matrix:
print(*i)
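# Worked example (hand-traced through the loops above): a 4x4 matrix with
# r = 1 shifts every ring one step counter-clockwise:
#    1  2  3  4         2  3  4  8
#    5  6  7  8   -->   1  7 11 12
#    9 10 11 12         5  6 10 16
#   13 14 15 16         9 13 14 15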
if __name__ == '__main__':
mnr = input().rstrip().split()
m = int(mnr[0])
n = int(mnr[1])
r = int(mnr[2])
matrix = []
for _ in range(m):
matrix.append(list(map(int, input().rstrip().split())))
matrixRotation(matrix, r)
| [
"[email protected]"
] | |
9e608dacc9d3ebb72707e372da7d75d249da71d7 | 6ac0bba8c1851e71529269c0d9d89a7c8fa507f2 | /Hard/757.py | 918d72e4e0133cbd93a4a645050b10ebca46e45d | [] | no_license | Hellofafar/Leetcode | e81dc85689cd6f9e6e9756beba070cb11e7b192e | 7a459e9742958e63be8886874904e5ab2489411a | refs/heads/master | 2021-05-16T07:07:19.823953 | 2020-02-17T03:00:09 | 2020-02-17T03:00:09 | 103,690,780 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | # ------------------------------
# 757. Set Intersection Size At Least Two
#
# Description:
# An integer interval [a, b] (for integers a < b) is a set of all consecutive integers from a to
# b, including a and b.
#
# Find the minimum size of a set S such that for every integer interval A in intervals, the
# intersection of S with A has size at least 2.
#
# Example 1:
# Input: intervals = [[1, 3], [1, 4], [2, 5], [3, 5]]
# Output: 3
# Explanation:
# Consider the set S = {2, 3, 4}. For each interval, there are at least 2 elements from S in the interval.
# Also, there isn't a smaller size set that fulfills the above condition.
# Thus, we output the size of this set, which is 3.
#
# Example 2:
# Input: intervals = [[1, 2], [2, 3], [2, 4], [4, 5]]
# Output: 5
# Explanation:
# An example of a minimum sized set is {1, 2, 3, 4, 5}.
#
# Version: 1.0
# 01/22/18 by Jianfa
# ------------------------------
class Solution(object):
def intersectionSizeTwo(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
"""
intervals.sort(key = lambda (s, e): (s, -e))
cover = [2 for x in range(len(intervals))]
res = 0
while intervals:
last = intervals.pop()
step = cover.pop()
for n in range(last[0], last[0] + step):
for idx, pair in enumerate(intervals):
if cover[idx] and n <= pair[1]:
cover[idx] -= 1
res += 1
return res
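        # Worked trace (added for illustration, Example 1 above):
        # after sorting: [[1,4],[1,3],[2,5],[3,5]], cover = [2,2,2,2]
        # pop [3,5] (step 2): take points 3 and 4 -> res = 2,
        #   remaining cover for [1,4],[1,3],[2,5] becomes [0,1,0]
        # pop [2,5] (step 0): nothing needed
        # pop [1,3] (step 1): take point 1 -> res = 3
        # pop [1,4] (step 0): nothing needed -> answer 3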
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Follow the idea in Solution section.
# The most awesome part is in the cover list. It can be used to record how many numbers in an
# interval need to be added to set S, in order to make S meet the conditions. | [
"[email protected]"
] | |
14a0100b2dbcbb01a41c4f7c91fe949ea56cfe56 | 00af09f4ac6f98203910d86c3791c152184ace9a | /Lib/lib2to3/tests/data/fixers/myfixes/fix_last.py | 593b3de7a7c597e50552cc3a14520a059d1b5f8b | [] | no_license | orf53975/CarnosOS | 621d641df02d742a2452fde2f28a28c74b32695a | d06849064e4e9f30ef901ad8cf90960e1bec0805 | refs/heads/master | 2023-03-24T08:06:48.274566 | 2017-01-05T16:41:01 | 2017-01-05T16:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | <<<<<<< HEAD
from lib2to3.fixer_base import BaseFix
class FixLast(BaseFix):
    run_order = 10
    def match(self, node): return False
| [
"[email protected]"
] | |
8494b13a4ef36e2d314b32d5727effc37baba5a6 | 60d2c390736f5dce1cd0c9d4249a0ab95bdae802 | /worker/domainiq/setup.py | 5bd72d37039fcdf8af6567236e686aac55c84727 | [
"Apache-2.0"
] | permissive | tsmolka/stoq-plugins-public | d996b0be051ce0bac453af7380e7cbfecc03ff93 | a8d3351fe55fc72891c395d6767188746bf381cf | refs/heads/master | 2020-12-28T22:22:15.077514 | 2016-07-13T17:57:43 | 2016-07-13T17:57:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | from setuptools import setup, find_packages
setup(
name="domainiq",
version="0.1",
author="Marcus LaFerrera (@mlaferrera)",
url="https://github.com/PUNCH-Cyber/stoq-plugins-public",
license="Apache License 2.0",
description="Interact with DomainIQ API",
packages=find_packages(),
include_package_data=True,
)
| [
"[email protected]"
] | |
99ee5ca0b9aa7e4965915ebe6fce53773a76ff5f | ddd35c693194aefb9c009fe6b88c52de7fa7c444 | /Live 10.1.18/VCM600/TrackEQComponent.py | 30a2e48aa972041bdf55f9e3dcde94e17de86be0 | [] | no_license | notelba/midi-remote-scripts | 819372d9c22573877c7912091bd8359fdd42585d | e3ec6846470eed7da8a4d4f78562ed49dc00727b | refs/heads/main | 2022-07-30T00:18:33.296376 | 2020-10-04T00:00:12 | 2020-10-04T00:00:12 | 301,003,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,076 | py | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: c:\Jenkins\live\output\Live\win_64_static\Release\python-bundle\MIDI Remote Scripts\VCM600\TrackEQComponent.py
# Compiled at: 2020-07-14 15:33:46
from __future__ import absolute_import, print_function, unicode_literals
import Live
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.EncoderElement import EncoderElement
from _Generic.Devices import get_parameter_by_name
EQ_DEVICES = {
    b'Eq8': {
        b'Gains': [ b'%i Gain A' % (index + 1) for index in range(8) ]},
    b'FilterEQ3': {
        b'Gains': [b'GainLo', b'GainMid', b'GainHi'],
        b'Cuts': [b'LowOn', b'MidOn', b'HighOn']}}
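# Note (added for clarity; mapping inferred from the component below): keys are
# Live device class names, 'Gains' entries are bound to encoders, and the
# optional 'Cuts' on/off parameters (FilterEQ3 only) are bound to buttons.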
class TrackEQComponent(ControlSurfaceComponent):
""" Class representing a track's EQ, it attaches to the last EQ device in the track """
def __init__(self):
ControlSurfaceComponent.__init__(self)
self._track = None
self._device = None
self._gain_controls = None
self._cut_buttons = None
return
def disconnect(self):
if self._gain_controls != None:
for control in self._gain_controls:
control.release_parameter()
self._gain_controls = None
if self._cut_buttons != None:
for button in self._cut_buttons:
button.remove_value_listener(self._cut_value)
self._cut_buttons = None
if self._track != None:
self._track.remove_devices_listener(self._on_devices_changed)
self._track = None
self._device = None
if self._device != None:
device_dict = EQ_DEVICES[self._device.class_name]
if b'Cuts' in device_dict.keys():
cut_names = device_dict[b'Cuts']
for cut_name in cut_names:
parameter = get_parameter_by_name(self._device, cut_name)
if parameter != None and parameter.value_has_listener(self._on_cut_changed):
parameter.remove_value_listener(self._on_cut_changed)
return
def on_enabled_changed(self):
self.update()
def set_track(self, track):
assert track == None or isinstance(track, Live.Track.Track)
if self._track != None:
self._track.remove_devices_listener(self._on_devices_changed)
if self._gain_controls != None and self._device != None:
for control in self._gain_controls:
control.release_parameter()
self._track = track
if self._track != None:
self._track.add_devices_listener(self._on_devices_changed)
self._on_devices_changed()
return
def set_cut_buttons(self, buttons):
assert buttons == None or isinstance(buttons, tuple)
if buttons != self._cut_buttons:
if self._cut_buttons != None:
for button in self._cut_buttons:
button.remove_value_listener(self._cut_value)
self._cut_buttons = buttons
if self._cut_buttons != None:
for button in self._cut_buttons:
button.add_value_listener(self._cut_value, identify_sender=True)
self.update()
return
def set_gain_controls(self, controls):
assert controls != None
assert isinstance(controls, tuple)
if self._device != None and self._gain_controls != None:
for control in self._gain_controls:
control.release_parameter()
for control in controls:
assert control != None
assert isinstance(control, EncoderElement)
self._gain_controls = controls
self.update()
return
def update(self):
super(TrackEQComponent, self).update()
if self.is_enabled() and self._device != None:
device_dict = EQ_DEVICES[self._device.class_name]
if self._gain_controls != None:
gain_names = device_dict[b'Gains']
for index in range(len(self._gain_controls)):
self._gain_controls[index].release_parameter()
if len(gain_names) > index:
parameter = get_parameter_by_name(self._device, gain_names[index])
if parameter != None:
self._gain_controls[index].connect_to(parameter)
if self._cut_buttons != None and b'Cuts' in device_dict.keys():
cut_names = device_dict[b'Cuts']
for index in range(len(self._cut_buttons)):
self._cut_buttons[index].turn_off()
if len(cut_names) > index:
parameter = get_parameter_by_name(self._device, cut_names[index])
if parameter != None:
if parameter.value == 0.0:
self._cut_buttons[index].turn_on()
if not parameter.value_has_listener(self._on_cut_changed):
parameter.add_value_listener(self._on_cut_changed)
else:
if self._cut_buttons != None:
for button in self._cut_buttons:
if button != None:
button.turn_off()
if self._gain_controls != None:
for control in self._gain_controls:
control.release_parameter()
return
def _cut_value(self, value, sender):
if not sender in self._cut_buttons:
raise AssertionError
assert value in range(128)
        if self.is_enabled() and self._device != None and (not sender.is_momentary() or value != 0):
device_dict = EQ_DEVICES[self._device.class_name]
if b'Cuts' in device_dict.keys():
cut_names = device_dict[b'Cuts']
index = list(self._cut_buttons).index(sender)
if index in range(len(cut_names)):
parameter = get_parameter_by_name(self._device, cut_names[index])
if parameter != None and parameter.is_enabled:
parameter.value = float(int(parameter.value + 1) % 2)
return
def _on_devices_changed(self):
if self._device != None:
device_dict = EQ_DEVICES[self._device.class_name]
if b'Cuts' in device_dict.keys():
cut_names = device_dict[b'Cuts']
for cut_name in cut_names:
parameter = get_parameter_by_name(self._device, cut_name)
if parameter != None and parameter.value_has_listener(self._on_cut_changed):
parameter.remove_value_listener(self._on_cut_changed)
self._device = None
if self._track != None:
for index in range(len(self._track.devices)):
device = self._track.devices[(-1 * (index + 1))]
if device.class_name in EQ_DEVICES.keys():
self._device = device
break
self.update()
return
def _on_cut_changed(self):
assert self._device != None
assert b'Cuts' in EQ_DEVICES[self._device.class_name].keys()
if self.is_enabled() and self._cut_buttons != None:
cut_names = EQ_DEVICES[self._device.class_name][b'Cuts']
for index in range(len(self._cut_buttons)):
self._cut_buttons[index].turn_off()
if len(cut_names) > index:
parameter = get_parameter_by_name(self._device, cut_names[index])
if parameter != None and parameter.value == 0.0:
self._cut_buttons[index].turn_on()
return
# okay decompiling /home/deniz/data/projects/midiremote/Live 10.1.18/VCM600/TrackEQComponent.pyc
| [
"[email protected]"
] | |
af36d815d08d1bd4a25f5fd2f1d12947a91bafcb | f2a2f41641eb56a17009294ff100dc9b39cb774b | /current_session/python/423.py | 146f5f4023ad3adead1328140cd197f0a7827274 | [] | no_license | YJL33/LeetCode | 0e837a419d11d44239d1a692140a1468f6a7d9bf | b4da922c4e8406c486760639b71e3ec50283ca43 | refs/heads/master | 2022-08-13T01:46:14.976758 | 2022-07-24T03:59:52 | 2022-07-24T04:11:32 | 52,939,733 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,037 | py | import collections
class Solution:
def originalDigits(self, s: str) -> str:
# clarification:
# time/space restrictions?
# is the given string valid?
# upper/lower bound of s?
# idea:
        #   by observation, some words have unique characters; leverage that
# words: ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
# even = {z:0 w:2 u:4 x:6 g:8}
# odd = {o:1 r:3 f:5 s:7}
# 3-pass: handle even numbers -> odd numbers -> 9
# in each pass, prepare dicts (of even/odd) of unique char -> digit (as above, key: unique character, value: even/odd digit)
        #   count the unique char occurrences, and we'll know the digit occurrences
# update the counter (by removal of seen/handled digits), and then output the numbers
# time analysis:
# O(1) for preparation
        #   O(N) to count all character occurrences
# O(1) to output as ascending order
# for handling:
# O(1) to make query on dict
        #     O(L) to 'correct' the counter (9 times), where L is the avg length of words, in this case ~4
# overall: O(N)
# space analysis:
# O(N) for counter
# dummy case walk through
# preparation
word = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
counter = collections.Counter(s)
self.res = [0 for _ in range(10)]
def handle(numdict):
for k,v in numdict.items():
self.res[k] += counter[v]
for c in word[k]:
counter[c] -= self.res[k]
return
even = {0:'z',2:'w',4:'u',6:'x',8:'g'}
handle(even) # 1st pass: handle even numbers
odd = {1:'o',3:'r',5:'f',7:'s'}
handle(odd) # 2nd pass: handle odd numbers
self.res[9] += counter['i'] # 3rd pass
return "".join([str(i)*self.res[i] for i in range(10)])
print(Solution().originalDigits('owoztneoer'))
print(Solution().originalDigits('fivefour'))
print(Solution().originalDigits("zeroonetwothreefourfivesixseveneightnine"))
print(Solution().originalDigits("ertfsxvxttiorseenivsoiwosefeinoinwoioousiieihtfirrnioeenwiortwsefwnnniseoisontieiitnvsovthrenwfitfenoiwouwtrtnxneisieinovhuieenrveenoiuoireooieiuittefotnoeeftfsetooeniuvrsnvetwieetvoneeoiieeiiirhftnrntihenseveeorioriononnuhweixefoiotowxreossentetresvzoerfeonvhfoestveooxesintstvefewrifsneeexveefoxonvueituwoieefvofwrfistixeesooxoeweteortfevtsuffohnfruiwitnuxexrevrhfenwenofnzfvsiofeneesoshoefxoefieiieeoueofoivshvtrsoneenenwiexnifttfeoooetesnouewtneisfeenwiiwnvuntnwuesniffweeoretviftseeexeoetzntefnfoxesfforvrwrrhrewtegwsnvinotonitvxeerwooeefswxxtfixenwfxioevierefevinviwvxonfitouevwxnshiewuntwunniennvunvfnnxinovfsvohrresenffintfissstnrfwinsegrrnesentoitoieneiiinuenrtssgosetoufnniuesneseonesetnstiofieeosvoowooeueietrheiienstifweeueitioretiwefwvonsernsxrtteeiisofttwnorovenoisnrsoxseoeeoeeovnoweeowhfirovesxeutisnnviieneetvuxnreoenxinfnrvxntooeonroewxeoirxenfeeehietriuoofxiexwntotointhwvtnfwtnrieeofifinevsfxioninsvntenrhwiwovfenwfrffiofeiehuriieeeswooeofienuoeifoffrfetewoofeusoeiireootseisouvoofntexsnorovrersoovnoovtvvreineerrieeooxtoeseeowixtfeoettiornofunintonsrenoooosxifsrviuitonnetosfwnewnetoznvfhueovwrnnooexnnfeieninetihvwnrexiinvnieevexnwnvtfewexftixiernoonfeinffisofoefevviownoeionforfevufoefreisetvovixxoehuiniivvirewsrsoxoiohtoovvveioertfhveoitoxoiexniieneteiwerngfiesiiifwehsifrteieiwoueenwssnrtxnoevefrtxtoesnferxveeoeifoxuivononeeentsohofwexinrweuovoutoenhnnxenveoihfstturoeeeitrwiirisiinuhrosooiinffnoriweixeiuuenxtoouwosnnshiixnnefgereothioiinrfnneioeenitofieerwtreniortiweoinniewinxeerxtneeneoofutiiirrnsvoefoetssxonfxenowirutohnxerisweewuntnwfwounnentnohevnooxwwiifsoooevtviwsfeounfeitfxostgfgtnnfsounireiiingofxwesfroeofsxefirstrfnerfonitorneneiuifvhnninfvwsiffrffowxetvnfsrwweruiirfoooeeneetuioveoiioowxwsihvowfsrefifvetnwnzfoesnnevfoitrioniehtovwhreonensnhixsifnioeuushswtnnnfnofvneeetooeenheinurowzvftsoofwwgewsunnioffwersrtttohsrfxsoooitvnnsgruieuvteznwrueiisrineoewnxooiwnrreeofnxtuieesrnvtwotfiiewtoteriifwoofieriesoteofterverrroievoitnnnoweineehiowwvsweruooioffovoonrxvsresitrsnfinnostheeffosieeewntewotwfehonvfonrwrennwxeouontwxroitefnveneessrneeiiioeufeevofnxiifxwhefevxxtiosieonoexfrnvtfntnonereioiiferoutwtweouxoeneiirwotenrexvoouiooztnnvreotweeetveefwzeiueseiittitivsenrfseeisieosinnsreifeewnffiiffisfoiwfirtevewwuforiroteiefetiexwioenshneiiooesirhnonnnoriieoioruevroesntoffnuvievtinierfroxveriutofiovweivnifuwrwniteneefoeirxtnfiovogvfoinnrnnitviwiointtwneitnhetihuetnogwvfvevtxwefooiihevxnvxefftnnihfrrieoeortsseeoetnoioeiisowixnfowioseffexvoiwtteouieoexrfeeefeioxotniintoxwtfnofvxoenneoxowwoviionefnnssoifeoxrssovteeietoiirfeeretnerhoegoiesfvenwnnovzonifetorititohrtoixsinvieivfeivueufnisfoeinetioeoeonwiustxnstenwisnownornsievevtuwnontsntuonseeeneeeeheorivoeigoxtnwessuvfviniuuivoeeoetwvnxixxrnuvfweosofseohivuvvofivowiveinwvouueoeettwoxswnervtnovtfxionosweuisooiwnewwxnruowootisinrveowieoonewroefnsietfeentxoonoxvezroeutvefnueioihhxeoeeuxxifotwneeihewuefwexnftsnonofvfveoufniegionswreotrwfeowfeexxeofxtnuevieffoeeeeivteeeseofeniorevrftenrtnrnotfworsvifuesfvofxweoffvienitnonfeiotwnofnruswfonwottoenxorntfnxeoeeveotnouwetnfteuifnoxnvnnffssxfnuwiefveowexwiioffeewuwnewroiegnwzsniiinoixxeiitstsiztoeoefeesnvfniehotxouwtsrieernouwueuovfwreernovniwofrsvenioevnwoeotnuenfxientsooneexwioueioeeneouoeouteeweiitrreuioheonteuweroewiuiooftooutfeueesfenuwvniueniniforotviewinwxnhnteeftotfnoxiruonutofvrevvsutvrtnexeovoxffoxnfeiiirevesrooueeonnwnxetfofseoioenwnoxeeiwiofxnfteottufnfowseteeeeofwnxreevnvovuurroeefefenuwtoriofxwuiestonhreeuieeoonirefieeostieeeeossnveueerosinweefrsihrnveuiwnesoe
etwvietoretzxonnuseixhsrtgnofnietseoenwvonnoieftvoefotwrooetxetueinruizotwuooiooveonoiftuevvenhroerenrfhfsfifwreoewwrtsesoewtvtfueiszviewieourioeinurnsrethertivnsenswenffhnrsvosuftffetwfoenneseffrenfnewsneneeisfwfoettfsstnnweteffotnfitsriotoswsuiioeenternsvfxivtiiwtnfosrwwretsfitsoioohfveosfovtifneeieuivtnexfuoeeiewirioruzonrenvitrsovoovvterswfweiiioorewhsefrieinerenexsirivifrxrsnsexoosveexvotoiofitiexfoioiveteitroizeisiehoeeoniuiewooeisuifxtotitehoswvtfntiiwseouxnhvfnteiwvrooioisnhesowreosiswenrnseeiweisieiewfsxfoxtreeinxxixueuunhwoetosxnvrfeeewhtxeuufiowifefnnneesonfxitveeeooiivetnovtiwtneoortoioeuroosfxfewnxeeinnetnnrfwrfhvnieniwtivxteussoieeotetuooxnuseioiwxsorroeuenrentnsnvosrufioeonteofiniiexfuisseooeiownsnvutwsrfeeieeseiiwvfxrivnfhhteftuvfuexifweentfoovsooveruutoinsofeswuxofiowififnsennsrixveovowfeeueevtinvifrooftirofwvuoutnuxwifweteoiosieeusesoiirneuiusnnxiiwxontverxferuefxsnnorrenrzeruxxnsvooetfxiiseirrneetnnniioovnxieesweewfieeuftzoovnoifofwheexofxsurweesinweooxnnoeeownieiisoowteiofonewxwreoeevrswretotvnewetvxtutufeoeoieeuhnoiieuiheweiteeniftxerntsenoxrittievosineuuxesrwfeneeineruxiosiveeeenftenvinivveroxhtetfwnttgftvninhnonesteoexnoooriowoisenoofowwrtwowffttfrrtervfvufeotffhnxfeueftwwfotrifwenfrfetennisftefeiewvnniiowveooiirorwivvxftieoweeufotorsnrorveiftsirvofoontixfnooteoneeiorneosurnhnxfioiiteoieineeerwftxviiwnteotefesutreeenenvutxieiohvhzrironixovonrsiiteefeeeiiotwfrnhutouitfreiresefeofofiniifnennowoifuevteoioiixtnweeveroeeunetoeeeteesewewfxunioeufoefroreofwnrznvifewoeeouxxetoirnoinnsttwiiigrnoufrerhovfiveefreteoswsfvsrseietneewtwsionisxotortnsvfsvfriioisnfuuvifiwssfofnxienxftisxeheoffesveioiitnseowfoeniofeetfesninfvusiootetixfwewwvvxniurefwioheriiooiwfvsoveifenetroeosonhfvfwooeononfghuonesefoofnfstxeevoenieeuofuvonniueoooinizxfieexutwxvnosroorfovtoruoetenrsfexnweiinxotfnvfieiiinixitvwwweoevnhveftrrorseeeetouefiuxoewtxnuotvoieesivefrvivfweeihwwxiexnwotffnxiwerxtwnwiixufeoeenrieeoeouseftifxensioiiegornezonxenuxwotvrsiivsowffnwoinhfooxriuninioownftoewvsoeteuefxorexfeevnfinnineitwtiietiufitntoowiutthrtoiefsewsuonvveetefoeixiexovriiitnfrfeuntwwnsgiuworuosvenoniswitrernnerfvoftihonhoxwfosevirfnwftseghxrsitiirvfvvefhvvufiooerowtunewthfvfeoeoiwouvefffiftietvnsoinforoegsrsexiowefoeoufeofiinoiioifgvoiteieorenevieteeitwuuienoioxvriowoewioenrrxooseexiuzeewoeirwotozetvoreortxefuvewofeftnueisoneeivroniewuwniveheoohsinonnfnvofovxetveiifniuiwvwfieoseseisveitsghtteeeiottsivovonveinvwtteofifevwnexieevfvvefhniwiueurieefeifoevnnexwivieewnoeerinewoeefofeoohnteneeenofoewvifvixnoetvtitoiwovsifxeeueoexeeiwioeeeonxnotriveionseioxvewnfvxfsrnstwwvivvehvnesevereivfftonrvoviosethueevhientinxvefiintvisoiefeiteexfsuonotieniievereeinexrvstnoosnrwhexoffrweuehonuetoutrsxisthrrfwixueeuurvnseiefeifrtenivtonovrontesnrouenweiisreetunwonvethifewesegiifwnevfotihwinenffnxtnirgownxhifiiwfeewfienrviixnitoufeoiiwioueovueungvoxeienuffifroxfifninoxetteetioxwoxexwfevswtvonrtrsentierusuuitiotevnonootriioivoonwvxiewivwotetuevnefentxfivwxxrwivrwevuheueeuohsfesxnivonwnsonriteirwehfehtrhunioofverwssuvstssenxifneriienneretevowererfnifnnriewfwsiriffseuvvvforisxvosntetrixofffewiiuurnerninurfhsfvxonforeexoeirxtneeifvootxnioewfettfeiseninwuxtutsnrfeonewioioiufutvviweuoennowfvtieiuuunnitttxoinitnwtoiiiifxnftenoioterfofofnhofxitvvisvuinnuveeefrowhioouioisxoenevthuinfeifixesrherienftfsiuvereuooetofsiofeuhxvnhsrirtvuwxtrnisstxtnoioewixfieootovefintegtieioeftifrnnextgeeirnfhoiznesxefrvvinnosetoiftennoonnxenwwhtiuwshoxirfefeeeeneoivoifnfvwsneeotnueuusohxwvnnweosinefefeoeoeeoovnwifeunfwfooosoostiroertosfvtetxixffertxvnrnxetfnunehwsxtirnu
eviowuffrureufuieiiefweneritrufxxnronresussnueiwetxxiofxntvrtxinfuuesroivnntiieosoffiroohtetoroeotfirfvnhoffieuonexewsxnssrvottexiifweohfeeevtfewonotnhixeiintetzrtweeeevenwunoonfefnontnsvviunxvritfohtenuuvoioouroeieovnesoeotnsevffvueuonwneeeoutioeieeoeeitwieuiteoesesheinvouffrerinnnoonesiwreoietfxeirforotninfonnvttfnvooenessnsuhnonnoxnterifwxefrfiruwvseshrwressufifoeixtentnsneuninivnioeonwxsoofnvrenwsoifrivvewnosreweewreveetfiinhtifeoxuoienosovoffexoinwnvieeoteneosuoeeisfnoeotooeveeiitfeonifesnineeeriiiwffeovuoofrwwevioisftstwveowetwsrusefhriexnfnwewsevenneinvxoiswefesetffonioxsowwveefsneeeooerxnefxiovsvoesewiifvxeeoxxxuteofoeosinifiestoroffosswwuinroixvoeiirzeesfsfvfreffifwveiroeiisieeeifwexfewhewxuwnofftihwovevstvisnnzorefnoteneiirirfffiuonuuuveoexiiotefoneevsfonsrifueoerfonneteoostieiiitfuuoeuufveuerorxtvnnfentreoofunfieoessxosfwotonuxsfwniounwuwiwioxoierfewioirvtseutooostnifreesofinefefnnoeneesioinfhoeuieoeweueoonstutentnffoesvfoneoeixrffuivrnooeiewieenfvieeiowrroiirrseoeieiiovneoiroetetooeexovositwneuvetiotxvtentursoethwiievreeixoiieieeerwieviuefefowvewoerxeivffntriouonveoinsvotfienneietfinutoovexennvinieofxoveixvoettvxeoewsfsiisxefuegsooiniieuffoveoewfnoxefoewuisgtnessnetftifvwfeftxfwtnenzvtnrinofeteinvxuftoueisofitixrweixvviounenvsgtfnnietnufohvrfxrfesrfoixeievxtseioeieeoesseetxeioiswnuseoofsefenfeitewottoiutofssreniextevsreiesfrvwnifhfueeuitfxoneivnrvhfiwxoiiinxoneiosvwuevverehexinsnoonsreuietnoeouiiwitwfvutovitsuienriifeotirxvnieenfoftooxovorvstetfoeontexfnnnhtrvniooohtewxooruiuhsfnerexvosnnotwvvoeeowvtiiiuiviisonroxffwooetevneeeuotntofwvietotsseeusuevnosutswoeirofeietvitvfonneuouuiseriixurooforvnneeefgvurivfgxfierfwwoveoootxnefefwssvvvisoohevfnowntuniseieewunofentxieoiiseonexnxofuefeniiixttwtesioifsenevtoontreevwwrveefeeenooerensooeetovivvvineiuisfofnfrwnvfvonsefvexwvtntifueveswwfxiennrnwfegiishniinesenuweieeofonsvnesvfrwnexweetooeteuxovnrowwsenoxunfwntonithwnenfifnsrnffisnwneeirhnuvfurroewoeeeoroteoxoxifwxerrfvrnefoixeeweooiohitnnnesrnisositsesfuuisvovfretgxivvofeuefvniisveenuoreenwswtwnvewuevwurfevfennoveoixeuuuifnfronverieivsntthrorhoroxootzoonoriofisiueenoievseveffoffunvfftiwofnnessfevuerevuinenffntsooftnoieirnoioshtnnoionoewritoinfseheoweoitiesneiusrinvifnieouohexnrtsintxooninrseoexrfsniefvexoeirxorwtefoefexovewwoftetrtixfxeuwtfefsrnntoiteifiewxoioveeviftefoiirinivwnitswxifvuieonvworstofoeovoienxnwfwuesoonviuivuniwnwrrwrfiexunnerofnnxoovezvosneeeftoeowewooegoenwoevowfttniifoosneweixsoenttrtutftsreenioerroovewenfetovrrfeefwitseoeevhfifsiioeerrftxowexfrrseevtheisefonofofneifutfeseenwrsswoovetfxofvroevxnnrtviowxvtssriwinfnhoonsfsifwewornuehfnrnvsnffiohoneoufnreiertfueenifofeoxtowiiivnioreouexfhnvsiosnnsifxfoeeooofeefnneotionhieinvhhniefxtsinxeneerinterrnfnvniionsviniroheositifuisiienvisntenininxeoiifnrruineowsiveewtieieurvhuteioeinnoovreeeiexiiffieufetiiossvtnosotesxifofitevusrforunrffniintfefnwiefxwiieoufooneiiusnoiwvohoivuiufuosiennnffnztiirnxetiiiieifotviiowfiveexnoivenfnitfoinrivtvtuiffstwxvfofwfiefxfnowroofveeeftoegrwsetfixtnefoouurionitonnrvsvwouirittoeefiooietifurrnfinwfvivewxuoxxftionrnrowinuuietnenriunwfiefoutnroxtewhotinvoritnvnoeerwnffveuiorixeoefefeiiniueiowxsosexortfreevtoxnfiwvnorzsiuoiiioftevhieerxeoisneneeutfgoitwftoeiervneessoreooviuusfioonsxtroviroxoenovrnowntienooiiuitroxortsfueinuwiwfofnoevveorvweixvefnfsnronhirrfrevonevsvviioieiiviireovffeeetiutevsnowoetsfevfnuxetxtnfssswxetftueirfefnrwixixxvtoieeninxofvooiveeooxteseeveeneenexeeoxtoieinoowuofoexveeiotosexosffrnexoernuvoiniohiivinoirwntstniriieiooeeeioietrxxeshooetienxeefneotifnnevriefnnsfouvvenfeoeotisienwen
eexufffffwntffutnnsuennxnotinrvooisfesweeoiownvxvwroeehrirotehoveorviwenxeuexhfrxwovsivonuvifiinfweeefoonoexhnefoxserxvoxovoeeettfoinevvfesoeonffiifieneuneheeiirueowsuvtefiovinoneowxeiveieoifieoieirnrenefnetevtoteowusrweiiioisrvrewtssgoeisxwfueewneeefiseexfntnfxtiirtiweservxtsxwnixvwniruotentgtxozouooruivfniihweuireizoenssevurfiniorueneuifoeonenoooiwxxewtfeewoxifiiosoifeeunuinfxvinfuovfwiuvnxnoxsonxsrteiitnrvneewrnefsufeovorowenoouretrsgotinvveeesotoeieissnuisenffvnouovssoooitntifeiioseosrfnotnsunrxeveitviiihoivfirxroreisiwovwfsoowosuwrsssnvuoninssnxreoenetfuifnoeoozoiroxeowinieeuhvsoixostzftefwxxeuxiusowsuseoeesevefseieexnorevsniiitwoiitttixvirirorfwveovwnnttuivrivtvuovuvitnnrsrseefeewnnvoifieniusonvfosnxeivixrifttreeotefsvitoneenivisovsineototvfvfnrnivovieiixnuhteirootixousetvfvenenveoevhtoviensretiioinionifoswffnriifhixevifuennivsxeeisfnotvrofrtooerxwvheitiuoitrrnueeoereenfitsnneiugtffoeenoeswxiowfnvivfnefxwusuretvesngiuuoensruxhnvnfwietwitfixernvwfntuioiiwiftrsfoifntinotfufinwuhtefoooiitvuoivoneieowntesrfioitnwuwtoinitioeonefxufsreersonnixxenftvueoeesfiefuifxnnfverixriiorxstwstofttffonwnvnsnehfeefxtnsvootvftwixneieoiiueennofeontoefrwioefeosrwnifrtixotwerffniunxoeositeneffeevtewisisosvsenzeinovtesoxoerefssoennwiestoxfexinfftnheouxtihirexeuewoouixihwfeviffiiftietsseneweooviofonnifiohswrfoxoveesiovxxeviveoneeeoifinennvefvrfvwoxnufrfuewfruitnhsirinvffohweoeenxifxeirnoientfiuuwnitfnrfnuivvsevwneerievrtontunoneienexuwtnuuioiofttiunvxevieevierxoixosnwiofrffnsevnoiexoffsntfftnoewnsefnoieeeonxxxviuefshterenotioonfzuuristrioeniwhreevusfnnroooverneieifrvtitnnntiotuurofsfxfuerxesefrehexefniffofotnxiinexortizffwiosfusuoteusesesxvinewoeofnvseiseinnxeeofneeoeuirioiievirffiisefrftioewrutnivsoeefwexfxorhvfeensnnnxvgvsrvvxvxeeeewufuivfvsisfsveiirftxooefrfesfiifrxooerevooentrotisshnixeitftovxfveusnreietfeueieinwienuivirfwsfvneerfeioesissvvvsrtfonvnsfffiveeieonviihtgsoroewnwfohxexhnirsoowinexiiiiooieisohiunwnofennfvsswneoeeioionieoxwowitfofofriefxevnsrowieuistfnixtuxniwseiuwixotstiientfvoeorosntextonsexnsiitiffrovonionnoftevehewuirwtsfnuifiiiveoifrfoeexfsneeionnersrnioiueuoteonortiftthnoueivnrwisxuonieofervieohrfxhvnetsnnfxirernxesnotoenshofvfvoxevvouononhxroresiivreriesoeiueuffifiivtieihihurtweesfnsieffntnvvieineufnxnnoinneivseoisneifnononfteoofifsnuntwfooorrsifunoixueuenivifeofroswinhwrsrfiwswuoronnevexxvxonsvieeeteinsovtwoxieievoxfsteeeheweeensnshenvvftnoivwihnnxveeooxofierisnhnnuxeninnvoerxisersethhnztxouexutefxuxztrexrorintnivufxenwfsrxruxuxexxooneotvrsifwiuvwwnuxufefvtifsfeiitsiuxnwfietoeffernwiniteeeserswxooueonzvinefuowvneiexoestneeoigsoofwiovveoiviirwnnntoffovonieeeotwfnwvtevfevfinwworeehetfxiixtxnievoveifofnsvwooxvxerewnfeuuifiwinoouovifwvwtrfeoieiisnfvnnfsvoenenxvfftswivxowfiuherisnssrnseionsosxsfofevouneenifeefieuexfefnorrhoeetuoweveunwnevseoouevwrxnrwesnensxefeofeitfiewxveoniesiiosnwifnenesnsrnwnvfvowrxsxixfenriffoxroxrinoiweviotofiiufnwwfeotsnihexvvorinnewvoseixovnetisefrfohhveeeteoofosetteereotfwnneieeiivrfrtnihxfeeirrifuerfrusofsefesrooovritrrxxfwsevtviinfoseouofitnnewfoffowiseeitiottvegvnvvieseixoirofnerwfvoneeiiofroeronehovfrnweofworonxoexroenoeseoweoinfveeenegrsxenettxfoofieoinesftfoxnirszeoifeesoixtonntevixesevoitenfntnurvtnftiooeniueirnifftiweeienfoiixviinsuefzfwovnvwtvwfefuiefnoineeveeeiwvtoooevnisfienninoutvesfvneeisxeoeovoesfeheuriieuotugesvvninnvrusrteonifhszoofnioniefwxtioisneuzuxnfseveovvenewoevitfvnstniewtxwoefeeuoevvoiresewieivsitvixrfienooeuurwvitonretfenvowetohontnixteeufesniinseeufixxfowxxxvvvfvoeoeoertieieeeoeeftfvsoefioieirsfrseotiuexiiisefrotonoenownwufeerwheeonxwsisiienw
iootiefnxtiefizuvivovereefonsenovfnofiweewoornevofieoxinnixietfuxreiinfxexevfoenvteosfrstrufvtvrvvievxiiffftoiefvhisofeonihwwftenvoieveeinftoweouefteinfeetnoffrwtiseinrnivexosunfeohotoevnoiooisegvseniivsniozieoueufroeiveoenesfsonnvnfteshnfxnionwuixooeioxiroivheiihoeeeouooixieetvvfsieeuhnegfeteeeofrifofevteeeosvoveuwietusvxtonrteiteehsnofsefetsionnfiioiiiexefiveuixiofnnsisviuxhsfxwteoftwureuninrniwexunsvivvivofeunfesxofonsxwefisxvnfnefsivoxiotsfeefiitnsefviftrtovoenifirvifoshfuxeetgreenoneoeefoxonfhofixfeiietnsstvsseenuoiweouisfftfwiiwoosreresnoneuriiunoffoftivvetiiineftznszsfisveoovinenssfeeesoseehfnfvnvnsoetvxsewerireenonwfnfftoveiesseeniztownietwtoniroosefextreifieososexxefnevivfninexeienhvehrsvnetewefowetoiinfiiuxoetfnnxirxnioesfnvieuifouwnvufiinniviwnfesnonnwwefieoxoewiuoinsreoiffsennxowovxieixiewnovfgisunfvfntrieoossonsietoeitifteovinfnveeeevoieeofeesserefoxrisfeoienitvievtifweiexitunirefurnvwnvotnfoononhxeeisehoovnieieesiwwuetufnnvefeeeoeinevefoioeitivovniriiinxosivfeftoiieofriofouoweuxiofifveeeenizeoxxveeneovwtonifeooiiirinixnifosfvioisfvthevxnnvessuzffntsiwiinftiiwoeexsrrneoxiveonxeoeeonifseesfxtsisoiiooiefoieotntenoswhfsxnssewseuftfeeifevsftvrxnosfweionxeonfoevofixesvsefoovoveenfveiwonofsetnefeenzofossronwftnefeetoetvnitrivsoinrnuwgiinssveiteseoosfnveteinieefnwttefveiewiiuenfusutfiniseiowfiiginietreusvenoewwofvwitfeefrxrtfoivioeninoooivsfetoinxfswtinoviroiixtefoextooriewoiesofesvvvesitixernofixnxteeeiehohtiivteexoosnooeninieuxeefvowenortofwefooieefexfrenisezenswivotsusfffivoneofvwninnirfiesouwrowiiftsvewwuottoenveeiriivevienfvnfnuwxenwtffstevhovitnioiirnonierivorhnfniewnnviiffixtefteenoiiosfeotfnxwrivenoinfnxnnieeffheeeweetvexnrofivxortuftfsnetxwwtfioisirinnniseeiehvofueishtwnoooftooewisesoirnusoixeinioxreseuenrsstrevvxsiwfvsvoenwveisuveeizusxftwvioiiveoerweusfehefouwvunwixieeixfwuovinvintvontvenvrueieoooxnufsxevevxrevnivnievrirnrevxewxforviitieoiiosnwfioisixuixefeersxhfvrrrioeeeoeooveuienoofisoxefrxenviorfneoivifeeostnfeeorswnxouvnevieorxwioehoifiohieftufttrxnexfitruoneheexerwrtnseuenvtesrioeveuivfseoouvevnrttetrofswxuiexnetoefionoinegeooionevrevteivevoewefxreierseevnffrfeworhooiiesxeiguwesnsoosoeeotvevreoznfhoiwxouefrthiiixiiiifiosiunrxfniexstsoeeeiofuforveitsnnxinhsoxevnvxvnxwngxneofvefrroxfvsvfnisniooxefuunisvtnuvexxssieseiiexefxuuiostfoewnnoofonovoufuwovsxovnsihiixvnieosieewistsiovwuooweesefounefruwtoifhvsestennrosruetreeftfionwtenvtfeiesfeovfeeiiivtifsufirfnwevgtreeuiixievxnonfninvoiiutteioesrtvuovvrvfsowrineoteiiisuizvtisitoivosenieeiiieoittreoowfsoeffnenxosowvrnvswnnensfexeuwwwutwuefhffxienviontiwfffneioeonsrenxixeihouisiifsowoneexrewvsfgfswfssfotinsxneienfeeinerweoetesiosofeeonoswuitfvneeisetitofiohgninvwinuhrieownveiufetxvensifvftenrieoxorfnsfoexnoesenxnotfeovxorioeouvviexnsvvvsnfiooeuoeiineosuiohxiixgteivifwihofriooeonneoeuioxftonvsrwnixftsvviinfroeirfiexeiowfineseneiowwtexoeoxsnieufneuiteifxifxieorwesznnwnfeoueiiuvexotexrnosvxinrruveeivfnieniexsnnfiexeeeresorvveoonwwissiuruxeishxveiwtonxfeivnnviffeixifowtositwwxrfieiisitewsoesoeinioihxewfiovffxsevvowifftoeieeneinttneixiioteiixeeiofiottoofeoxferneettnftesrnuievfouoruntiiernoheosroxtonfwhrhitinoowonfiresounoirwiwixttfssxwieoerertfiswsfnenvnewofeeotnuuirntvutesxtixfuonexivfrxfrutfthoiiiovunfeonsiinxieiooioeuwuoeznixvnffgeresueiereiweofusttttnxnrvnenfxzieosftoiuwesoxonnnoixsixivxvixferststiefiztitoiefsxnnetuxnnunfoeooioiooiewrtwtionntisoretrovniseonvivrofnutioewriieiefnwrsretuevogfeuiffworiioointtnsinewehfsuoesneonituinvetofineifienteivtiwereosffontstxesfitxwieirotsxrriihuiexuvuffnrovhnthsfotwxhiniiornueoritntfvexsonfusfetn
uofeirfewnovwfifwsorioossezftwvfuteuieivxnenffeitreeetnvfetfrrnvoeervnvsefeofroooenovorvfoiinuinrxoeeeoisixevwxtzisewiesoeiiiuisoenexitsriosizwsiwseroiiixunfreontxeixineeffnrtiiioeonsoswuiioerruwxinevewxiiienfnifnvrxnvnnnsnvexeuoneoniunvtenxtorfeewsvnevxvoesiosuvtonvisweuotioeooeonnfvoforontnooxnxrfiuihisufnitsnffxteeeoenwwinwxettiioirevuveitfvovovnueeifxffwfieoeiisovenoieioonfinesoooeiiournfvooveethnnenvsienovvunseeitiffxsirfeonttwofeeinfnevriuvtneenneeriirhxwfnoeewvosrvssnvntiossxtvevwniwfoxsoiuefiuwieeiiirioteewegovineorvtnoinnuffrvtnntensxofvvruivrifftfieintruooioevsivrwiesoxisnrsrgosfxoenosxirnoierowiivfexxeeiseiierfiefnoefiineiveitfxwxiewvtwwueneinssvinstnvfniwxnivooeeeinvvniegvsvsioxineeeofeeneennvhitxeetioseezgnsuutoiveeitenstsnirreseroevenwissfinnnzirierrevuwerfefiiovfnniiioirirninfrnfnefnnvftvuerveooftoxniiifoieiviufrfnoxwxnornfhxeeiexenineifnnvtftsweeiuioosoioofeveexixonuifuinrowioeixeitrxxnswioeonissefnxeueinexnvesioitonevieirzvorexoeooxeisiioiieofvnxwiinexfieiifsneoifituefrixwnxoefvuiifeeffsiirfuntiiegieoisiufiwiexnnoifxfroeffoforvooivvtoeieesxttnuiwffeeexueiesotifiefvofniiueoixrnitsnefosvroxieenerowsevnwvswiexnisxnseouxtrsntrrooeeofriofofwivnseivrhvteiiooexetusrvnvtffentieenfnvxtnfofifwueffnfteeheoeiweiefuniehnneituevieiiiereowifvonuohvvrtsofeeovifoiixsefuwsesierfiensohewsfefsvinvifxfeietiwofinnoxssfofeihnifivtfniewiensenrvoreefovxeeioofewifniieovwrernontrrfrtsiotoixnvifivnrousouwsiiueitsfeihnfwrxrntsttviwxunwoexuufuoerofeerfnheenvneiitfverefiirseeostehteswufesfuetexivfeevfofieesfeineevfvnosehfoiovwnsirnerevieouxfuuniefnfhuowontninnowoernffxistrisrioeueiofevursxeewotosxfertxseoinsnnefithnenxrotevtieonnxeoissitieuoeniteefnnxenxnesnsfevefffxniteertitnoreenfzexfsxxfsivenxoeevswntxeiwvfoiheetneuohnsnoerefwexewoieneeooviiinhrfwxvoeixiinnitfowifrieetvivxtxsvfftovneooexoheeosnwroorowiisuuensrsiesnrviwiirevifeffeirnrfnniwtxioonoetfsvwuswewseiushneseionnisrvxrixtnrfuettfeitutheetfexnereiforestournefweseiuwtinfwsrotiofiennunseuuiteuuhfxosnwrwvewwserfnriifieoswihstsefvvnexnwieueesewsuooniixuootirsvusxwfeunoinxexesnnnfvvizrrwiffteoenfevetxnsegffienexsotenextiefuxienxtfsoioeuoxeehifixteftnioeintnrieeortivwfitxtieenefetxntioixegsinrosxxrnnoofeovooihterfonvusiissnuseentoeozosffueixwnhrfewiioinnorovennvhxvxsrvswntosinrfiierieneriitonxivtsevesnoroviwxitnfeniivefesietovsxuiwnnuwsfosiiufuifiiefoeetveofxnvioefrsvnnfeeeososnvxnentxrvxoithunrnrsfwtoeveeeoeexoiueiftiveifofsrriesneovntsviueztettxinoiohiinfuiewefeeeufvvweiinfeseveeiifuhhoeivisevuueoeifinhrfxerxitxfnnssuvrvfentesteuswesnsffeoinevniirtseonisvivowtfivxifiotifiiexuwirsfnvtnrfeeiiwotnuixosovoniovnvzvttseuesvvnseirsewffrofxfeetsotvwoefneinsosoifiixeniowtrnnxeewfefvefoohxewfeuxxnsveewievozseoofhtonfnniiutvzxsxnroeofexovewxxfsetenxresoivsfiexfigfevoexifeneotsrfsefovsneusiewronwivuwveeionsrroffroerfzweetthniefuoetivhinnexieinewnfoovnirhweoetntitfxooneouvrifneteeniestfntfooxveotinresusoffiiivsisffvefnefnnffwiiriiixrensifxvuiiievewinwxvexosrrnoeoitterniseeuteftnniezeneewviosefnevionsnnfosirenxeneeniverioefxoerwosvowvonseteoenxinftsensetvisnongoennfutixeorrtevnxitxnowutnswiftviinwoewtzwnoeetfniiwvioonveevwrewxsenoiiennrfeiteisfezefoivififitxeorunrttiinxsniotinotretsegeoniiieeotouroiotovwfiirxiiwieoexrxnivetfinienieotirnwfxeevftnoohstsveoiniionsosireiethrexoeuhhvofeeexeoizvtvsfvvvnsvtovevooiierhoniwoiiougexwrneowshriixinoofvnunientoiixsfiioestoiintoixioruesrwuonenineieefnnuiveoixixesetofexhoeiuenteezoeerinsiiniosusfvftvxoevewitfinfvwtnreixoftnnnososiovionfhveoxeueoonxoxiioitnfoorntnennnefffsesnnovuffrxvunreexewretuenxwonisofnfxxwvtrnerwriitvnefexf
teennixvientxunfonoottoetsenwofefvitwnefiweotvsoohxwseiiftwotftinxsnniefenfixeieoveeooonoofeioenevesoifiieoirrorxoxteneionvtntiersoixftnxvsihoxuosowrevoteeniigwsinrisvostniniiureoniuxoezfexvnosniuxttwitroeeuseexosoiofvunottxfforsnotreesewoersnireoivsrusvsixnrvoferrxiuretevenevixooetheiotwiustxwneeitxoivereeeoofevhnoinetneeenxfnwfiofxniwnieoeseixotrveiexorfsxwwivxnooovfrstoiehsonfvsrioififttfeehxuoeveieogntoieerseeffieiixxxoieotreioirxfseisxxvregneossostiooxuousteetoxfwoxiofngtniivnrnentnxfsonnseuivisevsevooiieerxivovxeixoeotuonifwtisewtsooeetuxoennsewxsifxttviietxovxoxxnwsoeteiieefevtnoiteihswsihoxoiofwveooiwoosowrninfntenrnisvieiiseniwxvuxixewroeeoniufeuvnuetwosfeseeeristovssiroeftrxirwwrfwneoffeieeeoeneornseniveoineernfswfttntiinntowtnowxtttetenonoeexsneineeneivnvtxisiwefefnehonoosrunsxssvsonnxtneteeseuxeoixrnsrwfnowsfioiiissioweesfonsroinxoseriuoninfnwuwuxtoixsfnnfevrreoitfoefnnrswiixfeofswwivzrniixfoutveoesennewvenefnvfexitfosvtefsvwvnstvverifnorioehvtevooroiiexeruwfsrxewunxiwxwnithfnvtotefoieiewiwisvfwtoxoiniivsenxnssoteeenfnxrteievseevoeueonnuteesfwvrnfeiuuwoirrvnnvovoenownfotoeooeewrwueseeroosveoeefsfnnixvieivrrnnfnnfeutenewxofnensoonvofxxfeinireinverooevsoeiirrosoevuenotrvitoserstivsfvoixvvfitenieftfteinoienesuvhnxvtotwoeorffeneoinoxtiwioitoxneneeontwxvsvnetrveoesieethixfeiossohiixoonsoirxtiesrwzwnesneotritwonsuneofiiofxiiueeoiinirvfuutrhtreiieeeevfeeswrvovovuiwioeefnwffooefoeiesiiiiinneveoxxoxxonttfxfeionnesgiinnwfniiefwntswteonwiiwterioosnenrinernvfheveirfsvnerwfesnwfvseifxevnrieveesetvhsisrnxfeistewxstfooniwruxsoifnhoirinsrneersirgfneoirithvieeftuunevvroerueustnfuewethonxtivuvitvfwneisttioxioinetivstfforwfinerxioieexrixtrneneeeeieviiineisevexewrhwiittixrnwvoiesseoesweortfvfxitofuwvivninwsnweeevfronotneeozwrwrxoneereneveiiofieffvfriivvweifnuiwefoxgefrfneiiiownsnnerisoxssvreoeotfnfxxirfwreeivioswovnnxxvneeoieoostievixnseooioessvooiuruxfoueroooessioeiiffwrwnfenwhfeihitfennoiiwoigsorrrwwotftxevxwvenxfvroovotssixtwoieuotioisnonrxnsnnvesntofvioesioovretonwitvsvxthovfwevfuwofrxnfexxnitronxwnsnueooxeeoxoeetveotwiooinioxviixefiovreinioivxxsnoeihuefotsrfrtieeetesxisoeneuwennfneieisnweneonhiefniihorfswefrnoseowrveeoieftowsoffvxnxtveervewoesvessesiswiwrntnwifwuuierfoiivetxeetetretnfnnnwrofensxwnwxseseesixeifereeihonffwsowntvihntioeorvovinoieeetsisfftgoofexfnwsivxievofeisiesretvezuiwifxihevtievtonniweixniriseessoooonxvirfitxuoeoeowvoontiuowterxeruofxetisuffroitfieeefxefoovtfxwxioxniifvxounvxeioiifoengixutsstnoixeifeivifxnrvsnftsfioritefosiozowenetexsnowwieonrfvnoxfueownrstnifsievvortoofinereeonneieseiinreetesrfrxxniexvvnnsfxxtwoiefwxniowiursrrttreunsforisernsvffinvexrrftseionixiesnixfoersentiteioiiierneienfssrinirxvwtovtifurneseneonnrsonsvrwrnoievieiowwvootnnfootiouotvtnnfeeefexrnhrninsoriounneoveiteessnutnxhtsovtfusxonovnrsuxrsxviinioonvoinniuussfrtennnonvevtooffrsonxvvrtrwovetesoteonougnuefnnioosfoexrxtorfnxeewteeiiiofvotenxseeezwrwitxhhxefrtfiinofvhiioeftsittwifhxvoeoitntfswwnuoititreeoeveseeriowviifnzexeefwvefssnsxesinonixxxsieerefxenfeeuiuefifwvxoniowenetorsnoxuosoinwiieweineeifesxewieixrhfteoiiszooiexfevioefwxvxetehnuuifextvfoiitntioviorutsowenoitrxnntxwsrvnouseiwifeiteeeennneeoffeoevvtuihtiwnuifniefioorxrfnfexsevntxnnsewosetutnxseseextonvvsiuxieivnoniiihoxxtextssirwowvoxfsvsirhoitrwvxifeoenoesoixenfruivtniweoxivsnfieniorwnesfviiservoeooeeonenehsnirieixofsieeiirfnoeioxfssftfwtiousiofwooeinsfnrvfvteeseievfffvowsesveviixxiretiieoewhvrovowienenvifreowsfvouteeiohsnxnirffnfsxeoeeiestfviiosoiohntwneifhfrfeniefnieeeixioiifrsoswxesofnwsietentoioifneniseuioeswroeiowwexvtwtnstrfgoeigiiwwiierwiexn
eefiswsxeewiuhvsefvsveoneietsxeiirxsiieuiefeissrieuoonreewtiooiwinrfuvuvsoersssovnvxonwiifvrroeoxuseoovfietxoteenvivtosofiorxxsernsexwnfeefihnseseoifrewfofossirieeivostwiugnoeeoifresntvixooieunfwuxrnxwivntsnfsnefeeorxnusrwexiefovvweneietfxvfeneesoxxeiioeiuutxeetexiuooiifeuieswnfiensewftfotsnuenrfoerszxrufvexxnoexniownotrnoftifnvrivinnnsxiioxosorwihsfninieetrfgefntfeeswfnrxssevseveennfnifgfetefhuitsenfnxsiefsooneivseoeoohesixesnotwwwivixxintfnixonftswwrerweetoxxnixosowuronenivhrsfutenfnuweeenioehneifntseeexreifeeftetwneexixeiesfennonnneoetoionfvnrenxeutiexunvoxwvriifvsoxsuxxnfvuxoiniroinesuioirusfurvstesrioewseixneowoineeveinevivtoeinsevtiwwswioriosfviveifrxoriisvioofosnornivfvonftnixfefvfwofnofevnfevvhnsieowiseonxrrsnxrifesioixxsuveotieixenffreoivxgofofnooroiisoteuuevefvisisfonoovtsefxneezsivvfrheeiteisuieivfnixfneounvfixfnervenfntesnsteototieeoiotvnvervxerrwewowsisxisrsuieuexxoeexoistiwirvwtfefnresfiifteoefviseussivexixteoewivouefoeuxfoexeotfiiiuevvewziexneefnonifveoihworexeunietnvvxnosoeoonrieovevoinoisvwionfrnvtfifeeooeiietintsosswenoeeetnvxuxfzrufeeinwwevwoiuusuueiviswfeznrsereuowsuwsirnznsvxfoeueruioeofeeeowsvetounixsvvvewiouxueeewevennxftwvfivniwufsxuoenewifeoeeewxtfxeoswvtisveovsivurwexfxeeotfxnwxtsvvsftesroeeiowfitoinunfnvwnwnswsiefivxttrveseivoxiinuntefenuewitfftxnrwsixsoheinxeenvetoownxeefneenwsoeoxoofoweohfrtexnsnviigiienefvxnveioiitttooviiooohsiiiwtsieinowretixniieorfofoeenrufhzxisisvonsnxeexxrwnxreresensitfeiohxihwesnuouwevtntioteofexxixorviioooxxtxoiwosuinvesisnethfneniwtxfiiuxihroiwvteentuoxoeseeesfweevixivvoeivetnswxishooioinexevxrreirerfntvfioresowursznoeneoeesihsvensenonswuvsfnsionovvrwfsffneeeixnofiuxtrxvnninhnstiownnefnnheiweieioixiiseeefsvvxeoonuofrniswxzuneeffteewhwofuesrrttseeoeonnoeesxirnfefigeveefiififvnuoinssfervxrrenesewniienoexfsiiiivvnineisinuwwsinsiveeewhisuxnineuufxrorntxtnxwewneexeewioeetoosieirxoinexieiefevofnstoonfstftfiseinexsneiesinetveftefeefneoeoxsfvxuironuvtevnesfvtwvrfvnfsooeeosfeeiotteoserfvwfivseinuoietofvhnxhsifonioowoowvosiffoseooefeeotewuortioionrrrussvvosxieiteniifroenuvotuonnseertnsfoxnweffsongswoxreievtxfrnhrtsssnoievfzwnnwiixrerfixrtioesneweeioooeevvenveuooigtwroxioontriwsewuewroeniorstneftrvoeeeseiirnxuvivxnfewxnnnsfnfuneeoevveiffnswviiwwieinnteoxvierefeioxnwonnxotrxenetxertsnfonntwtfwxxessreiesxoeixihxihnsnnxieuioifvnvirnsrusnnnivennihixtiinfeiiinonvgsiseiuieoesvreffwunutionsernstrfrvweivovtzritieoserevoonowfiieieviriivviinneouxfvnuvvrzoiieovnfwizeefwxnfwoewsnoweesftefhtheioiinnreeesxosxreexeevexieefeseifoifgooeensnfiifwwvteeeoonnexwrvioxossfwhzinixsoieevnnuvertrriiwvriwnuntftvnssesuntneifeeefewseexoierwsesnixxofitrhewxiiieizeiieiiissosnnxoeoewneviffwnoeoesontvnfgifiiiiwewiieiifevrvtrzifnovfennifiiinwfsefnssonvweooiweeoufwxeifisiieewevifovtwofvseirnonovrfninennvuuiofifsusnnnoivonrwesueeoirueiofuwrvvnovtiuxtnevtnoivfwivnsistsouffiouvtvnenuowvvsthrfniiwisxriinnefuresosisenfnsoreefuinixotioienoiwntovrewfiisonenetosovrfvotwenvsefowrvounvxihwnefunienrowenueeesieoeffsntvexnsnssoeniieevennxtunovevnevexenieifiinotsifofvissufewxotreeisnivxnfsveveefnosnhwefxxwniieiuevxutneiwtowniieoeieifwevtetssenovfnvtoiriseonooxtininwffewoifehwvitnwefwieonvefetveeetftesszefxioerftusrwxeefsverooisiwswsrinnsrunrrsetsvewrevoiesgoxreioieowveovoteiriefwnesoiiuinoneooeswsosiieuwtooeisueofefeeneuxovntxrnrxwworweoesffoeneoiossroefinoreritseeeixwisfisetrnwtfvrifesieereoinwxrifsostfveerxivrosofeiftfieisonvnnvwuieeiieixxsoieentsfnootuwvronsuooenrornxenunfntetnssftieeniineuvixifeeooihfvsfeeoinwereetinsoufsenefiisfrsfeneewtxeotnntooofexhixswenfevftvonixiifufintniviievfeoowveotv
texronrtesegtwuvsneennifxineohxwrionvtionoeinewnxrviseetegsoetexnunweienevveiexnoofietoofxvoirirothrfiennorienoneffoefsfveewwsfiweoetfeorooriieviwnotfutnnrsertrviwooefeoworefeureoenvwxrnneuntvevinoeereeoenrvnheexsnttrsnniveerenvueuovosxxuooefnesfrextenieonrsxeetowehnonsiieeietrenhfxeeresveiringefttowuxifovseooewiiuostixvsewrnevvogoiufirssfwnfisonroeewsvvoonexwvfowintetvnsrietxrexrvfwnxwfnwneieennrreffenureefitoeffrotxoxoenoteeenviootffoxrevifeoeiivtrrtvwiofsovssnonuosiiirstxtewfieotovsioetntinoovnitvfsoeeswvotnisinitoeietefvtxfofixsgefevwotixvoeswfvnferevfooioffseeoiifnefvwguioevofxxuovfvsuenotviuvxseeshfoxroixifvoweoesoxesiwtefvneueoeeiievwnvoitnrvtifeeifoeninvertxteriieeoxinuouhuiesvinihisfrnfoxxfwintrevnenfeeeosonffnxxentioitrretihiewszowoffeensrooeooisrexnsxeesfxufereirrntiiunfhzrhvxszeenoeniferfssnisnonxreoifsooxeinizfsefeefeioxvnoitiiefsninfifoeoenwueoeuingeeeusvnwruooxwsfoenwitiriitivsefheifoewxetrofswnoexntvvfhsiwtsonoooniwexeenfiiuitveeexwifntnniwvrufeofoioneevgsifoowoifevihoteuuongtfiuewesonvxiweusvuoooeiineehsfxssneisnoootstshfuirtsfierveiivewfniooioeenornufewevofinonifxvfwuveuutwnwtgnoetssoitiwninnwfnseonxehrxefesoiefveswnixtexefxexxxtenieefonhrrsisitfewnfwvwreswetexsusxhswvtnotwwiwvfeoiutiinrnoxxneoiueenwsiireehtvxwsreeeefvteoeievnnxfevforofooiwefvovwofvosxrntevnoeeitieoetniiittoiheieethfffxvrfeonnsrfeossetoxnvfehvnteiiioufixhftssniohnowfnfovnhtftiitureieonfeiesnwvnotuehutefsenntvwvuoffoeeewuitnorienrereeeefsvsextsonifgxszefiiertefhnroteiisxtoweifeexeoerevesriuxexosowiefffenoiienenoosieeotugunevuenfffsiivftnennsiivxnrxwfsoiestoeuvoieonnxivoeeneeivrveoefxtnoefxtsnrxsnhoiennoveitnoefxffieievieuseeorenhixoievinenertetfrnifrserosueeenwtwhiunhowtfinenssisnwrtefrnnxgxnetieiovwosoiiiirinevwovffwnrniiifivwtvtnesievnortennnffnvwsosorxentiofteehirirsotreoosevoxsvtewinovnenoinnvxwfxfxnoeeiinvefvnoxnznxfonniwoeeuetnerernfootrfeooiievireoseixotriehefonfvrhootioforvfweetfexfrninftntorotfeitixfeeneheiwinnrinretoetiofwinevxehrrxoswvtewroniiuxienevoifunouxefonfotsfioiteuietesfeinfvrsiivetrnxevoveirtfoevunseuufirvsnurteuoweuigreetovtvieonnenifsiooonnefntnnoovssnveennssvvfuntiiefixiixnveiseiseewinnowffneiwvviesittontesevxvtnvoieoentienenserwnniefxitnwevneiwonxeieeroeftoeiovtteriniennsiesnfrnnnxniofnheeeevnsnonnfwxetvnntnehiestfrioiwxnuwexseinfvoeftnvhiersexfisneieziexnexufwxtehenwieiuenowneinfirsxuovoieifrusieswieefofiwfoousiuevtsivvieeseoxtvtstuorfrfexeneneeieooneivieeewefntiviuhtvfeftooxewuwintfxeorosiiwonrefsxnonoiwenrstrnernfenienowteverfseeofwiwrteetweofoxseervwteethsivvnoviweowoieerhionvfoosoeowfiwhrsufooirweeewniuunrwsenrxoeoxfeoevnitiztreufxtuoettvoiefioeheineoeetitnnefntxwvixfiiiunexvniewxoniviftfifxivtenhvsftufninnosuoeeoeoionvennneinerieresostostwoovzwohoftreioeoixoxeininessneeetnntetieoieueiwrruieooowfsixeienefexehfuxxnhisosoeevevioinfoexniooefeinivfnviioneinofoeinsffifvesoooeonovtrterhewiisfvnoteutesetuooueiiufhnoitvotisovsieieovtoiwsieueieeosefouitevoertnotfoxonoetrnxoivhixisweoteonreiooffhivfesfwshueriwonowenseveoxftsivefihoeoofnoewtiorofuveofrnsinewvwtesvnrtsitiuouoeexweoenveiwothoowoeowexiioovvenierfxineiwxwxerenoetenieenenninsfineetnfnnsffvetsiitoffsssneseeeevftxoiiorwforznfestunurinvofevoeefenertovefiiofftreiooevxisuseosroeexsefisrsovftihfnsxttuforenoueosnonterounonfssntouieenesurewfieiovtsnoxteweenienuirfvitnonenwoeortnwiseutxestwiwuseetuefeuetwoevorinefnixsiseenstttovewireeooererotfiveoieinfnvvnevwsnsioxtvsnsterxereftnteeoeesfrxwneerfwtenrxifroeenexfonnsxovouiuroefvtofnneioeeessetntewvtxeifhvonewvnrinheneeeoifxwiffheszsnxonwifsfnfeoxxfeoiiefsfoxsxneeuieteveeeseieswoefzeeeofxo
wnfeneeerxnufxextxwwierfiiifwineterwffifriurvoeofvwtrnronfrsrfieotfsseovsusfioexnsrenfnfsiiiwoeeiefifoxnenixnniefiesuxuvifeuwxuosfieosxfhnwfnfvvfexeinnntixwfohsoeenrheueieeeveotnvzoiwuoroxnoefsfovesiifewxxnsztevienvorovfseineofoofsvfooeriniooivessrxwfonwnveeuvsssiewxorsioveintnetetoiifsteeienooonisvnfrnttwieheinfifssorxnovoetonrroeswiutiosiuerwteitfweeroovfserneenofiionfiioteoievzeviieffoesowiwnoxoetssotteeeuvfneixnuzvetntivfrgeuerfxnxnifeftrntirvenefvsuivivnwiwfrvnoixxtrveefisizorrioeeitohesvuefnsotvtfrooowxfvonrovetosxiroeevoifoxfseevnfnoienetoenrsiineoessireoeroiifousfxerftswiievuiwrneisfinwonigfofwoxoiexionieroooetoiveefrwtovsfisexeonrnneiofuvewnusounnoffofvrieizoosnteswnfnofesifteuivnueisivrfxoofrutnitxwwnftxiuwxsouwfooxowiefruosooxiixvtrvotegntiosrveffhroieitxnetnfsiuifesfionevfsfoirefiviffittrwwevturooixfvinevhrtswneetritvteosttesineueoovoiexotufivutuoooofiuvwhfsviuteehoffofewoviwefooeivexuofvhiieienwfuwsehsriuntienrweoivoxvwouevsoifeeoixsnioeoenunofiexrixuexteereoovvurvvoreorftfneoftisesefowswrtfoorrxhifuivoveetfirseixfvituriwvwrreiioifevtixeionwffeeouisotfvfniwnfowrifftvneitooeevsvwwsoifueioxosutronenesnssiritoifffexfrnitetvrxveeufvtserzviseoneroossxrnuitteonnouinxrtiseettefrxvfxfinfoiieeivwrweenwerswxioefeetioivfwewtswvewvinfteeueofioohueseoootoorhufxownexirnoiiifisffiteofhxsreffeotiftiwieoxxevihusffsosrniviuwuxoexerosoievisiioeoetowtxostfrsfeefussoffwithihxtwrfifioououwensrisewesofroefuseresooxsooxvxtsixtosxgxeofieuoovfiwieoxuoenissieoouuifhuvuvwierrrsfosioieiseoioivioeefefonsfvxiifvweoorotuevofvotroresewffuieunseftvoexereofetneuifsxoviettxoxesxrvriesvvvfnwroioifvrhreffxueunentvueeeoivfoweeeeoosufrefrxextifexxfhsutefifwsievihirvsioiiiotffersetieweeioreiirfviefiriseeifvwottfvsweuneffhvfoewtioteoiooerxswioesivfetxeixvsoetexwxsuoorenrvoiortrefifixsfvoervvtrevnonsoovntoiievfrxsoivesvivisrewshuoetennsxexeoiorvgwoouuefvifstfriirveseriooeevvioxrzvtttieifhrohvtirrsiwvrfrosfwxxxfwsextesiriveiseiirsunwoinefwiwsexirusoowwufoesruxiesuvtssvwoufeneifenfwegoiovtvtooixifreeoonshnxwrhorotofewrnfwvenoxwweheshehetxvvneoittisvsewvsiwnwwuofovvwexiofonnfetrivwrftsowrhotroiuerxeiizsoeswxofehuviotenevthoessuofntoterneiowrtooreeivwezooivtvfuwifinisfttrvxvexunerewioretfrtissoevienuievofssueowniewwvovrfeeoxetvissxouxunwueivhewoorrtixseinofiexneentieeooeifetiiexreftfifeieoooeonfoetiihreninfifhtfesfoosisoirtruwfneesuwixxerxiriofiizgxiteixwrofneunvfvffxtrsreexfxirnhrwhsofnwveiiffrxfferifwiwnnxnsutvxffuevoeeerivtfthiiouieweiheeiioneusfenittvofxezrnehxotenehoewwiifiitenwisenvifxveeinitervifrseooiiveesutifrvewxntvneiifnfeoiootesnwutexihefgwniteohhoorfefixsefoexoioewvnveisviihsweofstrsioennfiwisiffixoeeuoisnfoeenusorfofenxowrrwiniifvuefortnrenfwwereetfrfterueeoetxnesieiwioxefteventrorrvieuovoxtffiretioxursnzovovoxtoefenevrrusesehtifnntvovsftuoviuoihvniiivxtoooneoeuufeixfeefxosowfnuetsinitfvsoonxsfxnxetheiuvifeoxosvniiesvifexsxvotouienvhneseofeoitovnuvfsisforwuifteserifrtxohnnoourhffexfoeotvtfsohofooeoxwrwstteinxursftsioesinnrsfrnitfiwieiosefsiooorxnevioieiisoeeuoeoinfuvtfefxwonttnvixorxtoveifenwsnnooovsiefwfieoeeeeneweexohiuefsvtxenrereestiofsofsofnettorzroxsiffewofseoeoxoiitostonvfvsxtverisovtvewwrsetehetwfeiuffniirirteeifrxerrvsgnteosettieiuoeveuwosvftsnwririueeoeeznnoovinwitetnvtvfuwsifieveionfoixrnhefrfefvevotohveievntxxeeuohfsvstterefuefxtoeoruoxxeeioosoevfifnfinosvivoteriivfosuehxtrnoteeiifosovfoentnnstrtsiievoiixooiinfosuvsfoonvxtvuoozouefeieetxhrnxixoxiereoenitefiehetenowviooihfxeeewivioofinftxtowoorneffeeonofoxeeihvniewxuiivrsinvrihftiiiihvotnwfeoonfweeinoriotofiwuxsuiinsieewhwnefeehwisrieenvieeriteiiiro
esfeferofvwovfixowfwefevovrfvxeeeootnfrioreititrfvohoitoreiovotvfnnvnseusfxeoeinrotueooeuvxfoeofosxstetxsftonrvtesftfsovexotxronvieifeeueewiervuoeonuesxefienowoonfvntoufvewvietiiioorsnrhnfevefoowoixeefesexsttenwieuttnxoouootfiweofeoooxneutfiveonshxovwioetoisnxftohrusoeouhtfnevhxsfifuesfnihoofwexsovnerweoiezwxeueixiwvfwoeorffeefxrvexenrirviuueiruxovoeneioxuuiiuotttfvtxvsnevsisxftwestffnnsxonofnoifitoeiwintieisevsesgieorxonooxvhofusfuoteofonhvevovsenienoeerfoisffiiuinoeehnenoxtwnxwevwtufteusftvnsntxooesiweostvoerxitoxsnesefunioivnsesvzfofrgsirerefowfnsvvsferiveiofrofwssvneowoehoofiriouefovesvfeuonrwnineseuhfirvtetnivnefxeevwiofrhtenzeivoneinntwxevituostfeiowsweniivitnntooenerssoeriuoeitftfwevxruusuvowurexosvutowrffxoeneswoeifeoiioevehotuwuewnnwwintiuntfontovifffwfofivvenevhrrenfixeieweusevrfzohuovrtisooeexioonorioorvtfnnfteseirtuefestsweouxtxvvnrefwoitiefooveihsnweswnxniewffeexnrnvhsxeenhtizeiitzvivifeefoxwosifwfxeiixwosunttesvvrxrwesxioeeeexferohetoixtvtosereitfnerrwshuonhwoieeiofeeiioieefrfeixhexevnfosestfnnxvnsresowvwnunennietiiwototeunrissouoveitotnewroofeotfoonewfewxifvuefnrnnvseosffexfisetttuvrweovuzittiivtowvoiiofvoeevefxoisnwxneoooiiweeetwfvituofxvttfteweeifnreiisooreitotniwenonetffenvhzteviioinnoioixewnieefuwtfvnireefrfxennooesvuuwnesisvxfxfeoneonfoewoosneoooiiefuxnezxfvvffsteierovoneiunewrfeeiouuwtwooifiernetsooefxifoxeinzrieuieeunrienvnnroeioxvxweonxsennsxeuhzerwseewnoefsinnuxvovswevieuffsexieeewoifoennssioooufniixesensxnftegetnuiieeivfteoervxiinxeinfetsnnetixnewifnvieinnotroerwfessuooiewfrtooxofetivrnsohosoeseweseffnrxsneeintrfneoieioeixwneervvxxitewufiieeniieftsffisefooitxeteeenurvieefhieorneivvheunewevfxuoonfseffofrfnnfxwxsueffnirsiftirnfiwonoonoieonzeivheivfsxuoeerrtoiooeeszvweitnevstrxfeurtsrooevfiseonhnetnnxoinfeeixtvtrfrziieeenineevenftwirnixwuenwenteiiieufoffounfesfehuxiveinoifiofwfeiiexnfixivnetoixrieiueeeofoowteenrtoxunoenffisnefivvnosfvwewwexvoveovitrvnxoofnireovvfeeihierexntihinresexwnontteexrveeefeoneissswwfnsooisxevwfiivosonuusuowenusoennowiersorirrefufixueewofoeerioevnoinvietfnifxvsssnntoxeeeteniiiiehoefixnereiioefeoufifieesioseveviinwriiosoiuivesrnrwwsxoeiiiotnxnevsxrusoeeefiorihsoewxoifeuietexsweixfifowsefeifofowtfnevnfhvfotwsnevwifon"))
| [
"[email protected]"
] | |
bd32f3a3edb66736241c626c86d60844c4a1a252 | e8ecb520c73c335c9c163e8ce59fa25d3f8b1f1c | /automlToolkit/components/fe_optimizers/fe_optimizer_builder.py | 236f2edc5f8da0b39c76c59c4c83f9324a2d9e75 | [
"MIT"
] | permissive | zwt233/automl-toolkit | d3200a2fd5b01311f33a0e61a7cd6dc7dccbaacc | 67d057f5e0c74bec5b3cbde1440ec014696737ef | refs/heads/master | 2021-05-26T01:18:09.699592 | 2020-04-25T10:48:40 | 2020-04-25T10:48:40 | 253,997,217 | 0 | 0 | MIT | 2020-05-09T10:15:08 | 2020-04-08T05:49:48 | null | UTF-8 | Python | false | false | 1,173 | py | from automlToolkit.components.fe_optimizers.evaluation_based_optimizer import EvaluationBasedOptimizer
from automlToolkit.components.fe_optimizers.multithread_evaluation_based_optimizer import \
MultiThreadEvaluationBasedOptimizer
from automlToolkit.components.fe_optimizers.hyperband_evaluation_based_optimizer import HyperbandOptimizer
def build_fe_optimizer(eval_type, task_type, input_data, evaluator,
model_id: str, time_limit_per_trans: int,
mem_limit_per_trans: int, seed: int,
shared_mode: bool = False, n_jobs=4):
if eval_type == 'partial':
optimizer_class = HyperbandOptimizer
elif n_jobs == 1:
optimizer_class = EvaluationBasedOptimizer
else:
optimizer_class = MultiThreadEvaluationBasedOptimizer
return optimizer_class(task_type=task_type, input_data=input_data,
evaluator=evaluator, model_id=model_id,
time_limit_per_trans=time_limit_per_trans,
mem_limit_per_trans=mem_limit_per_trans,
seed=seed, shared_mode=shared_mode, n_jobs=n_jobs)
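# --- Editor's sketch: minimal usage of the factory above (not part of the
# original module). `dataset` and `evaluator` are placeholders assumed to match
# what the optimizer constructors expect; tune the limits to your budget.
#
#     optimizer = build_fe_optimizer(eval_type='holdout', task_type=0,
#                                    input_data=dataset, evaluator=evaluator,
#                                    model_id='random_forest',
#                                    time_limit_per_trans=300,
#                                    mem_limit_per_trans=1024, seed=1,
#                                    n_jobs=1)  # n_jobs=1 -> EvaluationBasedOptimizer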
| [
"[email protected]"
] | |
a3bca85b9b9b07bbeb59a725e6fa0cb897347e06 | b79837918de72f26558f484a59639bfc8ae3fc1b | /dialogue-engine/src/programy/config/brain/binaries.py | 820afa307915f4ca5836ebee1e096254f040d54f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | jaygeet/cotoba-agent-oss | d0efcfb3f22271afa32d0ffde04cf9808bbc1368 | 26de67dbda401be5f1d50ae2165e4e1d820882f5 | refs/heads/master | 2022-04-20T14:32:04.790471 | 2020-04-07T02:41:33 | 2020-04-07T02:41:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,135 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.config.section import BaseSectionConfigurationData
from programy.utils.substitutions.substitues import Substitutions
class BrainBinariesConfiguration(BaseSectionConfigurationData):
def __init__(self):
BaseSectionConfigurationData.__init__(self, "binaries")
self._save_binary = False
self._load_binary = False
self._load_aiml_on_binary_fail = False
@property
def save_binary(self):
return self._save_binary
@property
def load_binary(self):
return self._load_binary
@property
def load_aiml_on_binary_fail(self):
return self._load_aiml_on_binary_fail
def check_for_license_keys(self, license_keys):
BaseSectionConfigurationData.check_for_license_keys(self, license_keys)
def load_config_section(self, configuration_file, configuration, bot_root, subs: Substitutions = None):
binaries = configuration_file.get_section("binaries", configuration)
if binaries is not None:
self._save_binary = configuration_file.get_bool_option(binaries, "save_binary", missing_value=None, subs=subs)
self._load_binary = configuration_file.get_bool_option(binaries, "load_binary", missing_value=None, subs=subs)
self._load_aiml_on_binary_fail = configuration_file.get_bool_option(binaries, "load_aiml_on_binary_fail", missing_value=None, subs=subs)
else:
            YLogger.debug(self, "'binaries' section missing from bot config, using defaults")
def to_yaml(self, data, defaults=True):
if defaults is True:
data['save_binary'] = False
data['load_binary'] = False
data['load_aiml_on_binary_fail'] = True
else:
data['save_binary'] = self._save_binary
data['load_binary'] = self._load_binary
data['load_aiml_on_binary_fail'] = self._load_aiml_on_binary_fail
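    # --- Editor's sketch: an illustrative YAML fragment (assumed shape, not from
    # the original source) of the section load_config_section() reads:
    #
    #     binaries:
    #       save_binary: true
    #       load_binary: true
    #       load_aiml_on_binary_fail: true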
| [
"[email protected]"
] | |
e0d5b1aa2a2b2fbd72ecb9606db9c9e16fe10b04 | 0ba671270ae1582b5b0833a2f9a92ad7560e8b07 | /src/atomate2/vasp/jobs/elastic.py | 0258ff16dc053dbaaf2b9466c8849a9690f77746 | [
"LicenseRef-scancode-hdf5",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | JosephMontoya-TRI/atomate2 | b6cb7f816882dbc63c7f451f7ab500b1551eab4a | 02787e213806abcb99601b1046effb02681ed7b4 | refs/heads/main | 2023-06-03T14:58:45.391371 | 2021-06-21T14:34:38 | 2021-06-21T14:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,381 | py | """Jobs used in the calculation of elastic tensors."""
from __future__ import annotations
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
from jobflow import Flow, Response, job
from pymatgen.analysis.elasticity import Deformation, Strain, Stress
from pymatgen.core import SymmOp
from pymatgen.core.structure import Structure
from pymatgen.core.tensors import symmetry_reduce
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.standard_transformations import (
DeformStructureTransformation,
)
from atomate2.common.analysis.elastic import get_default_strain_states
from atomate2.common.schemas.elastic import ElasticDocument
from atomate2.common.schemas.math import Matrix3D
from atomate2.settings import settings
from atomate2.vasp.jobs.base import BaseVaspMaker
from atomate2.vasp.jobs.core import RelaxMaker
logger = logging.getLogger(__name__)
__all__ = [
"ElasticRelaxMaker",
"generate_elastic_deformations",
"run_elastic_deformations",
"fit_elastic_tensor",
]
@dataclass
class ElasticRelaxMaker(RelaxMaker):
"""
Maker to perform an elastic relaxation.
This is a tight relaxation where only the atom positions are allowed to relax.
"""
name = "elastic relax"
@job
def make(self, structure: Structure, prev_vasp_dir: Union[str, Path] = None):
"""
Make a job to perform a tight relaxation.
Parameters
----------
structure
A pymatgen structure.
prev_vasp_dir
A previous vasp calculation directory to use for copying outputs.
"""
incar_updates = {
"IBRION": 2,
"ISIF": 2,
"ENCUT": 700,
"EDIFF": 1e-7,
"LAECHG": False,
"EDIFFG": -0.001,
"LREAL": False,
"ALGO": "Normal",
}
kpoints_updates = {"grid_density": 7000}
# make sure we don't override user settings
incar_updates.update(self.input_set_kwargs.get("user_incar_settings", {}))
kpoints_updates.update(self.input_set_kwargs.get("user_kpoints_settings", {}))
self.input_set_kwargs["user_incar_settings"] = incar_updates
self.input_set_kwargs["user_kpoints_settings"] = kpoints_updates
        # calling make would create a new job; instead we call the undecorated function
return super().make.original(self, structure, prev_vasp_dir=prev_vasp_dir)
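    # Editor's note (assumed usage, not from atomate2): because incar_updates is
    # merged with self.input_set_kwargs before being written back, user overrides
    # survive the tight-relax defaults, e.g.
    #     ElasticRelaxMaker(input_set_kwargs={"user_incar_settings": {"ENCUT": 520}})
    # would relax with ENCUT=520 instead of 700.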
@job
def generate_elastic_deformations(
structure: Structure,
order: int = 2,
strain_states: List[Tuple[int, int, int, int, int, int]] = None,
strain_magnitudes: Union[List[float], List[List[float]]] = None,
conventional: bool = False,
symprec: float = settings.SYMPREC,
sym_reduce: bool = True,
):
"""
Generate elastic deformations.
Parameters
----------
structure
A pymatgen structure object.
order
Order of the tensor expansion to be determined. Can be either 2 or 3.
strain_states
List of Voigt-notation strains, e.g. ``[(1, 0, 0, 0, 0, 0), (0, 1, 0, 0, 0, 0),
etc]``.
strain_magnitudes
A list of strain magnitudes to multiply by for each strain state, e.g. ``[-0.01,
-0.005, 0.005, 0.01]``. Alternatively, a list of lists can be specified, where
each inner list corresponds to a specific strain state.
conventional
Whether to transform the structure into the conventional cell.
symprec
Symmetry precision.
sym_reduce
Whether to reduce the number of deformations using symmetry.
Returns
-------
Dict[str, Any]
A dictionary with the keys:
- "deformations": containing a list of deformations.
- "symmetry_ops": containing a list of symmetry operations or None if
symmetry_reduce is False.
"""
if conventional:
sga = SpacegroupAnalyzer(structure, symprec=symprec)
structure = sga.get_conventional_standard_structure()
if strain_states is None:
strain_states = get_default_strain_states(order)
if strain_magnitudes is None:
strain_magnitudes = np.linspace(-0.01, 0.01, 5 + (order - 2) * 2)
if np.array(strain_magnitudes).ndim == 1:
strain_magnitudes = [strain_magnitudes] * len(strain_states) # type: ignore
strains = []
for state, magnitudes in zip(strain_states, strain_magnitudes):
strains.extend([Strain.from_voigt(m * np.array(state)) for m in magnitudes]) # type: ignore
# remove zero strains
strains = [strain for strain in strains if (abs(strain) > 1e-10).any()]
if np.linalg.matrix_rank([strain.voigt for strain in strains]) < 6:
# TODO: check for sufficiency of input for nth order
raise ValueError("strain list is insufficient to fit an elastic tensor")
deformations = [s.get_deformation_matrix() for s in strains]
symmetry_operations = None
if sym_reduce:
deformation_mapping = symmetry_reduce(deformations, structure, symprec=symprec)
logger.info(
f"Using symmetry to reduce number of deformations from {len(deformations)} "
f"to {len(list(deformation_mapping.keys()))}"
)
deformations = list(deformation_mapping.keys())
symmetry_operations = list(deformation_mapping.values())
return {"deformations": deformations, "symmetry_ops": symmetry_operations}
@job
def run_elastic_deformations(
structure: Structure,
deformations: List[Deformation],
symmetry_ops: List[SymmOp] = None,
prev_vasp_dir: Union[str, Path] = None,
elastic_relax_maker: BaseVaspMaker = None,
):
"""
Run elastic deformations.
Note, this job will replace itself with N relaxation calculations, where N is
the number of deformations.
Parameters
----------
structure
A pymatgen structure.
deformations
The deformations to apply.
symmetry_ops
A list of symmetry operations (must be same number as deformations).
prev_vasp_dir
A previous VASP directory to use for copying VASP outputs.
elastic_relax_maker
A VaspMaker to use to generate the elastic relaxation jobs.
"""
if elastic_relax_maker is None:
elastic_relax_maker = ElasticRelaxMaker()
if symmetry_ops is not None and len(symmetry_ops) != len(deformations):
raise ValueError(
"Number of deformations and lists of symmetry operations must be equal."
)
relaxations = []
outputs = []
for i, deformation in enumerate(deformations):
# deform the structure
dst = DeformStructureTransformation(deformation=deformation)
deformed_structure = dst.apply_transformation(structure)
# create the job
relax_job = elastic_relax_maker.make(
deformed_structure, prev_vasp_dir=prev_vasp_dir
)
relax_job.name += f" {i + 1}/{len(deformations)}"
relaxations.append(relax_job)
# extract the outputs we want
output = {
"stress": relax_job.output.output.stress,
"deformation": deformation,
}
if symmetry_ops is not None:
output["symmetry_ops"] = symmetry_ops[i]
outputs.append(output)
relax_flow = Flow(relaxations, outputs)
return Response(replace=relax_flow)
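# Editor's note (assumed jobflow semantics, not from the original file): returning
# Response(replace=relax_flow) tells jobflow to swap this job for the generated
# flow, so downstream references to this job's output resolve to the list of
# per-deformation {"stress", "deformation", ...} dicts collected above.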
@job(output_schema=ElasticDocument)
def fit_elastic_tensor(
structure: Structure,
deformation_data: List[dict],
equilibrium_stress: Optional[Matrix3D] = None,
order: int = 2,
fitting_method: str = "finite_difference",
):
"""
Analyze stress/strain data to fit the elastic tensor and related properties.
Parameters
----------
structure
A pymatgen structure.
deformation_data
The deformation data, as a list of dictionaries, each containing the keys
"stress", "deformation", and (optionally) "symmetry_ops".
equilibrium_stress
The equilibrium stress of the (relaxed) structure, if known.
order
Order of the tensor expansion to be fitted. Can be either 2 or 3.
fitting_method
The method used to fit the elastic tensor. See pymatgen for more details on the
methods themselves. The options are:
- "finite_difference" (note this is required if fitting a 3rd order tensor)
- "independent"
- "pseudoinverse"
"""
stresses = []
deformations = []
for data in deformation_data:
        # stress could be None if the deformation calculation failed
if data["stress"] is None:
continue
stress = Stress(data["stress"])
deformation = Deformation(data["deformation"])
stresses.append(stress)
deformations.append(deformation)
# add derived stresses and strains if symmetry operations are present
for symmop in data.get("symmetry_ops", []):
stresses.append(stress.transform(symmop))
deformations.append(deformation.transform(symmop))
logger.info("Analyzing stress/strain data")
elastic_doc = ElasticDocument.from_stresses(
structure,
stresses,
deformations,
fitting_method,
order,
equilibrium_stress=equilibrium_stress,
)
return elastic_doc
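# --- Editor's sketch: chaining the three jobs above into a workflow (not part of
# atomate2; untested here, and the simple cubic Cu structure is illustrative). ---
if __name__ == "__main__":
    from pymatgen.core import Lattice

    cu = Structure(Lattice.cubic(3.6), ["Cu"], [[0.0, 0.0, 0.0]])
    deform = generate_elastic_deformations(cu)
    relax = run_elastic_deformations(
        cu,
        deform.output["deformations"],
        symmetry_ops=deform.output["symmetry_ops"],
    )
    fit = fit_elastic_tensor(cu, relax.output)
    elastic_flow = Flow([deform, relax, fit], output=fit.output)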
| [
"[email protected]"
] | |
6145cd5e87a549a0d9c20a7fc5b8a4552157ce2c | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-5513.py | a600187ac21cb207ac946ca0a65c2d0966855a06 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,756 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
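# Editor's sketch: example use of Vector (kept as comments, since ChocoPy wants
# all definitions before top-level statements; not part of the benchmark file).
#
#     v:Vector = None
#     v = Vector()
#     v.append_all([2, 3, 5, 7])
#     v.remove_at(1)       # drops the 3
#     print(v.get(1))      # prints 5
#     print(v.length())    # prints 3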
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
7df101ba9e9e9e6e90edf37196a59fcb939490a1 | 898be346f3fd476f625489ec6e85f97240e32ae5 | /LeetCode1000/LeetCode1446ConsecutiveCharacters.py | e11f5608019bf1f0e9621062e45f4788c7988dee | [] | no_license | lonely7yk/LeetCode_py | e3a0c47f274db8ef3e4540d4d570a874e198dfcd | 67054f724c6c0e1699118248788522cec624b831 | refs/heads/master | 2023-01-10T19:10:13.772069 | 2022-12-29T17:32:50 | 2022-12-29T17:32:50 | 228,568,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | """
Given a string s, the power of the string is the maximum length of a non-empty substring that
contains only one unique character.
Return the power of the string.
Example 1:
Input: s = "leetcode"
Output: 2
Explanation: The substring "ee" is of length 2 with the character 'e' only.
Example 2:
Input: s = "abbcccddddeeeeedcba"
Output: 5
Explanation: The substring "eeeee" is of length 5 with the character 'e' only.
Example 3:
Input: s = "triplepillooooow"
Output: 5
Example 4:
Input: s = "hooraaaaaaaaaaay"
Output: 11
Example 5:
Input: s = "tourist"
Output: 1
Constraints:
1 <= s.length <= 500
s contains only lowercase English letters.
"""
# Greedy: O(n)
class Solution:
def maxPower(self, s: str) -> int:
if not s: return 0
cnt = 0
res = 1
last = None
for c in s:
if c == last:
cnt += 1
res = max(res, cnt)
else:
cnt = 1
last = c
return res
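# A hedged quick check (an addition, not part of the original solution):
# exercises maxPower on the examples from the problem statement above.
if __name__ == "__main__":
    sol = Solution()
    assert sol.maxPower("leetcode") == 2
    assert sol.maxPower("abbcccddddeeeeedcba") == 5
    assert sol.maxPower("triplepillooooow") == 5
    assert sol.maxPower("hooraaaaaaaaaaay") == 11
    assert sol.maxPower("tourist") == 1
    print("all examples pass")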
| [
"[email protected]"
] | |
1be8a4e8591cd4ae0bc34100c5c35bc9ccea66d2 | b2bf90c852b6f5258b036dda26edf9b6aa35c4cf | /backend/lizz_2_4_5_dev_19037/urls.py | 4b20ea1f8945360ca166a62d4783ceb901cdce88 | [] | no_license | crowdbotics-apps/lizz-2-4-5-dev-19037 | 1079d95d8ae10b9dd2ff4a4eed0335ce2ca2a9a2 | ae27dc2926763f2c9fd10f9d7a9a3c0426168b84 | refs/heads/master | 2023-02-25T04:54:59.626240 | 2021-02-05T01:32:08 | 2021-02-05T01:32:08 | 336,130,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | """lizz_2_4_5_dev_19037 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "lizz 2-4-5"
admin.site.site_title = "lizz 2-4-5 Admin Portal"
admin.site.index_title = "lizz 2-4-5 Admin"
# swagger
api_info = openapi.Info(
title="lizz 2-4-5 API",
default_version="v1",
description="API documentation for lizz 2-4-5 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
7d4f9fdb9ab6f6463cca8a9564179547b0de0d51 | 83180906386bcb3d0a3062cc575f974c3dc1e0d8 | /tutorials/ACT-R Unit Tutorials/u5_grouped.py | 0bcf370790406f89606c85ddb21c96d0e93a2dbf | [] | no_license | MatthewAKelly/ccmsuite | 8c810ada908e7a957706ca8ebcde9c708f63c0e5 | b1249fcd85fedceb07f67209c368f18c47501cc8 | refs/heads/master | 2020-12-28T21:40:01.070200 | 2018-01-17T22:50:59 | 2018-01-17T22:50:59 | 17,306,060 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,726 | py | import ccm
log=ccm.log()
from ccm.lib.actr import *
class Env(ccm.Model):
result=[]
def say(self,x):
self.result.append(x)
log.x=x
class Grouped(ACTR):
focus=Buffer()
retrieval=Buffer()
memory=Memory(retrieval,threshold=-0.5,latency=1)
noise=DMNoise(memory,0.15)
partial=Partial(memory)
partial.similarity('first','second',-0.5)
partial.similarity('second','third',-0.5)
result=[]
def init():
memory.add('name:group1 parent:list position:first')
memory.add('parent:group1 name:1 position:first')
memory.add('parent:group1 name:2 position:second')
memory.add('parent:group1 name:3 position:third')
memory.add('name:group2 parent:list position:second')
memory.add('parent:group2 name:4 position:first')
memory.add('parent:group2 name:5 position:second')
memory.add('parent:group2 name:6 position:third')
memory.add('name:group3 parent:list position:third')
memory.add('parent:group3 name:7 position:first')
memory.add('parent:group3 name:8 position:second')
memory.add('parent:group3 name:9 position:third')
focus.set('start list')
def recall_first_group(focus='start ?list'):
focus.set('group first ?list')
memory.request('parent:?list position:first')
def start_recall_of_group(focus='group ?gpos ?list',retrieval='name:?groupname'):
memory.request('parent:?groupname position:first')
focus.set('item pos:first groupname:?groupname gpos:?gpos list:?list')
retrieval.clear()
def harvest_first_item(focus='item pos:first groupname:?groupname',retrieval='name:?x'):
self.parent.say(x)
focus.modify(pos='second')
memory.request('parent:?groupname position:second')
retrieval.clear()
def harvest_second_item(focus='item pos:second groupname:?groupname',retrieval='name:?x'):
self.parent.say(x)
focus.modify(pos='third')
memory.request('parent:?groupname position:third')
retrieval.clear()
def harvest_third_item(focus='item pos:third groupname:?groupname',retrieval='name:?x'):
self.parent.say(x)
focus.modify(pos='fourth')
memory.request('parent:?groupname position:fourth')
retrieval.clear()
def second_group(focus='item gpos:first list:?list',memory='error:True'):
memory.request('parent:?list position:second')
focus.set('group second ?list')
retrieval.clear()
def third_group(focus='item gpos:second list:?list',memory='error:True'):
memory.request('parent:?list position:third')
focus.set('group third ?list')
retrieval.clear()
env=Env()
env.m=Grouped()
env.run()
log.result=env.result
| [
"[email protected]"
] | |
bfba49e38303e77949de411ed0088ac40098eabf | 205f41ac0a04d14c8d7995ee66c1e5043f255a2d | /imagebot/pysix.py | 676861e86b76e1ec92598ccd3284e37ac2742e30 | [
"MIT"
] | permissive | piyushd26/imagebot | c4f6a2ac112ec84c268ce2ffa395648935ecf40e | 113ea3344b54502e11c028c1c4c391f60abe5dfe | refs/heads/master | 2022-12-25T07:02:56.151281 | 2020-10-01T14:13:19 | 2020-10-01T14:13:19 | 300,303,316 | 0 | 0 | MIT | 2020-10-01T14:12:00 | 2020-10-01T14:11:59 | null | UTF-8 | Python | false | false | 370 | py | import logging
import sys
ver = sys.version_info[0]
if ver < 3:
_logLevelNames = logging._levelNames
else:
_logLevelNames = {}
for (k, v) in logging._levelToName.items():
_logLevelNames[v] = k
def err_msg(e):
if ver < 3:
return e.message
else:
return e.msg
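# A hedged usage sketch (an addition, not part of the original module):
# the helpers above give py2/py3-neutral access to log level names.
if __name__ == '__main__':
    print(ver)                      # major Python version, e.g. 3
    print(_logLevelNames['DEBUG'])  # 10 -- maps a level name to its numeric value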
if ver < 3:
tkinter = 'Tkinter'
else:
tkinter = 'tkinter' | [
"[email protected]"
] | |
d263b08f7c598cd58f9fde23de084f6c4c2f8e7c | 3ca7f62630204be88d2280b9e60b5fecafdd4344 | /Test_task/Task_win.py | 114f518e88c58a9ecbe9231e474cf2110b407184 | [] | no_license | joinmm/Pycharm_python35 | bcee04adfa79da362df5478ac561ad616a1bb828 | eb66901d7936203f70ed2b1811fcd186c690b71f | refs/heads/master | 2022-01-22T05:49:27.725192 | 2018-10-31T06:01:34 | 2018-10-31T06:01:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,544 | py | # -*- coding: utf-8 -*-
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import sys
import os
import time
import Task
import analysis_cfg_fox
import analysis_cfg_china
#import QDesktopServices
import k_mapping
from CommonUtil import RBCommon as CLASS_COMMON_UTIL
class k_Taskwindow(Task.Ui_MainWindow,QWidget):
def __init__(self,parent=None):
super(k_Taskwindow,self).__init__()
self.setupUi(MainWindow)
        # Set the column widths of the table widgets
self.Mapping_tableWidget.setColumnWidth(1,330)
self.Plugins_tableWidget.setColumnWidth(1,130)
self.TextCMD.hide()
self.RBgroupBox_layoutWidget.hide()
#按钮功能设置
self.IPkey.clicked.connect(self.add_cmdkey)
self.Get_Button.clicked.connect(self.kGetcfg)
self.Mapping_netuse.clicked.connect(self.knetuse)
self.Mapping_add.clicked.connect(self.setBlankToMappingQTable)
self.Mapping_reduce.clicked.connect(self.clearMappingQTableItem)
self.Plugins_add.clicked.connect(self.setBlankToPluginQTable)
self.Plugins_reduce.clicked.connect(self.clearPluginQTableItem)
self.Close_Button.clicked.connect(self.kclose)
self.Input_Button.clicked.connect(self.OpenInput)
self.Output_Button.clicked.connect(self.OpenOutput)
self.Customfile_Button.clicked.connect(self.OpenCustom_files)
self.PreRender_Button.clicked.connect(self.OpenPrerender)
self.Maya_Button.clicked.connect(self.excuteMaya)
self.CMD_Button.clicked.connect(self.excuteCMD)
#self.k_inputPath = sysPath['tiles_path']
self.PlatformMode_CB.currentIndexChanged.connect(self.kchangeModel)
self.C_function_path = ''
self.custom_file = ''
self.k_one = True
self.k_ip_on = True
def kchangeModel(self):
"转换平台模式"
if self.PlatformMode_CB.currentText() in ['Fox']:
self.RBgroupBox_layoutWidget2.hide()
self.RBgroupBox_layoutWidget.show()
elif self.PlatformMode_CB.currentText() in ['China']:
self.RBgroupBox_layoutWidget2.show()
self.RBgroupBox_layoutWidget.hide()
def kGetcfg(self):
"""根据用户ID 任务ID分析数据 """
k_platform = ''
k_taskID = self.TaskID_lineEdit.text()
k_useID = self.UserID_lineEdit.text()
if self.PlatformMode_CB.currentText() in ['Fox']:
k_platform_group= [self.W2rb,self.W9rb,self.GPUrb]
for i in k_platform_group:
if i.isChecked():
k_platform = i.objectName()
elif self.PlatformMode_CB.currentText() in ['China']:
k_platform_group = [self.c_W2rb, self.c_W3rb, self.c_W4rb, self.c_W9rb, self.c_GPUrb]
for i in k_platform_group:
if i.isChecked():
k_platform = i.objectName()
        # Pop up an error if the platform, task ID or user ID is not set
if not k_platform or not k_taskID or not k_useID:
self.msg('No specified Platform or ID')
else:
if self.PlatformMode_CB.currentText() in ['Fox']:
                # Instantiate the cfg.json analyzer
cfg = analysis_cfg_fox.analysisCfg(k_platform,k_taskID,k_useID)
elif self.PlatformMode_CB.currentText() in ['China']:
cfg = analysis_cfg_china.analysisCfg(k_platform, k_taskID, k_useID)
if cfg.k_jsonerror:
self.msg('cannot find cfg.json')
else:
                # Fill in the plugins data
Plugins = cfg.analysisPlugins()
self.setItemToQTableWidget(self.Plugins_tableWidget,Plugins)
print('Finish get Plugins analysis')
                # Fill in the Maya version number
Mayaver = cfg.analysisSoft()
self.Version_lineEdit.setText(Mayaver)
print('Finish get Mayaver analysis')
                # Fill in the mapping data
Mapping = cfg.analysisMapping()
self.setItemToQTableWidget(self.Mapping_tableWidget, Mapping)
print('Finish get Mapping analysis')
Aspath= cfg.analysisPath()
                # Maya scene file directory
self.k_inputPath = Aspath[0]
                # Maya output image directory
self.k_outputPath = Aspath[1]
                # Custom files directory
self.k_custompath = Aspath[2]
                # Prerender script directory
self.C_script_path = Aspath[3]
                # B drive path
self.B_path = Aspath[4]
print('Finish get Path analysis')
                # dirmap dictionary
self.k_dirmap = ''
if self.PlatformMode_CB.currentText() in ['Fox']:
                    # Custom function paths (MayaPlugin path and custom py file path)
self.C_function_path = cfg.C_function_path
print(self.C_function_path)
self.function_path = cfg.function_path
print(self.function_path)
                    # Custom config file path
self.custom_file = os.path.normpath(os.path.join(self.C_function_path,'CustomConfig.py'))
print('Finish get Fox customfile')
elif self.PlatformMode_CB.currentText() in ['China']:
                    # Custom function paths (MayaPlugin path and custom py file path)
self.function_path = cfg.function_path
print(self.function_path)
                    # Custom config file path
self.custom_file = os.path.normpath(os.path.join(self.B_path,'custom_config',\
k_useID,'RayvisionCustomConfig.py'))
print('Finish get China customfile')
if 'mappings' in cfg.server_info:
self.k_dirmap = cfg.server_info['mappings']
print(self.k_dirmap)
def OpenInput(self):
""" Input按钮功能 """
#QDesktopServices.openUrl(QUrl(self.k_inputPath))
if os.path.exists(self.k_inputPath):
os.startfile(self.k_inputPath)
else:
self.msg('%s path is not exists' %self.k_inputPath)
def OpenOutput(self):
""" Output按钮功能 """
if os.path.exists(self.k_outputPath):
os.startfile(self.k_outputPath)
else:
            # Pop up a message box when the path does not exist
self.msg('%s path is not exists' %self.k_outputPath)
def OpenCustom_files(self):
"""Preferences按钮功能"""
if os.path.exists(self.k_custompath):
os.startfile(self.k_custompath)
else:
self.msg('%s path is not exists' %self.k_custompath)
def OpenPrerender(self):
"""prerender按钮功能"""
if os.path.exists(self.C_script_path):
os.startfile(self.C_script_path)
else:
self.msg('%s path is not exists' %self.C_script_path)
def knetuse(self):
"""根据 mapping的内容 映射盘符"""
self.getData()
k_mapping.k_mapping(self.getMappingQtab)
def kclose(self):
"""close按钮功能"""
app.exit()
def getData(self):
"""获取窗口内 Plugins Mapping maya版本 的数据"""
        # Get the Plugins data
self.getPluginsQtab = self.getItemfromQTableWidget(self.Plugins_tableWidget)
print (self.getPluginsQtab)
        # Get the Mapping data
self.getMappingQtab = self.getItemfromQTableWidget(self.Mapping_tableWidget)
print (self.getMappingQtab)
        # Get the Maya version data
self.getMayaVer = self.Version_lineEdit.text()
print (self.getMayaVer)
def excuteBefore(self):
        # Get the info from the window
self.getData()
plugin_cfg_file = {u'cg_name':'Maya','cg_version':self.getMayaVer,'plugins':self.getPluginsQtab}
if os.path.exists(self.C_function_path):
print('import C_function_path')
sys.path.append(self.C_function_path)
#__import__('MayaPlugin')
import MayaPlugin
elif os.path.exists(self.function_path):
print('import function_path')
print (self.function_path)
sys.path.append(self.function_path)
#__import__('MayaPlugin')
import MayaPlugin
print('plugin_cfg_file is %s' % plugin_cfg_file)
print('self.custom_file is %s' %self.custom_file)
maya_plugin = MayaPlugin.MayaPlugin(plugin_cfg_file,[self.custom_file])
maya_plugin.config()
def excuteMaya(self):
"""执行maya按钮"""
if self.k_one:
self.excuteBefore()
self.maya_dirmap()
self.k_one = False
cmd_str = r"C:\Program Files\Autodesk\Maya%s\bin\maya.exe" % (self.getMayaVer)
#cmd_str = r"D:\Autodesk\Maya2017\bin\maya.exe"
os.startfile('"' + cmd_str + '"')
print ('startfile %s' %cmd_str)
def excuteCMD(self):
"""执行cmd按钮"""
self.TextCMD.show()
CMDText = self.TextCMD.toPlainText()
if CMDText:
print ('excute %s' %CMDText)
if self.k_one:
self.excuteBefore()
self.maya_dirmap()
self.k_one = False
CLASS_COMMON_UTIL.cmd(CMDText,continue_on_error=True, my_shell=True)
def getItemfromQTableWidget(self,QTablename):
"""获取QTab内每个格子的数据,并组成字典,QTablename输入的数据为QTab的名字"""
        # Get the row and column counts
getRow=QTablename.rowCount()
getcolumn = QTablename.columnCount()
QTabData = {}
for row in range(getRow):
            # Get the data of each cell in the table
for column in range(getcolumn):
                # First check whether the cell is empty
if QTablename.item(row, column):
getText = QTablename.item(row, column).text()
#print (getText)
                    # Put the data into the dict
if not column and getText:
QTabData[getText] = ''
elif column == 1:
                        # First check whether the cell is empty
if QTablename.item(row, column-1):
getText_key = QTablename.item(row, column-1).text()
QTabData[getText_key] = getText
return QTabData
def setItemToQTableWidget(self,QTablename,cfg_dic):
"""为QTab的第一行加入数据,QTablename=QTab的名称,cfg_dic=数据字典 key=column 0 value=column 1"""
for Plugin in cfg_dic:
            # Convert the data to QTableWidgetItem
Plugins_Itemname = QTableWidgetItem(Plugin)
Plugins_Itemversion = QTableWidgetItem(cfg_dic[Plugin].replace('/','\\'))
            # Insert the data into the first row
QTablename.insertRow(0)
QTablename.setItem(0, 0, Plugins_Itemname)
QTablename.setItem(0, 1, Plugins_Itemversion)
        # Select whole rows when selecting
QTablename.setSelectionBehavior(QAbstractItemView.SelectRows)
def setBlankToMappingQTable(self):
"""加入空白行"""
self.Mapping_tableWidget.insertRow(0)
#选择的时候 选择一整行
self.Mapping_tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
def clearMappingQTableItem(self):
#self.k_tabWidget.clearContents()
krow=self.Mapping_tableWidget.currentRow()
self.Mapping_tableWidget.removeRow(krow)
def setBlankToPluginQTable(self):
"""加入空白行"""
self.Plugins_tableWidget.insertRow(0)
        # Select whole rows when selecting
self.Plugins_tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
def clearPluginQTableItem(self):
#self.k_tabWidget.clearContents()
krow=self.Plugins_tableWidget.currentRow()
self.Plugins_tableWidget.removeRow(krow)
def msg(self,message):
        kMessage = QMessageBox.information(self, # Use an information message box
"出问题啦,请注意!!!",
message,
QMessageBox.Yes)
def add_cmdkey(self):
"""添加IP 凭据"""
keyplan1 = {'user': 'enfuzion', 'password': 'ruiyun2016'}
keyplan2 = {'user': 'enfuzion', 'password': 'ruiyun2017'}
keyplan3 = {'user': 'enfuzion', 'password': 'Raywing@host8'}
keyplan4 = {'user': 'enfuzion', 'password': 'Raywing@host8-2'}
keyplan5 = {'user': 'enfuzion', 'password': 'Raywing@host8-9'}
keyplan6 = {'user': 'tdadmin', 'password': 'Ray@td852'}
keyplan7 = {'user': 'administrator', 'password': 'Ruiyun@2016'}
k_ipsort = {'w2': ['10.60.100.101','10.60.100.102','10.60.100.103','10.60.100.104','10.60.200.101', \
'10.60.200.102','10.60.200.103','10.60.200.104','10.60.100.201','10.60.100.202', \
'10.60.200.201','10.60.200.202'],\
'w3': ['10.30.100.101','10.30.100.102', '10.30.100.102', '10.30.100.201', '10.30.100.202'], \
'w4': ['10.40.100.101', '10.40.100.102', '10.40.100.103', '10.40.100.201', '10.40.100.202'], \
'w9': ['10.80.100.101','10.80.100.102', '10.80.100.103', '10.80.100.104', '10.70.242.101', '10.70.242.102', \
'10.80.100.201', '10.80.100.202', '10.80.100.203','10.70.242.201'],\
'gpu': ['10.90.100.101', '10.90.100.102', '10.90.100.103', '10.90.100.201', '10.90.100.202'],\
'B_plugins':['10.60.100.151','10.60.200.150','10.60.200.151','10.60.100.152','10.80.243.50','10.80.243.51',\
'10.30.100.151','10.30.100.152','10.40.100.151','10.40.100.152','10.90.96.51']\
}
self.k_cmdkey = {}
for ip_key in k_ipsort:
if ip_key in ['w2']:
for ip_value in k_ipsort[ip_key]:
self.k_cmdkey.update({ip_value: keyplan4})
if ip_key in ['gpu']:
for ip_value in k_ipsort[ip_key]:
self.k_cmdkey.update({ip_value: keyplan1})
if ip_key in ['w3', 'w4']:
for ip_value in k_ipsort[ip_key]:
self.k_cmdkey.update({ip_value: keyplan3})
if ip_key in ['w9']:
for ip_value in k_ipsort[ip_key]:
self.k_cmdkey.update({ip_value: keyplan5})
if ip_key in ['B_plugins']:
for ip_value in k_ipsort[ip_key]:
self.k_cmdkey.update({ip_value: keyplan6})
for k_ip in self.k_cmdkey:
set_cmdkey = 'cmdkey /add:{0} /user:{1} /password:{2}'.format(k_ip,self.k_cmdkey[k_ip]['user'],self.k_cmdkey[k_ip]['password'])
CLASS_COMMON_UTIL.cmd(set_cmdkey, continue_on_error=True, my_shell=True)
        print('Finished adding address credentials for all platforms!')
def maya_dirmap(self):
"""在当前路径生成 usersetup.py文件"""
current_file = os.path.realpath(__file__)
current_path = os.path.dirname(current_file)
k_usersetup_mel = os.path.normpath(os.path.join(current_path,'userSetup.py'))
print(k_usersetup_mel)
if self.k_dirmap:
with open(k_usersetup_mel, "w") as f:
f.write("import maya.cmds as cmds\n")
f.write("cmds.dirmap( en=True )\n")
for i in self.k_dirmap:
if not i.startswith("$"):
old_path = i
new_path = self.k_dirmap[i]
f.write("cmds.dirmap ( m=('%s' , '%s'))\n" % (old_path,
new_path))
f.write("print('Mapping successfully')\n")
_MAYA_SCRIPT_PATH = os.environ.get('MAYA_SCRIPT_PATH')
os.environ['MAYA_SCRIPT_PATH'] = (_MAYA_SCRIPT_PATH + r";" if _MAYA_SCRIPT_PATH else "") + current_path
if __name__ == '__main__':
app = QApplication(sys.argv)
MainWindow = QMainWindow()
kwin = k_Taskwindow()
    MainWindow.setWindowTitle(u'Maya Environment Setup Tool v1.4')
MainWindow.show()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
20485de31fef16693a9db32635516cddb05a8c2c | 6b6f68f507746e3e39b0e8789af5d044e27d6b0a | /Math/0172_FactorialTrailingZeroes_E.py | 147075d83adb6181237a370b650f93912329e869 | [] | no_license | PFZ86/LeetcodePractice | bb0012d8b3120451dda1745875836278d3362e45 | 6db9db1934bc0a8142124d8b56bf6c07bdf43d79 | refs/heads/master | 2021-08-28T08:43:27.343395 | 2021-08-17T20:38:32 | 2021-08-17T20:38:32 | 230,925,656 | 1 | 1 | null | 2021-08-17T20:38:32 | 2019-12-30T14:01:27 | Python | UTF-8 | Python | false | false | 315 | py | # https://leetcode.com/problems/factorial-trailing-zeroes/
# Solution 1:
class Solution(object):
def trailingZeroes(self, n):
"""
:type n: int
:rtype: int
"""
result = 0
while n:
n /= 5
result += n
return result
| [
"[email protected]"
] | |
e4a95cd7e79a23db19fd4e93a195f3590509cd82 | 7f8f49e77c3d71531688c41e5147c75257a661f6 | /scripts/mpl_stylesheet.py | 83c9301c5800b94fbc18c7a6f8a4e2ab988f1ced | [] | no_license | francosimonetti/trans-eqtl-pipeline | 4ef83e007ee9d4e5aeb8650b30e4684b4150d6e8 | 877b835d832649056cd80cbb7feeedcebebcfb69 | refs/heads/master | 2023-06-19T18:40:55.354166 | 2021-03-02T20:34:29 | 2021-03-02T20:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,144 | py | import matplotlib
from matplotlib import cycler
## Resources from:
## https://matplotlib.org/users/customizing.html
## How to change color and plot styles?
## https://matplotlib.org/users/dflt_style_changes.html
## matplotlib.rcParams[] =
def banskt_presentation(black = '#333333', linewidth = 2, ticksize = 8, fontsize = 28, padding = 10, fontfamily = 'latex', colors = 'banskt'):
if colors == 'banskt':
mcolors = banskt_colors()
elif colors == 'kelly':
mcolors = kelly_colors()
if fontfamily == 'latex':
matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage[sfdefault,scaled=.85, lining]{FiraSans}',
r'\usepackage[cmintegrals]{newtxsf}',
r'\usepackage{microtype}',
]
matplotlib.rcParams['text.usetex'] = True
elif fontfamily == 'latex-clearsans':
matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage[scaled=.86]{ClearSans}',
r'\usepackage[libertine]{newtxmath}',
r'\usepackage{microtype}',
]
elif fontfamily == 'system':
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'DejaVu Sans'
matplotlib.rcParams['mathtext.fontset'] = 'stixsans'
# Size
matplotlib.rcParams['figure.figsize'] = 8, 8
# Fonts
matplotlib.rcParams['font.size'] = fontsize
matplotlib.rcParams['text.color'] = black
matplotlib.rcParams['axes.titlesize'] = fontsize * 1.2
matplotlib.rcParams['axes.labelsize'] = fontsize
matplotlib.rcParams['axes.labelweight'] = 'normal'
matplotlib.rcParams['axes.labelcolor'] = black
matplotlib.rcParams['xtick.labelsize'] = fontsize
matplotlib.rcParams['ytick.labelsize'] = fontsize
matplotlib.rcParams['legend.fontsize'] = fontsize
# Axes
#matplotlib.rcParams['axes.titlepad'] = 50
matplotlib.rcParams['axes.edgecolor'] = black
matplotlib.rcParams['axes.facecolor'] = 'white'
matplotlib.rcParams['axes.labelpad'] = 20
matplotlib.rcParams['axes.linewidth'] = linewidth
# Legend
matplotlib.rcParams['legend.facecolor'] = 'inherit'
matplotlib.rcParams['legend.edgecolor'] = black
matplotlib.rcParams['legend.frameon'] = False
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.scatterpoints'] = 1
matplotlib.rcParams['legend.markerscale'] = 1.0
# Dimensions as fraction of fontsize
matplotlib.rcParams['legend.borderpad'] = 0
matplotlib.rcParams['legend.labelspacing'] = 0.3
matplotlib.rcParams['legend.handlelength'] = 0.5
matplotlib.rcParams['legend.handleheight'] = 0.9
matplotlib.rcParams['legend.handletextpad'] = 0.5
# Ticks
matplotlib.rcParams['xtick.major.top'] = False
matplotlib.rcParams['xtick.major.bottom'] = True
matplotlib.rcParams['xtick.minor.top'] = False
matplotlib.rcParams['xtick.minor.bottom'] = False
matplotlib.rcParams['ytick.major.left'] = True
matplotlib.rcParams['ytick.major.right'] = False
matplotlib.rcParams['ytick.minor.left'] = False
matplotlib.rcParams['ytick.minor.right'] = False
matplotlib.rcParams['xtick.major.size'] = ticksize
matplotlib.rcParams['xtick.minor.size'] = 2 * ticksize / 3.0
matplotlib.rcParams['ytick.major.size'] = ticksize
matplotlib.rcParams['ytick.minor.size'] = 2 * ticksize / 3.0
matplotlib.rcParams['xtick.major.pad'] = padding
matplotlib.rcParams['xtick.minor.pad'] = padding
matplotlib.rcParams['ytick.major.pad'] = padding
matplotlib.rcParams['ytick.minor.pad'] = padding
matplotlib.rcParams['xtick.major.width'] = linewidth
matplotlib.rcParams['xtick.minor.width'] = linewidth
matplotlib.rcParams['ytick.major.width'] = linewidth
matplotlib.rcParams['ytick.minor.width'] = linewidth
matplotlib.rcParams['xtick.color'] = black
matplotlib.rcParams['ytick.color'] = black
# Color cycle
matplotlib.rcParams['axes.prop_cycle'] = cycler('color', mcolors)
# Histogram
matplotlib.rcParams['hist.bins'] = 20
# Patches
# matplotlib.rcParams['patch.facecolor'] = mcolors[0] # doesn't have any effect, comes from prop_cycle
matplotlib.rcParams['patch.edgecolor'] = black
matplotlib.rcParams['patch.linewidth'] = linewidth / 2
matplotlib.rcParams['patch.force_edgecolor'] = True
# For scatter plot, show only left and bottom axes
matplotlib.rcParams['axes.spines.left'] = True
matplotlib.rcParams['axes.spines.bottom'] = True
matplotlib.rcParams['axes.spines.top'] = True
matplotlib.rcParams['axes.spines.right'] = True
return
def banskt_colors():
banskt_colors_hex = [
'#2D69C4', # blue
'#CC2529', # red
'#93AA00', # Vivid Yellowish Green
'#535154', # gray
'#6B4C9A', # purple
'#FFB300', # Vivid Yellow
'#922428', # dark brown
'#948B3D', # olive
]
return banskt_colors_hex
def kelly_colors():
kelly_colors_hex = [
'#FFB300', # Vivid Yellow
'#803E75', # Strong Purple
'#FF6800', # Vivid Orange
'#A6BDD7', # Very Light Blue
'#C10020', # Vivid Red
'#CEA262', # Grayish Yellow
'#817066', # Medium Gray
# The following don't work well for people with defective color vision
'#007D34', # Vivid Green
'#F6768E', # Strong Purplish Pink
'#00538A', # Strong Blue
'#FF7A5C', # Strong Yellowish Pink
'#53377A', # Strong Violet
'#FF8E00', # Vivid Orange Yellow
'#B32851', # Strong Purplish Red
'#F4C800', # Vivid Greenish Yellow
'#7F180D', # Strong Reddish Brown
'#93AA00', # Vivid Yellowish Green
'#593315', # Deep Yellowish Brown
'#F13A13', # Vivid Reddish Orange
'#232C16', # Dark Olive Green
]
return kelly_colors_hex
| [
"[email protected]"
] | |
dfc4f1546e396f096141ac3c4bee2450af2b669a | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/g8a.py | 11499735584b852d0a13bce338767c285daba558 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'g8A':
printFunction(data[1:])
else:
print 'ERROR'
return
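# A hedged usage note (an addition, inferred from printFunction above): input
# lines are expected to use standalone quote tokens around the words to print,
# e.g. the line
#   g8A " hello world "
# prints "hello world", and any other first token prints 'ERROR' and stops.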
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] |