blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3-616) | content_id (string, len 40) | detected_licenses (sequence, len 0-112) | license_type (2 classes) | repo_name (string, len 5-115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (188 classes) | content (string, len 3 to 10.2M) | authors (sequence, len 1) | author_id (string, len 1-132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d6b8983002b4ac5f964d347fa89ea307ea1b1d9b | c3126ad2a8b6c48419892ead051b0a6dbdad6efc | /route/__init__.py | c6aab6985688ac931e8e7defecd529546ebbbcc0 | [] | no_license | xieyalong/python_tronado_web | c927df649c24041451f8c4aa1cd99bddfa0b8800 | 571187a54da27d8aa460268b470481fa7a758b08 | refs/heads/master | 2020-09-14T11:54:35.448127 | 2020-01-09T10:02:36 | 2020-01-09T10:02:36 | 223,121,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | # Note: routing | [
"123456"
] | 123456 |
83d8e7ca934db2de8ed827172cff2f8794ca29de | 685f4474699d769dae88537c69f5517ac13a8431 | /EL258.py | b7ef048ddcb0d6128037a7c929726637dd19012c | [] | no_license | Pumafied/Project-Euler | 7466f48e449b7314598c106398c0be0424ae72d5 | 0c3e80a956893ce1881a9694131d52b156b9d3d8 | refs/heads/master | 2016-09-05T22:45:09.733696 | 2013-04-20T04:46:48 | 2013-04-20T04:46:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | # A sequence is defined as:
# gk = 1, for 0 <= k <= 1999
# gk = gk-2000 + gk-1999, for k >= 2000.
# Find gk mod 20092010 for k = 10^18. | [
"[email protected]"
] | |
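The file above only states the problem. A minimal sketch of the recurrence in Python, assuming small k; the stated k = 10^18 would need matrix exponentiation or a Kitamasa-style method, which this direct loop does not attempt:

def g(k, mod=20092010):
    # direct evaluation of the lagged recurrence; feasible only for small k
    seq = [1] * 2000  # g_0 .. g_1999 are all 1
    for i in range(2000, k + 1):
        seq.append((seq[i - 2000] + seq[i - 1999]) % mod)
    return seq[k]

print(g(2000))  # 2, since g_2000 = g_0 + g_1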
299b53e0d2dda81682265b3cd1289d5cb5b5425a | 5f806ddab1ca60e3cbd576f57768940bbb5a62c3 | /sample/__main__.py | b79fce6d3c2b9325b967544f567746a87a0e77fc | [
"BSD-3-Clause"
] | permissive | pyapp-org/pyapp-messaging | 1f56c3a1ab61df196c44cb7fc320fad0e8796526 | 159dede0d9a9823997dd37641cc2f46b139d24e3 | refs/heads/develop | 2022-10-10T15:33:58.625747 | 2020-08-20T03:01:40 | 2020-08-20T03:01:40 | 195,380,641 | 1 | 0 | NOASSERTION | 2022-09-29T00:02:25 | 2019-07-05T09:24:28 | Python | UTF-8 | Python | false | false | 67 | py | from sample.cli import main
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
e029887d12b7ae495e426e2301094b410b3f5302 | 46f1e7c1d81f271b2a3357b2e133049893725d82 | /Solution/ๅจๆ่งๅ/ไธ็ปด/70. ็ฌๆฅผๆขฏ/ๅจๆ่งๅ1.py | 5f06161b6ab8f5ae526b7d050d229ba88cb1390b | [] | no_license | WeiS49/leetcode | 203fed67773592a45186c99fd6a2f16dff426c3e | 76ddcec959c18164ae7efb564f2287981f5ab5ca | refs/heads/master | 2023-08-13T16:06:45.152951 | 2021-09-26T15:36:06 | 2021-09-26T15:36:06 | 319,033,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #
# @lc app=leetcode.cn id=70 lang=python3
#
# [70] Climbing Stairs
# Dynamic programming: rather than evaluating f(n) = f(n-1) + f(n-2) recursively,
# find the recurrence first and count the number of ways with plain addition.
# Time complexity: a single loop, O(n)
# Space complexity: grows linearly with the number of stairs n, O(n)
# @lc code=start
class Solution:
def climbStairs(self, n: int) -> int:
dp = [0] * (n + 1) # array of ways per step; dp[0] is included, hence length n + 1
dp[0] = dp[1] = 1 # why dp[0]? the 0th step counts as one way (the empty climb)
for i in range(2, n + 1): # apply the recurrence from step 2 up to step n
dp[i] = dp[i - 1] + dp[i - 2] # addition replaces the recursive calls
return dp[-1] # the last element is the final number of ways
# @lc code=end
| [
"[email protected]"
] | |
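A quick usage check for the solution above (illustrative values; Solution as defined in the file):

s = Solution()
assert s.climbStairs(2) == 2  # 1+1 or 2
assert s.climbStairs(5) == 8  # dp ends up [1, 1, 2, 3, 5, 8]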
92e4f8e251b201a9642253fbf8807dba64c8fb89 | f3b99fdd48bc38dbe5d972c07dcbce204e5cff2b | /Projetos_Django/project/products/views.py | 97b0c6e78e026d3b71de54c1569823a558248cc6 | [] | no_license | Aleleonel/Python_codes | 5b66251d45bbf3678451b6380ca4d5a81f416e25 | fd535da3f2f4c510b4e85f9ec1dee59c9d07ffcb | refs/heads/master | 2020-04-03T16:44:24.923059 | 2018-12-06T00:16:21 | 2018-12-06T00:16:21 | 155,416,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | from django.shortcuts import render, redirect
from .models import Product
from .forms import ProductForm
def list_products(request):
products = Product.objects.all()
return render(request, 'products.html', {'products': products})
def create_product(request):
form = ProductForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('list_products')
return render(request, 'products-form.html', {'form': form})
def update_product(request, id):
product = Product.objects.get(id=id)
form = ProductForm(request.POST or None, instance=product)
if form.is_valid():
form.save()
return redirect('list_products')
return render(request, 'products-form.html', {'form': form, 'product': product})
def delete_product(request, id):
product = Product.objects.get(id=id)
if request.method == 'POST':
product.delete()
return redirect('list_products')
return render(request, 'prod-delete-confirm.html', {'product':product}) | [
"[email protected]"
] | |
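The views above resolve route names such as 'list_products'; a hypothetical urls.py wiring them up (the module path and URL patterns here are assumptions, not part of the original repo):

from django.urls import path
from products import views

urlpatterns = [
    path('', views.list_products, name='list_products'),
    path('new/', views.create_product, name='create_product'),
    path('<int:id>/edit/', views.update_product, name='update_product'),
    path('<int:id>/delete/', views.delete_product, name='delete_product'),
]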
9aa8968084570663211a42b8fd02fb7b0d5d36e1 | 0b9802d039ffee38fd666659719034cf7e42c04b | /faker/factory.py | bdf466c29ae071633f3150b19b9ff187d06e7c07 | [
"MIT"
] | permissive | SysGrove/faker | e1f633f3231ee94fdb82a15518ec8ecf899c5385 | 61c1aa3eeece341c0984e95cd3128bcdf3797a78 | refs/heads/master | 2021-01-16T18:42:56.837210 | 2013-07-23T14:46:09 | 2013-07-23T14:46:09 | 11,422,041 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,912 | py | import sys
from faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES
from faker import Generator
from faker import providers
class Factory(object):
@classmethod
def create(cls, locale=None, providers=None ):
# fix locale to package name
locale = locale.replace('-','_') if locale else DEFAULT_LOCALE
if '_' in locale:
locale = locale[:2] + locale[2:].upper()
if locale not in AVAILABLE_LOCALES:
raise AttributeError('Invalid configuration for faker locale "%s"' % locale)
providers = providers or DEFAULT_PROVIDERS
generator = Generator()
for provider in providers:
providerClass = cls._getProviderClass( provider, locale )
generator.addProvider( providerClass(generator) )
return generator
@classmethod
def _getProviderClass(cls, provider, locale=''):
providerClass = cls._findProviderClass( provider, locale )
if providerClass:
return providerClass
if locale and locale != DEFAULT_LOCALE:
# fallback to default locale
providerClass = cls._findProviderClass( provider, DEFAULT_LOCALE )
if providerClass:
return providerClass
# fallback to no locale
providerClass = cls._findProviderClass( provider )
if providerClass:
return providerClass
raise ValueError('Unable to find provider "%s" with locale "%s"' % (provider, locale))
@classmethod
def _findProviderClass(cls, provider, locale=''):
path = "{providers}{lang}.{provider}".format(
providers=providers.__package__,
lang='.' + locale if locale else '',
provider=provider
)
try:
__import__(path)
except ImportError:
return None
return sys.modules[path].Provider
| [
"[email protected]"
] | |
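A usage sketch for the factory above; the available methods depend on which providers get loaded, so generator.name() here assumes the default person provider is present:

generator = Factory.create('en_US')
print(generator.name())  # a random locale-appropriate name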
c02481c7ac213d1465183d1cc02ade2e36da39ae | 076d4b8a007fd01e41b357342aad100c87367562 | /venv/bin/rst2xetex.py | 93ea0bba24a5121b9d01203e0a5e6f59a88857ed | [] | no_license | AlexanderMcNulty/publicpolls | b755c0922949018125d5eb18ac8b07fa087f97b7 | 56f340f5d9d044af65262f4099f146d6e22af754 | refs/heads/master | 2020-04-15T04:19:59.931533 | 2019-01-07T04:31:03 | 2019-01-07T04:31:03 | 164,378,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | #!/home/ammc/pyramid/publicpolls/venv/bin/python3
# $Id: rst2xetex.py 7847 2015-03-17 17:30:47Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources for compilation with the Unicode-aware TeX variants '
'XeLaTeX or LuaLaTeX. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='xetex', description=description)
| [
"[email protected]"
] | |
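As the description string notes, the script reads reStructuredText from <source> (default stdin) and writes LaTeX to <destination> (default stdout); a typical invocation, with placeholder file names, is python rst2xetex.py input.rst output.tex, with the resulting output.tex compiled by XeLaTeX or LuaLaTeX.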
fb4850984909d60534cbe43c3bce4336a65383b1 | 76dd8343cb5d04fec631c1711a5642e6f83d8ae2 | /python/oneflow/test/modules/test_roll.py | 6f378f8cf56fe1fa6d4ba64262039cbc0eb7ccdc | [
"Apache-2.0"
] | permissive | weinapianyun/oneflow | 56c580ca2d6019f7d3e184a476ee9cb0699eea3e | 748501a5383f50bf9f3a5d3b3da81d4f31b425de | refs/heads/master | 2023-09-03T05:40:03.313826 | 2021-11-22T08:44:34 | 2021-11-22T08:44:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,875 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
from test_util import GenArgList
import torch
def _test_roll(test_case, device):
torch_x = torch.rand(
(2, 3, 5, 10, 20), device=device, dtype=torch.float32, requires_grad=True
)
torch_grad = torch.rand_like(torch_x, device=device)
shifts = (
np.random.randint(-100, 100),
np.random.randint(-100, 100),
np.random.randint(-100, 100),
np.random.randint(-100, 100),
)
dims = (0, 2, 3, 4)
torch_y = torch.roll(torch_x, shifts, dims)
torch_y.backward(torch_grad)
of_x = flow.tensor(
torch_x.detach().cpu().numpy(),
device=device,
dtype=flow.float32,
requires_grad=True,
)
of_y = flow.roll(of_x, shifts, dims)
of_grad = flow.tensor(torch_grad.cpu().numpy(), device=device, dtype=flow.float32)
of_y.backward(of_grad)
test_case.assertTrue(np.array_equal(of_y.numpy(), torch_y.detach().cpu().numpy()))
test_case.assertTrue(np.array_equal(of_x.grad.numpy(), torch_x.grad.cpu().numpy()))
def _test_roll_single_dims(test_case, device):
torch_x = torch.rand(
(2, 3, 5, 10, 20), device=device, dtype=torch.float32, requires_grad=True
)
torch_grad = torch.rand_like(torch_x, device=device)
shifts = np.random.randint(-100, 100)
dims = np.random.randint(0, 4)
torch_y = torch.roll(torch_x, shifts, dims)
torch_y.backward(torch_grad)
of_x = flow.tensor(
torch_x.detach().cpu().numpy(),
device=device,
dtype=flow.float32,
requires_grad=True,
)
of_y = flow.roll(of_x, shifts, dims)
of_grad = flow.tensor(torch_grad.cpu().numpy(), device=device, dtype=flow.float32)
of_y.backward(of_grad)
test_case.assertTrue(np.array_equal(of_y.numpy(), torch_y.detach().cpu().numpy()))
test_case.assertTrue(np.array_equal(of_x.grad.numpy(), torch_x.grad.cpu().numpy()))
def _test_roll_none_dims(test_case, device):
torch_x = torch.rand(
(2, 3, 5, 10, 20), device=device, dtype=torch.float32, requires_grad=True
)
torch_grad = torch.rand_like(torch_x, device=device)
shifts = np.random.randint(-100, 100)
dims = None
torch_y = torch.roll(torch_x, shifts, dims)
torch_y.backward(torch_grad)
of_x = flow.tensor(
torch_x.detach().cpu().numpy(),
device=device,
dtype=flow.float32,
requires_grad=True,
)
of_y = flow.roll(of_x, shifts, dims)
of_grad = flow.tensor(torch_grad.cpu().numpy(), device=device, dtype=flow.float32)
of_y.backward(of_grad)
test_case.assertTrue(np.array_equal(of_y.numpy(), torch_y.detach().cpu().numpy()))
test_case.assertTrue(np.array_equal(of_x.grad.numpy(), torch_x.grad.cpu().numpy()))
@flow.unittest.skip_unless_1n1d()
class TestRoll(flow.unittest.TestCase):
def test_expand_compare_with_torch(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_roll,
_test_roll_single_dims,
_test_roll_none_dims,
]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
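For reference, the roll semantics the tests above compare against torch, as a minimal sketch (values chosen for illustration):

import oneflow as flow

t = flow.tensor([1, 2, 3, 4])
print(flow.roll(t, 1, 0))   # [4, 1, 2, 3]: elements shift right by one and wrap around
print(flow.roll(t, -1, 0))  # [2, 3, 4, 1]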
985a0b85e8c772412f08772053bd8a1972ca244d | 0494c9caa519b27f3ed6390046fde03a313d2868 | /build/masters/master.client.drmemory/master.cfg | 6ba0741da8090cb099e1a2646e67909aef6a3892 | [] | no_license | mhcchang/chromium30 | 9e9649bec6fb19fe0dc2c8b94c27c9d1fa69da2c | 516718f9b7b95c4280257b2d319638d4728a90e1 | refs/heads/master | 2023-03-17T00:33:40.437560 | 2017-08-01T01:13:12 | 2017-08-01T01:13:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,506 | cfg | # -*- python -*-
# ex: set syntax=python:
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# It has one job: define a dictionary named BuildmasterConfig. This
# dictionary has a variety of keys to control different aspects of the
# buildmaster. They are documented in docs/config.xhtml .
from buildbot.changes import svnpoller
from buildbot.status.mail import MailNotifier
from buildbot import scheduler
from master import master_utils
from master import slaves_list
from master.factory import drmemory_factory
from master.factory import chromium_factory
import config
import master_site_config
ActiveMaster = master_site_config.DrMemory
MASTER_HOST = ActiveMaster.master_host
MAIL_NOTIFIER = ActiveMaster.is_production_host
MASTER_PORT = ActiveMaster.master_port
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
config.DatabaseSetup(c, require_dbconfig=ActiveMaster.is_production_host)
# 'slavePortnum' defines the TCP port to listen on. This must match the value
# configured into the buildslaves (with their --master option)
c['slavePortnum'] = ActiveMaster.slave_port
slaves = slaves_list.SlavesList('slaves.cfg', 'DrMemory')
####### CHANGESOURCES
from buildbot.changes.svnpoller import SVNPoller
drm_poller = SVNPoller(svnurl=drmemory_factory.drm_svnurl,
pollinterval=60,
histmax=50,
svnbin='/usr/bin/svn',
)
c['change_source'] = [ drm_poller ]
####### SCHEDULERS
## configure the Schedulers
MAINBOTS = [
["win", ["xp", "vista_x64", "7_x64", "8_x64"]],
["linux", ["lucid_x64"]],
]
DRM_BUILDERS = []
for (os, os_versions) in MAINBOTS:
for version in os_versions:
DRM_BUILDERS.append("%s-%s-drm" % (os, version))
# We expect these bots to be green. xp and vista aren't green yet, and the
# stability bots have regressed.
STABLE_BUILDERS = ["win-builder", "linux-builder",
"win-7_x64-drm", "linux-lucid_x64-drm",
"linux-cr-builder", "linux-cr",
"win7-cr-builder", "win7-cr",
# If we add more *-cr bots we could consider sharing a
# single cr build, but these build just once a week, and
# only target component-config tests (~15 mins to build),
# so sharing wouldn't buy us much.
"win8-cr-builder", "win8-cr",
]
c['schedulers'] = []
c['schedulers'].append(scheduler.Scheduler(
name="all",
branch=None,
treeStableTimer=60,
builderNames=(["win-builder"] +
["linux-builder"] +
DRM_BUILDERS)))
c['schedulers'].append(scheduler.Periodic(
name="periodic",
branch=None,
periodicBuildTimer=12*60*60, # Every 12 hours
builderNames=DRM_BUILDERS))
c['schedulers'].append(scheduler.Periodic(
name="periodic_stable",
branch=None,
periodicBuildTimer=2*60*60, # Every 2 hours
builderNames=["win8-cr", "win7-cr", "linux-cr"]))
c['schedulers'].append(scheduler.Nightly(
name='weekly',
branch=None,
dayOfWeek=6, # Monday=0, Sunday=6
properties={'is_weekly': True},
builderNames=['win8-cr-builder',
'win7-cr-builder',
'linux-cr-builder']))
####### BUILDERS
F_DRM = drmemory_factory.CreateDrMFactory
F_DRMPACK = drmemory_factory.CreateDrMPackageFactory
F_WIN_CR = drmemory_factory.CreateWinChromeFactory
F_LINUX_CR = drmemory_factory.CreateLinuxChromeFactory
def win_cr_build():
return chromium_factory.ChromiumFactory('src/build', 'win32')
f_win_cr_builder = win_cr_build().ChromiumFactory(
target='Debug',
slave_type='Builder',
project='all.sln;chromium_builder_dbg_drmemory_win',
factory_properties={
'lkgr': True, # Try to avoid bad Chrome revs.
'gclient_env': {
'GYP_DEFINES': 'build_for_tool=drmemory component=shared_library'
},
'package_pdb_files': True,
},
)
def linux_cr_build():
return chromium_factory.ChromiumFactory('src/out', 'linux2')
f_linux_cr_builder = linux_cr_build().ChromiumFactory(
target='Release',
slave_type='Builder',
options=(['--build-tool=ninja', '--compiler=goma'] +
drmemory_factory.LINUX_CHROME_TESTS),
factory_properties={
'lkgr': True, # Try to avoid bad Chrome revs.
'gclient_env': {
'GYP_DEFINES': 'build_for_tool=drmemory component=shared_library',
},
},
)
c['builders'] = []
# Bots that run on every commit: the pre-commit suite and the packager.
for (os, os_versions) in MAINBOTS:
c['builders'].append({'name': '%s-builder' % os,
'factory': F_DRMPACK(os == "win")})
for version in os_versions:
c['builders'].append({'name': '%s-%s-drm' % (os, version),
'factory': F_DRM(os == 'win')})
# Miscellaneous stability bots that run periodically.
c['builders'].append({'name': 'win7-cr-builder',
'factory': f_win_cr_builder,
'auto_reboot': False,
})
c['builders'].append({'name': 'win7-cr',
'factory': F_WIN_CR('win7-cr-builder'),
'auto_reboot': True, # Kill stale processes
})
c['builders'].append({'name': 'win8-cr-builder',
'factory': f_win_cr_builder,
'auto_reboot': False,
})
c['builders'].append({'name': 'win8-cr',
'factory': F_WIN_CR('win8-cr-builder'),
'auto_reboot': True, # Kill stale processes
})
c['builders'].append({'name': 'linux-cr-builder',
'factory': f_linux_cr_builder,
'auto_reboot': False,
})
c['builders'].append({'name': 'linux-cr',
'factory': F_LINUX_CR(),
'auto_reboot': False,
})
####### BUILDSLAVES
for builder in c['builders']:
builder['slavenames'] = slaves.GetSlavesName(builder=builder['name'])
# The 'slaves' list defines the set of allowable buildslaves. List all the
# slaves registered to a builder. Remove dupes.
c['slaves'] = master_utils.AutoSetupSlaves(c['builders'],
config.Master.GetBotPassword())
# Make sure everything works together.
master_utils.VerifySetup(c, slaves)
####### STATUS TARGETS
# Adds common status and tools to this master.
master_utils.AutoSetupMaster(c, ActiveMaster)
c['status'].append(MailNotifier(fromaddr="[email protected]",
lookup="gmail.com", # add @gmail.com if "@" is not in the commiter's ID
extraRecipients=['[email protected]'],
builders=STABLE_BUILDERS,
mode='problem'))
# Keep last build logs, the default is too low.
c['buildHorizon'] = 1000
c['logHorizon'] = 500
# Must be at least 2x the number of slaves.
c['eventHorizon'] = 200
# Must be at least 1x the number of builds listed in console.
c['buildCacheSize'] = 60
####### DEBUGGING OPTIONS
# if you set 'debugPassword', then you can connect to the buildmaster with
# the diagnostic tool in contrib/debugclient.py . From this tool, you can
# manually force builds and inject changes, which may be useful for testing
# your buildmaster without actually commiting changes to your repository (or
# before you have a functioning 'sources' set up). The debug tool uses the
# same port number as the slaves do: 'slavePortnum'.
#c['debugPassword'] = 'debugpassword'
# if you set 'manhole', you can ssh into the buildmaster and get an
# interactive python shell, which may be useful for debugging buildbot
# internals. It is probably only useful for buildbot developers. You can also
# use an authorized_keys file, or plain telnet.
#from buildbot import manhole
#c['manhole'] = manhole.PasswordManhole('tcp:9999:interface=127.0.0.1',
# 'admin', 'password')
####### PROJECT IDENTITY
# the 'projectName' string will be used to describe the project that this
# buildbot is working on. For example, it is used as the title of the
# waterfall HTML page. The 'projectURL' string will be used to provide a link
# from buildbot HTML pages to your project's home page.
c['projectName'] = ActiveMaster.project_name
c['projectURL'] = config.Master.project_url
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server (usually the html.Waterfall page) is visible. This
# typically uses the port number set in the Waterfall 'status' entry, but
# with an externally-visible host name which the buildbot cannot figure out
# without some help.
c['buildbotURL'] = 'http://build.chromium.org/p/client.drmemory/'
| [
"[email protected]"
] | |
8d7c1c02839e814bee3d5d5ab60f6f5d06442c78 | 90174b56d08ca79d30d3b1bcace14fa72e228532 | /tests/experiments/test_prior.py | da07cd05bcdd928f94e39071db8bbe4863671d1b | [] | no_license | miclaraia/swap-tools | 559cebfb112649dcaa61f52c278fdfbf86ee5ece | fb3468a6fef254cf43e46373c940d0a867c4445d | refs/heads/master | 2021-01-15T20:08:46.928830 | 2017-10-03T19:53:54 | 2017-10-03T19:53:54 | 99,840,718 | 0 | 0 | null | 2017-10-03T19:53:55 | 2017-08-09T18:30:13 | Python | UTF-8 | Python | false | false | 2,115 | py |
from swaptools.experiments.prior import Prior
import swaptools.experiments.config as config
import swaptools.experiments.db.experiment as edb
import swaptools.experiments.db.trials as tdb
from swaptools.experiments.iterators import ValueIterator as VI
from unittest.mock import patch, MagicMock
import pytest
@pytest.fixture(scope='module')
def override():
config.experiments.name = 'testexperiments'
patch.object(tdb.Trials, 'next_id', 0)
patch.object(edb.Experiments, 'next_id', 0)
def generate():
prior = VI.range(.2, .8, .2)
golds = VI.single(100)
series = VI.range(1, 3, 1)
kwargs = {'name': None, 'description': None}
e = Prior.new(prior, golds, series, **kwargs)
gg = MagicMock()
gg.golds = {i: i for i in range(200)}
e.gg = gg
return e
# pylint: disable=W0613,W0621,R0201
class TestPrior:
def test_setup_first(self, override):
e = generate()
e._setup_next()
assert e.trial_info == {
'n': 0,
'golds': 100,
'prior': .2,
'series': 1
}
def test_setup_next(self, override):
e = generate()
e._setup_next()
e._setup_next()
assert e.trial_info == {
'n': 1,
'golds': 100,
'prior': .4,
'series': 1
}
def test_rollover(self, override):
e = generate()
e.n = 4
e.values['prior'].current = .8
e._setup_next()
assert e.trial_info == {
'n': 5,
'golds': 100,
'prior': .2,
'series': 2
}
def test_has_next_true(self, override):
e = generate()
e.n = 4
e.values['prior'].current = .8
e.values['series'].current = 2
assert e.has_next() is True
def test_has_next_false(self, override):
e = generate()
e.n = 4
e.values['prior'].current = .8
e.values['series'].current = 3
assert e.has_next() is False
def test_count(self, override):
e = generate()
assert e.count() == 12
| [
"[email protected]"
] | |
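The test_count assertion follows from the grid in generate(): VI.range(.2, .8, .2) yields four prior values (.2, .4, .6, .8), VI.range(1, 3, 1) yields three series values, and golds is a single value, so the experiment enumerates 4 * 3 * 1 = 12 trials.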
f2d0b8ecf990b3bd64d4ed8f7ac429a1f7618569 | e11dff811ca981f428644fd70d10a7369c671bcb | /src/tools/ecos/cvxpy/examples/advanced/circuits.py | f13a6e85c11de273d99651ed51274c17c59334ff | [
"GPL-3.0-only",
"GPL-3.0-or-later",
"MIT"
] | permissive | riadnassiffe/Simulator | 3c4a036b5635534929fdb04b0e9c96d64c0da71f | 7d9ff09f26367d3714e3d10be3dd4a9817b8ed6b | refs/heads/master | 2021-06-20T09:31:36.033427 | 2021-04-17T00:03:17 | 2021-04-17T00:03:17 | 16,033,879 | 0 | 0 | MIT | 2021-03-22T23:20:34 | 2014-01-18T20:58:10 | Jupyter Notebook | UTF-8 | Python | false | false | 3,057 | py | # An object oriented model of a circuit.
from cvxpy import *
import abc
class Node(object):
""" A node connecting devices. """
def __init__(self):
self.voltage = Variable()
self.current_flows = []
# The current entering a node equals the current leaving the node.
def constraints(self):
return [sum(f for f in self.current_flows) == 0]
class Ground(Node):
""" A node at 0 volts. """
def constraints(self):
return [self.voltage == 0] + super(Ground, self).constraints()
class Device(object):
__metaclass__ = abc.ABCMeta
""" A device on a circuit. """
def __init__(self, pos_node, neg_node):
self.pos_node = pos_node
self.pos_node.current_flows.append(-self.current())
self.neg_node = neg_node
self.neg_node.current_flows.append(self.current())
# The voltage drop on the device.
@abc.abstractmethod
def voltage(self):
return NotImplemented
# The current through the device.
@abc.abstractmethod
def current(self):
return NotImplemented
# Every path between two nodes has the same voltage drop.
def constraints(self):
return [self.pos_node.voltage - self.voltage() == self.neg_node.voltage]
class Resistor(Device):
""" A resistor with V = R*I. """
def __init__(self, pos_node, neg_node, resistance):
self._current = Variable()
self.resistance = resistance
super(Resistor, self).__init__(pos_node, neg_node)
def voltage(self):
return -self.resistance*self.current()
def current(self):
return self._current
class VoltageSource(Device):
""" A constant source of voltage. """
def __init__(self, pos_node, neg_node, voltage):
self._current = Variable()
self._voltage = voltage
super(VoltageSource, self).__init__(pos_node, neg_node)
def voltage(self):
return self._voltage
def current(self):
return self._current
class CurrentSource(Device):
""" A constant source of current. """
def __init__(self, pos_node, neg_node, current):
self._current = current
self._voltage = Variable()
super(CurrentSource, self).__init__(pos_node, neg_node)
def voltage(self):
return self._voltage
def current(self):
return self._current
# # Create a simple circuit and find the current and voltage.
nodes = [Ground(),Node(),Node()]
# A 10 V battery
devices = [VoltageSource(nodes[0], nodes[2], 10)]
# A series of pairs of parallel resistors.
# 1/4 Ohm resistor and a 1 Ohm resistor in parallel.
devices.append( Resistor(nodes[0], nodes[1], 0.25) )
devices.append( Resistor(nodes[0], nodes[1], 1) )
# 4 Ohm resistor and a 1 Ohm resistor in parallel.
devices.append( Resistor(nodes[1], nodes[2], 4) )
devices.append( Resistor(nodes[1], nodes[2], 1) )
# Create the problem.
constraints = []
for obj in nodes + devices:
constraints += obj.constraints()
Problem(Minimize(0), constraints).solve()
for node in nodes:
print(node.voltage.value)
"[email protected]"
] | |
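A quick hand check of the example circuit above, assuming ideal components (signs of the solved node voltages depend on the polarity conventions in the constraints): each parallel pair reduces by R = Ra*Rb/(Ra+Rb), so the series total is 1 ohm and the loop current is 10 A.

r1 = 0.25 * 1 / (0.25 + 1)  # 0.2 ohm
r2 = 4 * 1 / (4 + 1)        # 0.8 ohm
total = r1 + r2             # 1.0 ohm
current = 10 / total        # 10 A through the loop
print(r1, r2, total, current)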
cc7f87e4bc994ca90cb52cbb925e54d73bd1bb7c | 6684f88abb4dde0e1295fd65e1d82b5d5a3a0414 | /mysite/models.py | 3483dc080b012728c9554e723193a1abec2fd9e4 | [] | no_license | mostafaitalian/mostafaprofile | f6242bcdb60af3c679530a9dc4f6dfb3aee6bfd9 | ac7fa2e2e73cc9dca08e3127dd2a1859e7bbdd28 | refs/heads/main | 2023-06-12T12:01:22.766499 | 2021-07-05T19:53:43 | 2021-07-05T19:53:43 | 382,725,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from django.db import models
from myprofile.models import Profile
# Create your models here.
class MySite(models.Model):
project_name = models.CharField(max_length=100)
link = models.URLField()
description = models.TextField()
images = models.ImageField(upload_to='image/')
myprofile = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name="sites")
def __str__(self):
return self.project_name | [
"[email protected]"
] | |
5d858f6dd2ad50fdc1f51166b1f4bc8ece460ce7 | 38a5a87d04e16cc7af2de659516f534853302ed2 | /scrapy/core/downloader/__init__.py | 62f48ec5a69a12cfe6d962000b07511880e9cc6e | [
"BSD-3-Clause"
] | permissive | zhangcheng/scrapy | d623232b946779c386eb7ca56bcfb6d5706a0ccb | 88e33ad0ad95d5f9049d8d8b1359819f4fbbf704 | refs/heads/master | 2021-01-18T12:07:01.174623 | 2011-06-09T03:15:53 | 2011-06-09T03:15:53 | 1,871,569 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,985 | py | """
Download web pages using asynchronous IO
"""
import random
from time import time
from collections import deque
from twisted.internet import reactor, defer
from twisted.python.failure import Failure
from scrapy.exceptions import IgnoreRequest
from scrapy.conf import settings
from scrapy.utils.python import setattr_default
from scrapy.utils.defer import mustbe_deferred
from scrapy.utils.signal import send_catch_log
from scrapy import signals
from scrapy import log
from .middleware import DownloaderMiddlewareManager
from .handlers import DownloadHandlers
class SpiderInfo(object):
"""Simple class to keep information and state for each open spider"""
def __init__(self, spider):
setattr_default(spider, 'download_delay', spider.settings.getfloat('DOWNLOAD_DELAY'))
setattr_default(spider, 'randomize_download_delay', spider.settings.getbool('RANDOMIZE_DOWNLOAD_DELAY'))
setattr_default(spider, 'max_concurrent_requests', spider.settings.getint('CONCURRENT_REQUESTS_PER_SPIDER'))
if spider.download_delay > 0 and spider.max_concurrent_requests > 1:
spider.max_concurrent_requests = 1
msg = "Setting max_concurrent_requests=1 because of download_delay=%s" % spider.download_delay
log.msg(msg, spider=spider)
self.spider = spider
self.active = set()
self.queue = deque()
self.transferring = set()
self.closing = False
self.lastseen = 0
self.next_request_calls = set()
def free_transfer_slots(self):
return self.spider.max_concurrent_requests - len(self.transferring)
def needs_backout(self):
# use self.active to include requests in the downloader middleware
return len(self.active) > 2 * self.spider.max_concurrent_requests
def download_delay(self):
delay = self.spider.download_delay
if self.spider.randomize_download_delay:
delay = random.uniform(0.5*delay, 1.5*delay)
return delay
def cancel_request_calls(self):
for call in self.next_request_calls:
call.cancel()
self.next_request_calls.clear()
class Downloader(object):
"""Mantain many concurrent downloads and provide an HTTP abstraction.
It supports a limited number of connections per spider and many spiders in
parallel.
"""
def __init__(self):
self.sites = {}
self.handlers = DownloadHandlers()
self.middleware = DownloaderMiddlewareManager.from_settings(settings)
self.concurrent_spiders = settings.getint('CONCURRENT_SPIDERS')
def fetch(self, request, spider):
"""Main method to use to request a download
This method includes middleware handling. Middleware can return a
Response object, in which case the request never reaches the downloader
queue and is not downloaded from the site.
"""
site = self.sites[spider]
if site.closing:
raise IgnoreRequest('Cannot fetch on a closing spider')
site.active.add(request)
def _deactivate(response):
site.active.remove(request)
self._close_if_idle(spider)
return response
dfd = self.middleware.download(self.enqueue, request, spider)
return dfd.addBoth(_deactivate)
def enqueue(self, request, spider):
"""Enqueue a Request for a effective download from site"""
site = self.sites[spider]
if site.closing:
raise IgnoreRequest
def _downloaded(response):
send_catch_log(signal=signals.response_downloaded, \
response=response, request=request, spider=spider)
return response
deferred = defer.Deferred().addCallback(_downloaded)
site.queue.append((request, deferred))
self._process_queue(spider)
return deferred
def _process_queue(self, spider):
"""Effective download requests from site queue"""
site = self.sites.get(spider)
if not site:
return
# Delay queue processing if a download_delay is configured
now = time()
delay = site.download_delay()
if delay:
penalty = delay - now + site.lastseen
if penalty > 0 and site.free_transfer_slots():
d = defer.Deferred()
d.addCallback(self._process_queue)
call = reactor.callLater(penalty, d.callback, spider)
site.next_request_calls.add(call)
d.addBoth(lambda x: site.next_request_calls.remove(call))
return
site.lastseen = now
# Process enqueued requests if there are free slots to transfer for this site
while site.queue and site.free_transfer_slots() > 0:
request, deferred = site.queue.popleft()
if site.closing:
dfd = defer.fail(Failure(IgnoreRequest()))
else:
dfd = self._download(site, request, spider)
dfd.chainDeferred(deferred)
self._close_if_idle(spider)
def _close_if_idle(self, spider):
site = self.sites.get(spider)
if site and site.closing and not site.active:
del self.sites[spider]
site.closing.callback(None)
def _download(self, site, request, spider):
# The order is very important for the following deferreds. Do not change!
# 1. Create the download deferred
dfd = mustbe_deferred(self.handlers.download_request, request, spider)
# 2. After response arrives, remove the request from transferring
# state to free up the transferring slot so it can be used by the
# following requests (perhaps those which came from the downloader
# middleware itself)
site.transferring.add(request)
def finish_transferring(_):
site.transferring.remove(request)
self._process_queue(spider)
# avoid partially downloaded responses from propagating to the
# downloader middleware, to speed-up the closing process
if site.closing:
log.msg("Crawled while closing spider: %s" % request, \
level=log.DEBUG, spider=spider)
raise IgnoreRequest
return _
return dfd.addBoth(finish_transferring)
def open_spider(self, spider):
"""Allocate resources to begin processing a spider"""
assert spider not in self.sites, "Spider already opened: %s" % spider
self.sites[spider] = SpiderInfo(spider)
def close_spider(self, spider):
"""Free any resources associated with the given spider"""
assert spider in self.sites, "Spider not opened: %s" % spider
site = self.sites.get(spider)
site.closing = defer.Deferred()
site.cancel_request_calls()
self._process_queue(spider)
return site.closing
def is_idle(self):
return not self.sites
| [
"[email protected]"
] | |
7330e8ddf7bfa6a119363f5bf3b1a1260872366e | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc080/B/4879977.py | 22d2fbd77be16ddf55ae52e4f18a5d7aa1a0bb6c | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | N=input()
print('Yes' if int(N)%sum(int(i) for i in N)==0 else 'No') | [
"[email protected]"
] | |
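A worked instance of the check above: for N = 12 the digit sum is 1 + 2 = 3 and 12 % 3 == 0, so the program prints Yes; for N = 13 the digit sum is 4 and 13 % 4 == 1, so it prints No.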
2424d2a90d0aaef68de0d6f758b31fc067c00c5d | de64b143a346585f51590bd674e8d13bbc672386 | /algorithm/2022/1219_952_Largest_Component_Size_by_Common_Factor/Juwan.py | b646a287a0b1aa11aadaf34bd77f8cb879296a9b | [] | no_license | ai-kmu/etc | 304ec20f59e4026025abdcbcae21863c80630dcb | 9c29941e19b7dd2a2037b110dd6e16690e9a0cc2 | refs/heads/master | 2023-08-21T16:30:31.149956 | 2023-08-21T16:26:19 | 2023-08-21T16:26:19 | 199,843,899 | 3 | 24 | null | 2023-05-31T09:56:59 | 2019-07-31T11:36:16 | Jupyter Notebook | UTF-8 | Python | false | false | 881 | py | class Solution:
def largestComponentSize(self, nums: List[int]) -> int:
parent = [-1]*100001 # list for tracking each number's parent node
def find(x): # solved with union-find
if parent[x] == -1:
return x
parent[x] = find(parent[x])
return parent[x]
def union(a, b): # find the parents of both nodes and link them together
a = find(a)
b = find(b)
if a != b:
parent[b] = a
for i in nums:
for j in range(2, int(sqrt(i)) + 1):
if i % j == 0:
union(j, i)
union(i, i//j)
cnt = 0
h_t = {}
for i in nums:
a = find(i)
cnt = max(cnt, 1 + h_t.get(a, 0))
h_t[a] = 1 + h_t.get(a, 0)
return cnt
| [
"[email protected]"
] | |
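A small demonstration of the solution above grouping numbers by shared prime factors (assuming sqrt and List are in scope, e.g. from math import sqrt, as on the LeetCode runner):

print(Solution().largestComponentSize([4, 6, 15, 35]))  # 4: 4-6 share 2, 6-15 share 3, 15-35 share 5
print(Solution().largestComponentSize([20, 50, 9, 63]))  # 2: components {20, 50} and {9, 63}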
1a5a0fecba5719fbafefe8d2a0202fd233083119 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/rdbms/azure-mgmt-rdbms/generated_samples/postgresql/configuration_create_or_update.py | fd2ce019539a6fd4667037e606ec4d697827004b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,743 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-rdbms
# USAGE
python configuration_create_or_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = PostgreSQLManagementClient(
credential=DefaultAzureCredential(),
subscription_id="ffffffff-ffff-ffff-ffff-ffffffffffff",
)
response = client.configurations.begin_create_or_update(
resource_group_name="TestGroup",
server_name="testserver",
configuration_name="array_nulls",
parameters={"properties": {"source": "user-override", "value": "off"}},
).result()
print(response)
# x-ms-original-file: specification/postgresql/resource-manager/Microsoft.DBforPostgreSQL/stable/2017-12-01/examples/ConfigurationCreateOrUpdate.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
b19da845110781324e7547090ada1cf6297e4fed | 6044266e775c87afed99397c8bb88366fbbca0e7 | /scrapy_file/csrf_token_extract_with_re.py | 054ec197c5e44ddfab392e7960aca50fad972aa6 | [] | no_license | ranafge/all-documnent-projects | e4434b821354076f486639419598fd54039fb5bd | c9d65ddea291c53b8e101357547ac63a36406ed9 | refs/heads/main | 2023-05-08T20:01:20.343856 | 2021-05-30T10:44:28 | 2021-05-30T10:44:28 | 372,186,355 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | from bs4 import BeautifulSoup
import requests
import json
url= ["https://www.premierleague.com/stats/top/clubs/wins?se={}".format(x) for x in range(1,100)]
print(url)
for url in url:
data= requests.get(url).text
soup=BeautifulSoup(data,"html.parser")
PLtable = soup.find_all('table')[0]
data = []
for td in PLtable.find_all("td"):
data.append(td.text.replace('\n', ' ').strip())
print(data)
| [
"[email protected]"
] | |
2af1aad03697a4881cf62d2aba159672b8dd4e77 | 5ec7b086aed5341bdb6356e4f013d92f4eef961f | /app_local/client.py | 1138dc2454d825a3fbb8a9345ec65334357d47eb | [] | no_license | Mizterbox/MizterboxLogs | 4de843587ce38909de893eb98e50c2ccb2027654 | b1c7c2e7e5eacdf8528e6c13ec71564faa7ef82a | refs/heads/master | 2020-04-25T19:46:38.818903 | 2019-03-03T09:08:44 | 2019-03-03T09:08:44 | 173,032,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | import requests, numpy as np, time
from time import gmtime, strftime, localtime
sprinklerid = np.random.randint(100,size=100)
address = np.random.randint(1000,size=100)
status = np.random.randint(1000,size=100)
maxcount = 5000000
counter = 0
actualsprinkids = [1,2,4,0]
status = ['Running Active', 'Restarting','Connecting to Wifi', 'Connecting to Internet']
while counter < maxcount:
res = requests.post('http://mizterboxlogs.herokuapp.com/sprinklerlogs/', json={
"id":int(np.random.choice(actualsprinkids,size=1)[0]),
"status":np.random.choice(status,size=1)[0],
})
if res.ok:
print (res.json())
time.sleep(2)
counter+=1
| [
"[email protected]"
] | |
9785e0140ac83818493992d4910ac3f403e90e9f | 42e5fd024ca7522c990d9627863302aa1f792804 | /DeepWNCS/Inverted_Pendulum_sihoon/Common/initialize.py | e5e97772145b48d36cec12f9d46d7bee7bd2f66e | [] | no_license | msh0576/RL_WCPS | 1f36de09ab6e4664b56ff929c69fab7de7314988 | 498a54f9777c5a849b0af491d9e76fcc470aa083 | refs/heads/master | 2023-06-08T19:13:33.676970 | 2020-11-30T05:30:02 | 2020-11-30T05:30:02 | 317,114,515 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 12:41:23 2020
@author: Sihoon
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
def linear_weights_init(m):
if isinstance(m, nn.Linear):
stdv = 1. / math.sqrt(m.weight.size(1))
m.weight.data.uniform_(-stdv, stdv)
if m.bias is not None:
m.bias.data.uniform_(-stdv, stdv)
def conv_weights_init(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
torch.nn.init.zeros_(m.bias) | [
"[email protected]"
] | |
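A usage sketch for the initializers above; nn.Module.apply walks every submodule, so one call per initializer covers the whole network (toy layers for illustration):

import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.Conv2d(8, 4, 3))
net.apply(conv_weights_init)     # Xavier-uniform for every Conv2d
head = nn.Linear(16, 2)
head.apply(linear_weights_init)  # uniform(-1/sqrt(fan_in), 1/sqrt(fan_in))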
5165947fc94888a1cc057b8dae59e599ae2c2e82 | 6a8eac5877ea4f782c094ad7b974d03e1dc86401 | /src/brouwers/albums/tests/factory_models.py | 85253b626987baa65d422813ba585327d3347edd | [] | permissive | modelbrouwers/modelbrouwers | cb2bbea34e70f4a1d9a7361dfe7131a20ea26b02 | 7713e78eeb31809e04b0b316ec8f8deed0808fc9 | refs/heads/main | 2023-08-06T10:49:33.804123 | 2023-07-30T20:28:34 | 2023-07-30T20:28:34 | 13,872,961 | 7 | 3 | MIT | 2023-05-29T15:33:06 | 2013-10-25T21:51:20 | Python | UTF-8 | Python | false | false | 179 | py | import warnings
from .factories import *
warnings.warn(
"Import from albums.tests.factories, the factory_models " "module will be removed",
PendingDeprecationWarning,
)
| [
"[email protected]"
] | |
6a3fea967bb843876033c7044091961fc7cfb259 | f77d97840915ff2318c8f3841096019337c58689 | /_admin/admin_service/digestmonkey/models.py | 1ae642e4624a3ed2f41e871fac4e851e8d1d1b6a | [] | no_license | rrader/events-service | f35d7e237e0ef5e3598b90878713539960153895 | 5933a6ba83aacb63832dd6efa806409bb37812aa | refs/heads/master | 2021-01-10T04:25:45.875103 | 2015-11-20T16:21:32 | 2015-11-20T16:21:32 | 44,528,882 | 4 | 1 | null | 2015-11-01T19:28:47 | 2015-10-19T11:02:48 | Python | UTF-8 | Python | false | false | 1,231 | py | from sqlalchemy.dialects.postgresql import ARRAY, JSON
from admin_service.extensions import db
from sqlalchemy.orm import backref, relationship
class DigestMonkeyConfig(db.Model):
__tablename__ = 'mailchimpkeys'
id = db.Column(db.Integer, primary_key=True)
mailchimp_key = db.Column(db.String(100))
templates_uri = db.Column(db.String(100))
github_key = db.Column(db.String(100))
team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
team = relationship("Team", backref=backref("digestmonkey_config", uselist=False))
class PublishedDigest(db.Model):
__tablename__ = 'digests'
id = db.Column(db.Integer, primary_key=True)
events_data = db.Column(JSON)
events_ids = db.Column(ARRAY(db.Integer))
template = db.Column(db.String(100))
preview = db.Column(db.Text)
s_list = db.Column(db.String(20))
s_list_name = db.Column(db.String(100))
from_name = db.Column(db.String(100))
from_email = db.Column(db.String(100))
subject = db.Column(db.String(200))
campaign_id = db.Column(db.String(20))
web_id = db.Column(db.String(20))
team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
team = relationship("Team", backref=backref("digests"))
| [
"[email protected]"
] | |
ca9938e289f72a91088f1d3ffb1dd9dbee75ce3b | d5cc71ec7bbf2d6be0916e2c0a019501692979e6 | /main.py | 8024cd3359a72a1524430f87022d70076c750f9c | [] | no_license | JellyWX/BattleTanks | 208216df0bc0dc15a553d1624938060307690408 | fc92ac40b126325b932e43721a803bef45f34a90 | refs/heads/master | 2020-12-30T23:36:59.030501 | 2018-02-19T13:20:09 | 2018-02-19T13:20:09 | 86,604,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,396 | py | from gui import GUI
from tank import Tank
from bullet import Bullet
from tile import Tile, Flower, Crate, MiniCrate, WeaponCrate
from grid import Grid
from BaseClass import BaseClass
from imageLoader import imageLoader
from random import random
import math
import os
import pygame
import sys
for arg in sys.argv:
if arg == '-dH' or arg == '--debug-hitboxes':
GUI.debug = True
imageloader = imageLoader('assets/images/')
gui = GUI(400,400,'Battle Tanks')
grid = Grid(16,16)
BaseClass.grid = grid
BaseClass.gui = gui
BaseClass.images = imageloader
done = False
process_stage = 0
player = Tank(40,40)
player_sequence = [player]
render_sequence = [grid]
grid.Draw('park2')
grid.sortRenderingComponents()
def stage(n):
global gui
for e in gui.event():
if e.type == pygame.QUIT:
return -1
if e.type == pygame.VIDEORESIZE:
gui.resize(e.dict['size'][0],e.dict['size'][1])
if n == 0:
dx = pygame.mouse.get_pos()[0] - player.x
dy = pygame.mouse.get_pos()[1] - player.y
rad_angle_turret = math.atan2(dx,dy)
final_rotation_turret = rad_angle_turret*180/math.pi
if gui.mouseAction(0):
if not (-8 < dx < 8 and -8 < dy < 8):
rad_angle = math.atan2(dy,dx)
hyp_tank = math.sqrt(dx*dx + dy*dy)
hyp_dis_x = dx / hyp_tank
hyp_dis_y = dy / hyp_tank
final_vec = (hyp_dis_x,hyp_dis_y)
final_rotation = math.atan2(final_vec[0],final_vec[1])*180/math.pi
player.move_cursor(final_vec,final_rotation+180)
elif gui.keyAction(pygame.K_UP) or gui.keyAction(pygame.K_w):
player.move_keys(1)
elif gui.keyAction(pygame.K_DOWN) or gui.keyAction(pygame.K_s):
player.move_keys(0)
if gui.mouseAction(2):
hyp_bullet = math.sqrt(dx*dx + dy*dy)
hyp_dis_x_bullet = dx / hyp_bullet
hyp_dis_y_bullet = dy / hyp_bullet
bullet_vec = (hyp_dis_x_bullet,hyp_dis_y_bullet)
player.attack(bullet_vec)
## Rotate turret ##
player.rotate_turret(final_rotation_turret+180)
for p in player_sequence:
for b in p.bullets:
b.move()
return 0
elif n == 1:
return -1
while not done:
if process_stage == -1:
done = True
process_stage = stage(process_stage)
gui.page.fill((0,0,0))
for i in render_sequence:
if isinstance(i,Tank):
for b in i.bullets:
b.render()
i.render()
gui.flip(64)
| [
"[email protected]"
] | |
5ed4349c84e99c1bf997607eaf87034cc25e4bf5 | 44e7e02425b3ddd69f20d3545e3f32c3af55875d | /model/my_model/ccr.py | 71c8dfacf4c8db02b3dba04a61ef7d70ebeb9746 | [] | no_license | vigorwei/Segmentation | 6c7702d79b1b8141aeda8d8e282880d3b1402810 | 0a5ffc25aca11ec25c9f889d0fbe7e505e5e141e | refs/heads/master | 2023-02-01T14:39:58.174196 | 2020-12-17T03:05:22 | 2020-12-17T03:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,436 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.utils.data import WeightedRandomSampler
import numpy as np
from model.segbase import SegBaseModel
from model.model_utils import init_weights, _FCNHead
from model.my_model.blocks import *
from model.my_model.SPUnet import SPSP
from .CaCNet import CaC_Module
from .TANet import Tensor_Attention
from model.my_model.Class_GCN import class_gcn_2
def softmax_T(x, dim, T=1):
# temperature-scaled softmax along `dim`: softmax(x / T)
x = torch.exp(x / T)
total = torch.sum(x, dim=dim, keepdim=True)
return x / total
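# Illustration (approximate values): a higher temperature flattens the distribution.
# logits = torch.tensor([[1.0, 2.0, 3.0]])
# softmax_T(logits, dim=1, T=1)   # ~[0.09, 0.24, 0.67]
# softmax_T(logits, dim=1, T=20)  # ~[0.32, 0.33, 0.35]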
class ccr(nn.Module):
def __init__(self, c, k, n_class, stage_num=3):
super(ccr, self).__init__()
self.stage_num = stage_num
self.k = k
# self.conv_aux_pred = nn.Conv2d(c, n_class, kernel_size=1)
self.conv1 = nn.Conv2d(c, c, 1)
self.conv2 = nn.Sequential(
nn.Conv2d(c, c, 1, bias=False),
nn.BatchNorm2d(c))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
b, c, h, w = x.size()
idn = x
# The first 1x1 conv
x = self.conv1(x)
# sampling
# deep-supervision weighting (disabled):
# aux_pred = self.conv_aux_pred(idn)
# sample_weight = softmax_T(aux_pred, dim=1, T=20)
# sample_weight = 1 - torch.max(sample_weight, dim=1)[0] # sampling weight: the lower the confidence, the larger the weight
sample_weight = torch.ones((b, h, w)) # uniform sampling; all-zero weights would make WeightedRandomSampler fail
base = []
# base_label = []
# label = F.interpolate(label.unsqueeze(1), size=(h, w)).squeeze(1)
for batch in range(b):
samples = list(WeightedRandomSampler(list(sample_weight[batch, :, :].reshape(-1)), self.k, replacement=True))
base.append(idn.reshape(b, c, -1)[batch, :, samples].unsqueeze(0)) # (1, c, k)
# base_label.append(label.reshape(b, -1)[batch, samples].unsqueeze(0)) # (1, k)
base = torch.cat(base, dim=0) # (b, c, num_sample)
# base_label = torch.cat(base_label, dim=0) # (b, num_sample)
# The EM Attention
b, c, h, w = x.size()
x = x.view(b, c, h * w) # b * c * n
mu = base
for i in range(self.stage_num):
x_t = x.permute(0, 2, 1) # b * n * c
z = torch.bmm(x_t, mu) # b * n * k # similarity between the n feature-map points and the k bases
z = F.softmax(z, dim=2) # b * n * k # probability of each point belonging to each base
z_ = z / (1e-6 + z.sum(dim=1, keepdim=True)) # normalized weight of each point for each base
mu = torch.bmm(x, z_) # b * c * k # weighted combination of the points yields the updated bases
mu = self._l2norm(mu, dim=1)
z_t = z.permute(0, 2, 1) # b * k * n
x = mu.matmul(z_t) # b * c * n # rebuild the feature map from the bases
x = x.view(b, c, h, w) # b * c * h * w
x = F.relu(x, inplace=True)
# The second 1x1 conv
x = self.conv2(x)
x = x + idn
x = F.relu(x, inplace=True)
return x
def _l2norm(self, inp, dim):
'''Normlize the inp tensor with l2-norm.
Returns a tensor where each sub-tensor of input along the given dim is
normalized such that the 2-norm of the sub-tensor is equal to 1.
Arguments:
inp (tensor): The input tensor.
dim (int): The dimension to slice over to get the ssub-tensors.
Returns:
(tensor) The normalized tensor.
'''
return inp / (1e-6 + inp.norm(dim=dim, keepdim=True))
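# Shape sketch (illustrative, not from the original file): ccr is drop-in and
# preserves (b, c, h, w); k feature vectors are sampled per image as the initial bases.
#   m = ccr(c=64, k=32, n_class=2)
#   y = m(torch.randn(2, 64, 16, 16))  # y.shape == (2, 64, 16, 16)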
class EMA_UP_docoder(nn.Module):
def __init__(self, channel_h, channel_l, k=32):
super().__init__()
self.channel = channel_h + channel_l
self.conv_in_h = nn.Conv2d(channel_h, channel_h, kernel_size=1)
self.conv_in_l = nn.Conv2d(channel_l, channel_l, kernel_size=1)
self.em = EM(self.channel, k=k)
self.conv_trans_low = nn.Conv2d(self.channel, self.channel, kernel_size=1)
self.conv_gobal = nn.Conv2d(self.channel, self.channel, kernel_size=1)
self.pool = nn.AdaptiveAvgPool2d(1)
self.conv_trans_attention = nn.Conv2d(self.channel, self.channel, kernel_size=1)
self.rebuild_conv = nn.Sequential(
nn.Conv2d(self.channel, self.channel, 1, bias=False),
nn.BatchNorm2d(self.channel),
nn.ReLU())
self.conv_out = nn.Sequential(
nn.Conv2d(self.channel*2, channel_l, kernel_size=1, bias=False),
nn.BatchNorm2d(channel_l),
nn.ReLU())
def forward(self, x_h, x_l):
# Multi-scale features fusion.
x_h = self.conv_in_h(x_h)
x_l = self.conv_in_l(x_l)
x_h_up = F.interpolate(x_h, size=x_l.size()[-2:], mode="bilinear", align_corners=True)
x_l_down = F.interpolate(x_l, size=x_h.size()[-2:], mode="bilinear", align_corners=True)
m_deep = torch.cat((x_l_down, x_h), dim=1)
m_low = torch.cat((x_l, x_h_up), dim=1)
# Holistic codebook generation.
em_out = self.em(m_deep)
base = em_out["mu"]
# Codeword assembly for high-resolution feature upsampling.
m_low = self.conv_trans_low(m_low) # (b, 1024, h/8, w/8)
W = self.conv_trans_attention(m_low + self.pool(self.conv_gobal(m_deep))) # (b, 1024, h/8, w/8)
b, c, h, w = W.size()
W = W.view(b, c, -1).permute(0, 2, 1) # (b, h/8*w/8, 1024)
similarity = F.softmax(torch.bmm(W, base).permute(0, 2, 1), dim=1) # (b, k, hw)
m_up = torch.bmm(base, similarity).view(b, c, h, w) #(b, c, hw)
m_up = self.rebuild_conv(m_up)
f = torch.cat((m_up, m_low), dim=1)
out = self.conv_out(f)
return {"out": out,
"base": base,
"A": similarity.view(b, -1, h, w)}
# class EMA_UP_docoder(nn.Module):
# def __init__(self, channel_h, channel_l, k=64):
# super().__init__()
# self.channel = channel_h + channel_l
#
# self.conv_in_h = conv_bn_relu(channel_h, channel_h, kernel_size=1, padding=0)
# self.conv_in_l = conv_bn_relu(channel_l, channel_l, kernel_size=1, padding=0)
#
# self.em = EM(self.channel, k=k)
#
# self.conv_trans_low = nn.Conv2d(self.channel, self.channel, kernel_size=1, padding=0)
#
# self.pool = nn.AdaptiveAvgPool2d(1)
# self.conv_trans_attention = nn.Conv2d(self.channel, self.channel, kernel_size=1)
#
# self.rebuild_conv = nn.Sequential(
# nn.Conv2d(self.channel, self.channel, 1, bias=False),
# nn.BatchNorm2d(self.channel),
# nn.ReLU())
#
# self.conv_out = nn.Sequential(
# nn.Conv2d(self.channel*2, channel_l, kernel_size=1, bias=False),
# nn.BatchNorm2d(channel_l),
# nn.ReLU())
#
# def forward(self, x_h, x_l):
# # Multi-scale features fusion.
# x_h = self.conv_in_h(x_h)
# x_l = self.conv_in_l(x_l)
# x_h_up = F.interpolate(x_h, size=x_l.size()[-2:], mode="bilinear", align_corners=True)
# x_l_down = F.interpolate(x_l, size=x_h.size()[-2:], mode="bilinear", align_corners=True)
# m_deep = torch.cat((x_l_down, x_h), dim=1)
# m_low = torch.cat((x_l, x_h_up), dim=1)
#
# # Holistic codebook generation.
# em_out = self.em(m_deep)
# base = em_out["mu"]
# x = em_out["x_trans"]
#
# # Codeword assembly for high-resolution feature upsampling.
# m_low = self.conv_trans_low(m_low) # (b, 1024, h/8, w/8)
# W = self.conv_trans_attention(m_low + self.pool(x)) # (b, 1024, h/8, w/8)
# b, c, h, w = W.size()
# W = W.view(b, c, -1).permute(0, 2, 1) # (b, h/8*w/8, 1024)
# similarity = F.softmax(torch.bmm(W, base).permute(0, 2, 1), dim=1) # (b, k, hw)
# m_up = torch.bmm(base, similarity).view(b, c, h, w) #(b, c, hw)
# m_up = self.rebuild_conv(m_up)
#
# f = torch.cat((m_up, m_low), dim=1)
# out = self.conv_out(f)
#
# return {"out": out,
# "base": base,
# "A": similarity.view(b, -1, h, w)}
#
# # residual-connection variant
# class EMA_UP_docoder(nn.Module):
# def __init__(self, channel_h, channel_l, k=64):
# super().__init__()
# self.channel = channel_h + channel_l
#
# self.conv_in_h = conv_bn_relu(channel_h, channel_h, kernel_size=1, padding=0)
# self.conv_in_l = conv_bn_relu(channel_l, channel_l, kernel_size=1, padding=0)
#
# self.em = EM(self.channel, k=k)
#
# self.conv_trans_low = nn.Conv2d(self.channel, self.channel, kernel_size=1, padding=0)
#
# self.pool = nn.AdaptiveAvgPool2d(1)
# self.conv_trans_attention = nn.Conv2d(self.channel, self.channel, kernel_size=1)
#
# self.rebuild_conv = nn.Sequential(
# nn.Conv2d(self.channel, channel_l, 1, bias=False),
# nn.BatchNorm2d(channel_l))
# self.conv_out = nn.Sequential(
# nn.Conv2d(channel_l, channel_l, 1, bias=False),
# nn.BatchNorm2d(channel_l),
# nn.ReLU())
#
# def forward(self, x_h, x_l):
# idn = x_l
#
# # Multi-scale features fusion.
# x_h = self.conv_in_h(x_h)
# x_l = self.conv_in_l(x_l)
# x_h_up = F.interpolate(x_h, size=x_l.size()[-2:], mode="bilinear", align_corners=True)
# x_l_down = F.interpolate(x_l, size=x_h.size()[-2:], mode="bilinear", align_corners=True)
# m_deep = torch.cat((x_l_down, x_h), dim=1)
# m_low = torch.cat((x_l, x_h_up), dim=1)
#
# # Holistic codebook generation.
# em_out = self.em(m_deep)
# base = em_out["mu"]
# x = em_out["x_trans"]
#
# # Codeword assembly for high-resolution feature upsampling.
# m_low = self.conv_trans_low(m_low) # (b, 1024, h/8, w/8)
# W = self.conv_trans_attention(m_low + self.pool(x)) # (b, 1024, h/8, w/8)
# b, c, h, w = W.size()
# W = W.view(b, c, -1).permute(0, 2, 1) # (b, h/8*w/8, 1024)
# similarity = F.softmax(torch.bmm(W, base).permute(0, 2, 1), dim=1) # (b, k, hw)
# m_up = torch.bmm(base, similarity).view(b, c, h, w) #(b, c, hw)
# m_up = self.rebuild_conv(m_up)
#
# # fusion
# out = self.conv_out(m_up + idn)
#
# return {"out": out,
# "base": base,
# "A": similarity.view(b, -1, h, w)}
class EM(nn.Module):
'''
c (int): The input and output channel number.
k (int): The number of the bases.
stage_num (int): The iteration number for EM.
'''
def __init__(self, c, k, stage_num=3, inter_channel=None):
super(EM, self).__init__()
self.stage_num = stage_num
if inter_channel is None:
inter_channel = c
# initialize the bases
mu = torch.Tensor(1, inter_channel, k) # k descriptors (codewords)
mu.normal_(0, math.sqrt(2. / k)) # Init with Kaiming Norm.
mu = self._l2norm(mu, dim=1) # normalize
self.register_buffer('mu', mu)
self.conv1 = nn.Conv2d(c, inter_channel, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
# The first 1x1 conv
x = self.conv1(x)
x_trans = x
# The EM Attention
b, c, h, w = x.size()
x = x.view(b, c, h * w) # b * c * n
# replicate the bases for every image in the batch
mu = self.mu.repeat(b, 1, 1) # b * c * k
with torch.no_grad():
for i in range(self.stage_num):
x_t = x.permute(0, 2, 1) # b * n * c
z = torch.bmm(x_t, mu) # b * n * k # similarity between the n feature-map points and the k bases
z = F.softmax(z, dim=2) # b * n * k # probability of each point belonging to each base
z_ = z / (1e-6 + z.sum(dim=1, keepdim=True)) # normalized weight of each point for each base
mu = torch.bmm(x, z_) # b * c * k # weighted combination of the points yields the updated bases
mu = self._l2norm(mu, dim=1)
# !!! The moving averaging operation is written in train.py, which is significant.
return {"mu": mu,
"x_trans": x_trans}
def _l2norm(self, inp, dim):
'''Normalize the inp tensor with l2-norm.
Returns a tensor where each sub-tensor of input along the given dim is
normalized such that the 2-norm of the sub-tensor is equal to 1.
Arguments:
inp (tensor): The input tensor.
dim (int): The dimension to slice over to get the ssub-tensors.
Returns:
(tensor) The normalized tensor.
'''
return inp / (1e-6 + inp.norm(dim=dim, keepdim=True))
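
# Minimal usage sketch for EM (illustrative, not part of the original module;
# the channel count, base count and input shape below are assumed values):
#
#     em = EM(c=256, k=64, stage_num=3)
#     out = em(torch.randn(2, 256, 32, 32))   # (batch, channels, H, W)
#     out["mu"]       # (2, 256, 64): the k holistic codewords per image
#     out["x_trans"]  # (2, 256, 32, 32): features after the 1x1 projection
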
class Attention_UP_decoder(nn.Module):
def __init__(self, channel_h, channel_l, n_codeword=512):
super().__init__()
self.n_codeword = n_codeword
self.channel = channel_h + channel_l
self.conv_in_h = conv_bn_relu(channel_h, channel_h, kernel_size=1, padding=0)
self.conv_in_l = conv_bn_relu(channel_l, channel_l, kernel_size=1, padding=0)
self.conv_B = nn.Conv2d(self.channel, self.channel, kernel_size=1)
self.conv_A = nn.Conv2d(self.channel, n_codeword, kernel_size=1)
self.conv_G = nn.Conv2d(self.channel, self.channel, kernel_size=1)
self.pool = nn.AdaptiveAvgPool2d(1)
self.conv_W = nn.Conv2d(self.channel, n_codeword, kernel_size=1)
self.conv_out = nn.Conv2d(self.channel*2, channel_l, kernel_size=1)
def forward(self, x_h, x_l):
# Multi-scale features fusion.
x_h = self.conv_in_h(x_h)
x_l = self.conv_in_l(x_l)
x_h_up = F.interpolate(x_h, size=x_l.size()[-2:], mode="bilinear", align_corners=True)
x_l_down = F.interpolate(x_l, size=x_h.size()[-2:], mode="bilinear", align_corners=True)
m_deep = torch.cat((x_l_down, x_h), dim=1)
m_low = torch.cat((x_l, x_h_up), dim=1)
# Holistic codebook generation.
b, c, h, w = m_deep.size()
A = F.softmax(self.conv_A(m_deep).reshape(b, -1, h*w), dim=-1).permute(0, 2, 1) #weight (b, h*w, n)
B = self.conv_B(m_deep) # base code word (b, 1024, h, w)
B_ = B.reshape(b, -1, h*w) # (b,1024, h*w)
code_word = torch.bmm(B_, A) # (b, 1024, n)
# Codeword assembly for high-resolution feature upsampling.
G = self.conv_G(m_low) # (b, 1024, h/8, w/8)
W = self.conv_W(G + self.pool(B)) # (b, n, h/8, w/8)
b, c, h, w = W.size()
W = W.view(b, c, -1) # (b, n, h/8*w/8)
f = torch.bmm(code_word, W).view(b, -1, h, w)
f = torch.cat((f, G), dim=1)
out = self.conv_out(f)
return out
class out_conv(nn.Module):
def __init__(self, in_channel, n_class):
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channel, in_channel, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(in_channel),
nn.ReLU(),
nn.Conv2d(in_channel, n_class, kernel_size=1),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
init_weights(m, init_type='kaiming')
elif isinstance(m, nn.BatchNorm2d):
init_weights(m, init_type='kaiming')
def forward(self, x):
return self.conv(x)
class CMSI(nn.Module):
def __init__(self, in_channel, scales):
super().__init__()
assert in_channel % 4 == 0
out_channel = int(in_channel / 4)
self.conv1 = conv_bn_relu(in_channel, out_channel)
self.conv2 = conv_bn_relu(out_channel, out_channel)
self.conv3 = conv_bn_relu(out_channel, out_channel)
self.conv4 = conv_bn_relu(out_channel, out_channel, kernel_size=1, padding=0)
self.conv2_1 = conv_bn_relu(in_channel+out_channel, out_channel)
self.conv2_2 = conv_bn_relu(out_channel*2, out_channel)
self.conv2_3 = conv_bn_relu(out_channel*2, out_channel, kernel_size=1, padding=0)
self.conv3_1 = conv_bn_relu(in_channel+out_channel, out_channel)
self.conv3_2 = conv_bn_relu(out_channel * 2, out_channel, kernel_size=1, padding=0)
self.conv4_1 = conv_bn_relu(in_channel + out_channel, out_channel, kernel_size=1, padding=0)
self.pool1 = nn.AdaptiveAvgPool2d((scales[0], scales[0]))
self.pool2 = nn.AdaptiveAvgPool2d((scales[1], scales[1]))
self.pool3 = nn.AdaptiveAvgPool2d((scales[2], scales[2]))
self.pool4 = nn.AdaptiveAvgPool2d((scales[3], scales[3]))
self.sconv1 = nn.Sequential(nn.Conv2d(in_channel, out_channel, (1, 3), 1, (0, 1), bias=False),
nn.BatchNorm2d(out_channel))
self.sconv2 = nn.Sequential(nn.Conv2d(in_channel, out_channel, (3, 1), 1, (1, 0), bias=False),
nn.BatchNorm2d(out_channel))
self.spool1 = nn.AdaptiveAvgPool2d((1, None))
self.spool2 = nn.AdaptiveAvgPool2d((None, 1))
self.conv_out = conv_bn_relu(in_channel*2+out_channel, in_channel)
def forward(self, x):
b, c, h, w = x.size()
x1 = self.conv1(self.pool1(x))
x2 = self.conv2(self.pool2(x1))
x3 = self.conv3(self.pool3(x2))
x4 = self.conv4(self.pool4(x3))
x2_1 = self.conv2_1(torch.cat((self.pool2(x), x2), dim=1))
x2_2 = self.conv2_2(torch.cat((self.pool3(x2_1), x3), dim=1))
x2_3 = self.conv2_3(torch.cat((self.pool4(x2_2), x4), dim=1))
x3_1 = self.conv3_1(torch.cat((self.pool3(x), x2_2), dim=1))
x3_2 = self.conv3_2(torch.cat((self.pool4(x3_1), x2_3), dim=1))
x4_1 = self.conv4_1(torch.cat((self.pool4(x), x3_2), dim=1))
        # Upsample every branch back to the input resolution.
y1 = F.interpolate(x1, size=(h, w), mode="bilinear", align_corners=True)
y2 = F.interpolate(x2_1, size=(h, w), mode="bilinear", align_corners=True)
y3 = F.interpolate(x3_1, size=(h, w), mode="bilinear", align_corners=True)
y4 = F.interpolate(x4_1, size=(h, w), mode="bilinear", align_corners=True)
        # Strip pooling: 1xW and Hx1 average pooling, then fuse.
x5 = F.interpolate(self.sconv1(self.spool1(x)), size=(h, w), mode="bilinear", align_corners=True)
x6 = F.interpolate(self.sconv2(self.spool2(x)), size=(h, w), mode="bilinear", align_corners=True)
y5 = F.relu(x5 + x6)
# concat
out = torch.cat((x, y1, y2, y3, y4, y5), dim=1)
out = self.conv_out(out)
return out
class EMUPNet(SegBaseModel):
def __init__(self, n_class, image_size=None, backbone='resnet34', pretrained_base=False, deep_stem=False, **kwargs):
super(EMUPNet, self).__init__(backbone, pretrained_base=pretrained_base, deep_stem=deep_stem, **kwargs)
channels = self.base_channel # [256, 512, 1024, 2048]
if deep_stem or backbone == 'resnest101':
conv1_channel = 128
else:
conv1_channel = 64
# self.class_gcn = class_gcn_2(channels[3], n_class)
self.donv_up1 = EMA_UP_docoder(channels[3], channels[2], k=64)
self.donv_up2 = EMA_UP_docoder(channels[2], channels[1], k=64)
self.donv_up3 = EMA_UP_docoder(channels[1], channels[0], k=64)
self.donv_up4 = EMA_UP_docoder(channels[0], conv1_channel, k=64)
self.out_conv = out_conv(conv1_channel, n_class)
def forward(self, x):
outputs = dict()
size = x.size()[2:]
c1, c2, c3, c4, c5 = self.backbone.extract_features(x)
# aux_out, c5 = self.class_gcn(c5)
# aux_out = F.interpolate(aux_out, size, mode='bilinear', align_corners=True)
# outputs.update({"aux_out": [aux_out]})
x1 = self.donv_up1(c5, c4)
x2 = self.donv_up2(x1["out"], c3)
x3 = self.donv_up3(x2["out"], c2)
x4 = self.donv_up4(x3["out"], c1)
x = self.out_conv(x4["out"])
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)  # final upsampling to the input size
outputs.update({"main_out": x})
outputs.update({"mu1": x1["base"],
"mu2": x2["base"],
"mu3": x3["base"],
"mu4": x4["base"]})
outputs.update({"A1": x1["A"],
"A2": x2["A"],
"A3": x3["A"],
"A4": x4["A"]})
return outputs
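
# Minimal usage sketch for EMUPNet (illustrative; the class count, backbone
# name and input size are assumed values, and SegBaseModel must be able to
# build that backbone):
#
#     model = EMUPNet(n_class=2, backbone='resnet34')
#     outputs = model(torch.randn(1, 3, 256, 256))
#     outputs["main_out"]            # (1, 2, 256, 256) segmentation logits
#     outputs["mu1"], outputs["A1"]  # per-stage codebooks / assignment maps
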
#
# class EMUPNet(SegBaseModel):
#
# def __init__(self, n_class, image_size=None, backbone='resnet34', pretrained_base=False, deep_stem=False, **kwargs):
# super(EMUPNet, self).__init__(backbone, pretrained_base=pretrained_base, deep_stem=deep_stem, **kwargs)
# channels = self.base_channel # [256, 512, 1024, 2048]
# if deep_stem or backbone == 'resnest101':
# conv1_channel = 128
# else:
# conv1_channel = 64
#
# self.donv_up1 = Attention_UP_decoder(channels[3], channels[2])
# self.donv_up2 = Attention_UP_decoder(channels[2], channels[1])
# self.donv_up3 = Attention_UP_decoder(channels[1], channels[0])
# self.donv_up4 = Attention_UP_decoder(channels[0], conv1_channel)
#
# self.out_conv = out_conv(conv1_channel, n_class)
#
#
# def forward(self, x):
# outputs = dict()
# size = x.size()[2:]
#
# c1, c2, c3, c4, c5 = self.backbone.extract_features(x)
#
# x = self.donv_up1(c5, c4)
# x = self.donv_up2(x, c3)
# x = self.donv_up3(x, c2)
# x = self.donv_up4(x, c1)
#
# x = self.out_conv(x)
#         x = F.interpolate(x, size, mode='bilinear', align_corners=True)  # final upsampling to the input size
#
# outputs.update({"main_out": x})
#
# return outputs
| [
"[email protected]"
] | |
eeff373e45e52f34ff7290461c61af68eb909dfb | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/databoxedge/v20190801/get_device_extended_information.py | 820de0f3738b39d62317888cba5931c33eb4e51a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,774 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDeviceExtendedInformationResult',
'AwaitableGetDeviceExtendedInformationResult',
'get_device_extended_information',
]
@pulumi.output_type
class GetDeviceExtendedInformationResult:
"""
The extended Info of the Data Box Edge/Gateway device.
"""
def __init__(__self__, encryption_key=None, encryption_key_thumbprint=None, id=None, name=None, resource_key=None, type=None):
if encryption_key and not isinstance(encryption_key, str):
raise TypeError("Expected argument 'encryption_key' to be a str")
pulumi.set(__self__, "encryption_key", encryption_key)
if encryption_key_thumbprint and not isinstance(encryption_key_thumbprint, str):
raise TypeError("Expected argument 'encryption_key_thumbprint' to be a str")
pulumi.set(__self__, "encryption_key_thumbprint", encryption_key_thumbprint)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_key and not isinstance(resource_key, str):
raise TypeError("Expected argument 'resource_key' to be a str")
pulumi.set(__self__, "resource_key", resource_key)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="encryptionKey")
def encryption_key(self) -> Optional[str]:
"""
The public part of the encryption certificate. Client uses this to encrypt any secret.
"""
return pulumi.get(self, "encryption_key")
@property
@pulumi.getter(name="encryptionKeyThumbprint")
def encryption_key_thumbprint(self) -> Optional[str]:
"""
The digital signature of encrypted certificate.
"""
return pulumi.get(self, "encryption_key_thumbprint")
@property
@pulumi.getter
def id(self) -> str:
"""
The path ID that uniquely identifies the object.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceKey")
def resource_key(self) -> str:
"""
The Resource ID of the Resource.
"""
return pulumi.get(self, "resource_key")
@property
@pulumi.getter
def type(self) -> str:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
class AwaitableGetDeviceExtendedInformationResult(GetDeviceExtendedInformationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDeviceExtendedInformationResult(
encryption_key=self.encryption_key,
encryption_key_thumbprint=self.encryption_key_thumbprint,
id=self.id,
name=self.name,
resource_key=self.resource_key,
type=self.type)
def get_device_extended_information(device_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDeviceExtendedInformationResult:
"""
The extended Info of the Data Box Edge/Gateway device.
:param str device_name: The device name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['deviceName'] = device_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20190801:getDeviceExtendedInformation', __args__, opts=opts, typ=GetDeviceExtendedInformationResult).value
return AwaitableGetDeviceExtendedInformationResult(
encryption_key=__ret__.encryption_key,
encryption_key_thumbprint=__ret__.encryption_key_thumbprint,
id=__ret__.id,
name=__ret__.name,
resource_key=__ret__.resource_key,
type=__ret__.type)
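
# Minimal usage sketch (illustrative; the device and resource-group names are
# assumed placeholders):
#
#     info = get_device_extended_information(device_name="my-edge-device",
#                                            resource_group_name="my-rg")
#     pulumi.export("resourceKey", info.resource_key)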
| [
"[email protected]"
] | |
2c1371f7dcb0284f2757359fe2e367bc9542b2f5 | fcdfe976c9ed60b18def889692a17dc18a8dd6d7 | /python/basic/arg_expr.py | e2978cda02c519982301970fb696eff2cc1c5580 | [] | no_license | akihikoy/ay_test | 4907470889c9bda11cdc84e8231ef3156fda8bd7 | a24dfb720960bfedb94be3b4d147e37616e7f39a | refs/heads/master | 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | #!/usr/bin/python
#\file arg_expr.py
#\brief certain python script
#\author Akihiko Yamaguchi, [email protected]
#\version 0.1
#\date Mar.14, 2016
import sys, random, math
if __name__=='__main__':
s_expr= sys.argv[1]
print 'arg[1]=',s_expr
expr= eval('lambda x:'+s_expr)
print 'expr=',expr
for i in range(10):
x= random.random()
print 'expr({x})= {value}'.format(x=x, value=expr(x))
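# Example invocation (assumed; argv[1] can be any Python expression in x,
# e.g. using the math module imported above):
#   $ python arg_expr.py "math.sin(x)+1.0"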
| [
"[email protected]"
] | |
7622573ee5a322ca35255c62269e353ba2ad8f81 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02418/s050810012.py | 813576c56b0f65866e959ed464e23f400f903cdc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | def check(s, p):
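    # Ring search: for each start index i in the circular text s, compare p
    # character by character; the modulo below makes the index wrap around.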
for i in range(len(s)):
count = 0
for j in range(len(p)):
if s[(i+j) % len(s)] != p[j]:
break
count += 1
if count == len(p):
return True
return False
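
# Equivalent one-liner (a sketch, assuming len(p) <= len(s) as the ring-search
# problem guarantees): doubling the text unrolls the ring, so the whole check
# reduces to:
#   print("Yes" if p in s + s else "No")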
s = raw_input()
p = raw_input()
flag = check(s, p)
if flag:
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
82d4f8a638d84f91ab3f4cf61ad517ef8eeec04a | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/agc011/A/4811622.py | 80e3e9613e7fb93e920fc7e005481f593391fd61 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | n, c, k = map(int, input().split())
t = [int(input()) for i in range(n)]
t.sort()
result = 1   # at least one bus is needed
count = 0    # passengers on the current bus
f = 0        # index of the first passenger assigned to the current bus
for i in range(n):
    # Dispatch a new bus when the current one is full (c passengers) or the
    # first passenger's wait would exceed k.
    if (t[i] - t[f]) > k or c == count:
        result += 1
        count = 1
        f = i
    else:
        count += 1
print(result) | [
"[email protected]"
] | |
86fcf585e7121caa6968a3c5a0bfd281544770c3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02410/s418924252.py | 63557771bea67c49bf2723c66e25a07d7c4cec3a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | n,m=list(map(int,input().split()))
mA=[list(map(int,input().split())) for i in range(n)]
mB=[int(input()) for j in range(m)]
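# The loop below prints each entry of the matrix-vector product A @ b.
# Sample (assumed AOJ-style input): A = [[1, 2], [3, 4]], b = [5, 6]
# -> prints 17 then 39.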
for ma in mA:
print(sum([a*b for a, b in zip(ma,mB)])) | [
"[email protected]"
] | |
3a2ed58e973763684ba6813e31a0326f3c22804c | 6e060e9730b58e4d7819335438f915179504e72c | /bit_account/wizard/statement_account_supplier.py | e1396166ffeacd8fd83e3663b196b96e9860f338 | [] | no_license | missionpetroleumgit/addons_missiongit | 4dcdf1d0e79da982670c573d59574a939d1636c0 | 714514719d5d4d96f371dd529a70ac282070c43b | refs/heads/master | 2023-03-10T20:34:10.154050 | 2021-02-24T23:50:20 | 2021-02-24T23:50:20 | 342,027,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,457 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution - Ecuador
#    Copyright (C) 2014 BitConsultores (<http://bitconsultores-ec.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api, fields, models
from openerp.tools.translate import _
from openerp.exceptions import except_orm, Warning, RedirectWarning
import base64
import StringIO
from string import upper
from time import mktime
from datetime import datetime
from dateutil.relativedelta import relativedelta
import unicodedata
from openerp.tools import float_compare
import xlwt as pycel  # library that exports to Excel
import cStringIO
import logging
_logger = logging.getLogger(__name__)
class statement_account_supplier(models.TransientModel):
_name = "statement.account.supplier"
company_id = fields.Many2one('res.company', 'Compania', required=True)
datefrom = fields.Date('Fecha desde', required=True)
dateto = fields.Date('Fecha hasta', required=True)
summary = fields.Boolean(string='Resumen')
partner_ids = fields.Many2many('res.partner', string='Proveedor')
account_id = fields.Many2one('account.account', 'Cuenta')
@api.model
def default_get(self, fields_list):
res = super(statement_account_supplier, self).default_get(fields_list)
user = self.env['res.users'].browse(self._uid)
res['company_id'] = user.company_id.id
return res
@api.multi
def generar(self):
wb = pycel.Workbook(encoding='utf-8')
style_cabecera = pycel.easyxf('font: colour black, bold True;'
'align: vertical center, horizontal center;'
)
style_partner = pycel.easyxf('font: colour black;'
'align: vertical center, horizontal left;'
)
style_header = pycel.easyxf('font: bold True;'
'align: vertical center, horizontal center, wrap on;'
'borders: left 1, right 1, top 1, bottom 1;')
linea = pycel.easyxf('borders:bottom 1;')
linea_izq = pycel.easyxf('font: colour black, height 150;'
'align: vertical center, horizontal left, wrap on;'
)
linea_izq_red = pycel.easyxf('font: bold True, colour blue, height 150;'
'align: vertical center, horizontal left, wrap on;'
)
linea_der = pycel.easyxf('font: colour black, height 150;'
'align: vertical center, horizontal right;'
)
linea_der_blue = pycel.easyxf('font: colour blue, height 150, bold True;'
'align: vertical center, horizontal right;'
)
company = self.company_id
ws = wb.add_sheet('Facturas Proveedores')
ws.show_grid = False
ws.header_str = u"&LFecha de Impresion: &D Hora: &T&RPagina &P de &N"
ws.footer_str = u""
ws.write_merge(1, 1, 1, 7, company.name, style_cabecera)
ws.write_merge(2, 2, 1, 7, 'Direccion: ' + company.partner_id.street, style_cabecera)
ws.write_merge(3, 3, 1, 7, 'Ruc: ' + company.partner_id.part_number, style_cabecera)
        ws.write_merge(5, 5, 1, 7, 'Historial estado de cta proveedores ' + self.datefrom + ' AL ' + self.dateto, style_cabecera)
ws.write_merge(6, 6, 1, 7,'Fecha Impresion: '+ datetime.today().strftime('%Y-%m-%d'), style_cabecera)
        xi = 8  # header row of the supplier invoice table
ws.write(xi, 1, 'No. Factura', style_header)
ws.write(xi, 2, 'Tipo Documento', style_header)
ws.write(xi, 3, 'No. Transaccion', style_header)
ws.write(xi, 4, 'Fecha factura', style_header)
ws.write(xi, 5, 'Fecha vencimiento', style_header)
ws.write(xi, 6, 'No. Asiento', style_header)
ws.write(xi, 7, 'Detalle', style_header)
ws.write(xi, 8, 'Debito', style_header)
ws.write(xi, 9, 'Credito', style_header)
ws.write(xi, 10, 'Saldo', style_header)
ws.row(xi).height = 500
xi += 1
supplier = self.get_supplier(self.dateto)[0]
sumdebitall = 0
sumceditall = 0
sumsaldoall = 0
for supp in supplier:
invoice_line = self.get_lines(supp.get('id'),self.datefrom, self.dateto,supp.get('account'))[0][0]
if invoice_line:
ws.write_merge(xi, xi, 1, 7, supp.get('ruc') + ' - ' + supp.get('name'), style_partner)
if self.summary != True:
xi += 1
sumdebitgroup = 0
sumceditgroup = 0
sumsaldogroup = 0
for line in invoice_line:
if self.summary != True:
ws.write(xi, 1, line.get('invoicenumber'), linea_izq)
ws.write(xi, 2, line.get('documenttype'), linea_izq)
ws.write(xi, 3, line.get('transacctionnumber'), linea_izq)
ws.write(xi, 4, line.get('date'), linea_izq)
ws.write(xi, 5, line.get('duedate'), linea_izq)
ws.write(xi, 6, line.get('journal'), linea_izq)
ws.write(xi, 7, line.get('description'), linea_izq)
ws.write(xi, 8, line.get('debit'), linea_der)
ws.write(xi, 9, line.get('credit'), linea_der)
ws.write(xi, 10, line.get('saldo'), linea_der)
xi += 1
sumdebitgroup += line.get('debit')
sumceditgroup += line.get('credit')
sumsaldogroup += round(sumdebitgroup,2) - round(sumceditgroup,2)
ws.write_merge(xi, xi, 8, 8, sumdebitgroup, linea_der_blue)
ws.write_merge(xi, xi, 9, 9, sumceditgroup, linea_der_blue)
ws.write_merge(xi, xi, 10, 10, round(sumdebitgroup,2) - round(sumceditgroup,2), linea_der_blue)
ws.row(xi).height = 500
xi += 1
sumdebitall += sumdebitgroup
sumceditall += sumceditgroup
sumsaldoall += sumdebitall - sumceditall
ws.write(xi, 8, sumdebitall, linea_der_blue)
ws.write(xi, 9, sumceditall, linea_der_blue)
ws.write(xi, 10, round(sumdebitall,2) - round(sumceditall,2), linea_der_blue)
ws.row(xi).height = 500
ws.col(0).width = 2000
ws.col(1).width = 5000
ws.col(2).width = 7000
ws.col(3).width = 5000
ws.col(4).width = 3000
ws.col(5).width = 3000
ws.col(6).width = 5000
ws.col(7).width = 9000
ws.col(8).width = 3000
ws.col(9).width = 3000
ws.col(10).width = 3000
try:
buf = cStringIO.StringIO()
wb.save(buf)
out = base64.encodestring(buf.getvalue())
buf.close()
data_fname = "Historial cuenta proveedores.xls"
archivo = '/opt/temp/' + data_fname
res_model = 'statement.account.supplier'
self.load_doc(out, data_fname, res_model)
return self.write({'data': out, 'txt_filename': data_fname, 'name': data_fname})
except ValueError:
raise Warning('Error a la hora de salvar el archivo')
@api.one
def get_supplier(self,dateto):
parnert_list = []
query = {}
partner_id = []
cuent = []
user = self.env['res.users'].browse(self._uid)
company_id= user.company_id.id
account_id = self.account_id
if self.partner_ids:
partner_id = [part.id for part in self.partner_ids]
account_obj = self.env['account.account'].search([('id','=',account_id.id)]) # (account_id.id,['type'])
if account_obj['type'] == 'view':
sqlaccount = "SELECT id FROM account_account WHERE parent_id in (%s)"%(account_id.id)
self._cr.execute(sqlaccount)
cuent1 = self._cr.dictfetchall()
for ct in cuent1:
cuent.append(ct['id'])
elif account_obj['type']== 'payable':
cuent.append(account_id.id)
# if partner_id:
#
# sql = "select a.part_number, a.id, a.name from res_partner a where a.supplier = True and a.part_number is not null and a.id in (%s) " \
# "group by a.part_number, a.id, a.name order by a.name" %\
# (','.join(str(i) for i in partner_id))
# else:
# sql = "select a.part_number, a.id, a.name from res_partner a where a.supplier = True and a.part_number is not null " \
# "group by a.part_number, a.id, a.name order by a.name"
#
# print "sql ", sql
# self._cr.execute(sql)
# supplier = self._cr.dictfetchall()
#
# for supp in supplier:
# list = {}
# list['id'] = supp['id']
# list['name'] = supp['name']
# list['ruc'] = supp['part_number']
# list['company_id'] = company_id
# parnert_list.append(list)
if len(cuent) <= 0:
            raise except_orm('Error!', 'Cuenta contable no tiene configuracion para reporte cuentas por pagar (view, payable)')
if partner_id:
partner = self.env['res.partner'].search([('property_account_payable','in',cuent),('supplier','=', True),('id','in', partner_id)])
else:
partner = self.env['res.partner'].search([('property_account_payable','in',cuent),('supplier','=', True)])
for supp in partner:
list = {}
list['id'] = supp.id
list['name'] = supp.name or 'S/N'
list['ruc'] = supp.part_number or 'S/R'
list['account'] = supp.property_account_payable.id or 'S/A'
parnert_list.append(list)
return parnert_list
@api.one
def get_lines(self,partner, datefrom, dateto, account):
where = []
list = []
invoice_list = []
whereprov = []
if datefrom:
where.append(('date_invoice','>=',datefrom))
whereprov.append(('date_invoice','>=',datefrom))
if dateto:
where.append(('date_invoice','<=',dateto))
whereprov.append(('date_invoice','<=',dateto))
if where:
where.append(('state','in',['open','paid']))
where.append(('type', 'in', ['in_invoice','in_refund']))
where.append(('partner_id','=',partner))
lines_invoice = self.env['account.invoice'].search(where,order='number_seq asc')
saldo = 0.00
        # Provision invoices
whereprov.append(('type','in',['in_invoice']))
whereprov.append(('partner_id','=',partner))
whereprov.append(('state_provision','in',['prov','rever']))
lines_invoice_prov = self.env['account.invoice'].search(whereprov,order='number_seq asc')
        # Opening balances
# saldo += self.get_saldo(partner, datetime.strptime('2016-01-01','%Y-%m-%d'), datefrom)
saldo += self.get_saldo(partner, '2016-01-01', datefrom, account)
if saldo != 0:
saldolist = {}
saldolist['id'] = 1
saldolist['documenttype'] = 'SALDOS INICIALES'
saldolist['invoicenumber'] = ''
saldolist['transacctionnumber'] = ''
saldolist['date'] = self.datefrom
saldolist['duedate'] = ''
saldolist['journal'] = ''
saldolist['description'] = ''
if saldo > 0:
saldolist['debit'] = saldo
saldolist['credit'] = 0.00
elif saldo < 0:
saldolist['debit'] = 0.00
saldolist['credit'] = abs(saldo)
elif saldo == 0:
saldolist['debit'] = saldo
saldolist['credit'] = saldo
saldolist['saldo'] = round(saldo,2)
invoice_list.append(saldolist)
        # Opening-balance journal entries
wherejournal = []
company = self.company_id
account_ids = [account] # [acc.id for acc in company.payable_ids]
# account_ids.append([part.id for part in company.advsuppl_account_id])
wherejournal.append(('account_id','in',account_ids))
if where:
wherejournal.append(('partner_id','=',partner))
journal = self.env['account.journal'].search([('code','=', 'DSIN')])
wherejournal.append(('journal_id','=',journal.id))
moves = self.env['account.move.line'].search(wherejournal, order="partner_id asc, date_created asc")
for mov in moves:
if mov.move_id.date <= dateto and mov.move_id.date >= datefrom :
movlist = {}
movlist['id'] = mov.id
movlist['documenttype'] = mov.journal_id.name
movlist['invoicenumber'] = ''
movlist['transacctionnumber'] = ''
movlist['date'] = mov.move_id.date
movlist['duedate'] = ''
movlist['journal'] = mov.move_id.name
movlist['description'] = mov.name
movlist['debit'] = round(mov.debit,2)
movlist['credit'] = round(mov.credit,2)
saldo += round(movlist['debit'] - movlist['credit'],2)
movlist['saldo'] = round(saldo,2)
invoice_list.append(movlist)
wherereconcile = []
if mov.reconcile_id.id:
wherereconcile.append(('reconcile_id','=',mov.reconcile_id.id))
elif mov.reconcile_partial_id.id:
wherereconcile.append(('reconcile_partial_id','=',mov.reconcile_partial_id.id))
if wherereconcile:
partial_reconcile = self.env['account.move.line'].search(wherereconcile)
amount = 0
for parrec in partial_reconcile:
if parrec.debit > 0 and parrec.move_id.date <= dateto and parrec.move_id.date >= datefrom:
parlist = {}
parlist['id'] = parrec.id
parlist['documenttype'] = parrec.journal_id.name
parlist['invoicenumber'] = ''
parlist['transacctionnumber'] = ''
parlist['date'] = parrec.move_id.date
parlist['duedate'] = ''
parlist['journal'] = parrec.move_id.name
parlist['description'] = parrec.ref
parlist['debit'] = abs(round(parrec.debit,2))
parlist['credit'] = abs(round(parrec.credit,2))
saldo += round(parlist['debit'],2)
parlist['saldo'] = round(saldo,2)
invoice_list.append(parlist)
        # Add journal payments, withholdings and payments dated after the invoice
# invoice_after = self.get_lines_after(partner,datetime.strptime('2016-01-01','%Y-%m-%d'), datefrom,)[0]
# invoice_after = self.get_lines_after(partner, datetime.strptime('2016-01-01','%Y-%m-%d'), self.datefrom)[0]
invoice_after = self.get_lines_after(partner, '2016-01-01', self.datefrom, account)[0]
for invaft in invoice_after:
invaftlis = {}
invaftlis['id'] = invaft.get('id')
invaftlis['documenttype'] = invaft.get('documenttype')
invaftlis['invoicenumber'] = invaft.get('invoicenumber')
invaftlis['transacctionnumber'] = invaft.get('transacctionnumber')
invaftlis['date'] = invaft.get('date')
invaftlis['duedate'] = invaft.get('duedate')
invaftlis['journal'] = invaft.get('journal')
invaftlis['description'] = invaft.get('description')
invaftlis['debit'] = invaft.get('debit')
invaftlis['credit'] = invaft.get('credit')
saldo += round(invaftlis['debit'] - invaftlis['credit'],2)
invaftlis['saldo'] = saldo
invoice_list.append(invaftlis)
invoice_order = []
for line in lines_invoice:
invoice_order.append({'id':line.id, 'date_invoice':line.date_invoice, 'type':'F'})
for lineprov in lines_invoice_prov:
if lineprov.prov_id:
invoice_order.append({'id':lineprov.id, 'date_invoice': lineprov.prov_id.date, 'type':'P'})
if invoice_order:
invoice_order.sort(key=lambda x: x['date_invoice'])
        # Regular invoices
for invord in invoice_order:
if invord['type'] == 'F':
lines_invoice = self.env['account.invoice'].search([('id','=', invord['id'])])
for line in lines_invoice:
invlist = {}
invlist['id'] = line.id
invlist['documenttype'] = line.document_type.name
invlist['invoicenumber'] = line.number_seq
invlist['transacctionnumber'] = line.number_seq
invlist['date'] = line.date_invoice
invlist['duedate'] = line.date_due
invlist['journal'] = line.move_id.name
invlist['description'] = str(line.move_id.name)
if line.type == 'in_invoice':
invlist['debit'] = 0.00
v_taxes = self._get_supplier_iva_taxes(line.id)
invlist['credit'] = v_taxes['amount_total_iva'] #round(line.amount_total_iva,2)
elif line.type == 'in_refund':
invlist['debit'] = round(line.amount_total,2)
invlist['credit'] = 0.00
saldo += round(invlist['debit'] - invlist['credit'],2)
invlist['saldo'] = saldo
invoice_list.append(invlist)
                # Withholding assumed by the invoice itself
# if line.is_asum and line.account_retiva:
# move = self.env['account.move.line'].search([('move_id','=',line.move_id.id),('account_id','=',line.account_retiva.id)])
# if move:
# invasum = {}
# invasum['id'] = move.id
# invasum['documenttype'] = move.name
# invasum['invoicenumber'] = '' #line.number_seq
# invasum['transacctionnumber'] = line.number_seq
# invasum['date'] = line.date_invoice
# invasum['duedate'] = line.date_due
# invasum['journal'] = line.move_id.name
# invasum['description'] = move.name
# invasum['debit'] = 0.00
# invasum['credit'] = abs(round(move.debit,2))
# saldo += round(invasum['debit'],2) - round(invasum['credit'],2)
# invasum['saldo'] = saldo
# invoice_list.append(invasum)
                # Taxes (withholdings)
deduction = self.env['account.deduction'].search([('id','=',line.deduction_id.id),('state','in',['open','paid']),('emission_date','<=',dateto)])
if deduction:
taxinvoice = self.env['account.invoice.tax'].search([('deduction_id','=',deduction.id),])
for tax in taxinvoice:
taxeslist = {}
taxeslist['id'] = tax.id
taxeslist['documenttype'] = 'Retencion a proveedores' if deduction.type == 'supplier' else '----'
taxeslist['invoicenumber'] = '' #line.number_seq
taxeslist['transacctionnumber'] = deduction.number
taxeslist['date'] = deduction.emission_date
taxeslist['duedate'] = deduction.emission_date
taxeslist['journal'] = deduction.move_id.name
taxeslist['description'] = tax.name
taxeslist['debit'] = abs(round(tax.tax_amount,2))
taxeslist['credit'] = 0.00
saldo += round(taxeslist['debit'] - taxeslist['credit'],2)
taxeslist['saldo'] = saldo
invoice_list.append(taxeslist)
                # Payments
for pay in line.payment_ids.sorted(reverse= True):
if pay.date <= dateto:
paylist = {}
paylist['id'] = pay.id
paylist['documenttype'] = 'Pagos'
paylist['invoicenumber'] = '' #line.number_seq
paylist['transacctionnumber'] = str(pay.id) + ' - ' + pay.reconcile_id.name if pay.reconcile_id else pay.reconcile_partial_id.name
paylist['date'] = pay.date
paylist['duedate'] = pay.date
paylist['journal'] = pay.move_id.name
paylist['description'] = pay.ref
paylist['debit'] = round(pay.debit,2)
paylist['credit'] = round(pay.credit,2)
saldo += round(paylist['debit'] - paylist['credit'],2)
paylist['saldo'] = round(saldo,2)
invoice_list.append(paylist)
elif invord['type'] == 'P':
lines_invoice_prov = self.env['account.invoice'].search([('id','=', invord['id'])])
                # Provision invoice and its reversal
for lineprov in lines_invoice_prov:
if lineprov.prov_id:
invlistprov = {}
invlistprov['id'] = lineprov.id
invlistprov['documenttype'] = lineprov.document_type.name + '-' + 'Prov'
invlistprov['invoicenumber'] = lineprov.prov_id.name
invlistprov['transacctionnumber'] = lineprov.prov_id.name
invlistprov['date'] = lineprov.prov_id.date
invlistprov['duedate'] = lineprov.date_due
invlistprov['journal'] = lineprov.prov_id.name
invlistprov['description'] = lineprov.comment
invlistprov['debit'] = 0.00
invlistprov['credit'] = round(lineprov.amount_total,2)
saldo += round(invlistprov['debit'] - invlistprov['credit'],2)
invlistprov['saldo'] = saldo
invoice_list.append(invlistprov)
if lineprov.provrev_id:
if lineprov.provrev_id.date >= datefrom and lineprov.provrev_id.date <= dateto:
invlistprov = {}
invlistprov['id'] = lineprov.id
invlistprov['documenttype'] = lineprov.document_type.name + '-' + 'Rev'
invlistprov['invoicenumber'] = lineprov.provrev_id.name
invlistprov['transacctionnumber'] = lineprov.provrev_id.name
invlistprov['date'] = lineprov.provrev_id.date
invlistprov['duedate'] = lineprov.date_due
invlistprov['journal'] = lineprov.provrev_id.name
invlistprov['description'] = lineprov.comment
invlistprov['debit'] = round(lineprov.amount_total,2)
invlistprov['credit'] = 0.00
saldo += round(invlistprov['debit'] - invlistprov['credit'],2)
invlistprov['saldo'] = saldo
invoice_list.append(invlistprov)
list.append(invoice_list)
list.append(saldo)
return list
@api.one
def get_lines_after(self,partner, datefrom, dateto, account):
where = []
list = []
invoice_list = []
whereprov = []
if datefrom:
where.append(('date_invoice','>=',datefrom))
whereprov.append(('date_invoice','>=',datefrom))
if dateto:
where.append(('date_invoice','<',dateto))
whereprov.append(('date_invoice','<',dateto))
if where:
where.append(('state','in',['open','paid']))
where.append(('type', 'in', ['in_invoice','in_refund']))
where.append(('partner_id','=',partner))
lines_invoice = self.env['account.invoice'].search(where,order='number_seq asc')
saldo = 0.00
        # Provision invoices
whereprov.append(('type','in',['in_invoice']))
whereprov.append(('partner_id','=',partner))
whereprov.append(('state_provision','in',['prov','rever']))
lines_invoice_prov = self.env['account.invoice'].search(whereprov,order='number_seq asc')
        # Opening-balance journal entries
wherejournal = []
company = self.company_id
account_ids = [account] #[acc.id for acc in company.payable_ids]
# account_ids.append([part.id for part in company.advsuppl_account_id])
wherejournal.append(('account_id','in',account_ids))
if where:
wherejournal.append(('partner_id','=',partner))
journal = self.env['account.journal'].search([('code','=', 'DSIN')])
wherejournal.append(('journal_id','=',journal.id))
moves = self.env['account.move.line'].search(wherejournal, order="partner_id asc, date_created asc")
for mov in moves:
if mov.move_id.date < dateto and mov.move_id.date >= datefrom :
wherereconcile = []
if mov.reconcile_id.id:
wherereconcile.append(('reconcile_id','=',mov.reconcile_id.id))
elif mov.reconcile_partial_id.id:
wherereconcile.append(('reconcile_partial_id','=',mov.reconcile_partial_id.id))
if wherereconcile:
partial_reconcile = self.env['account.move.line'].search(wherereconcile)
amount = 0
for parrec in partial_reconcile:
if parrec.debit > 0 and parrec.move_id.date >= dateto :
parlist = {}
parlist['id'] = parrec.id
parlist['documenttype'] = parrec.journal_id.name
parlist['invoicenumber'] = ''
parlist['transacctionnumber'] = ''
parlist['date'] = parrec.move_id.date
parlist['duedate'] = ''
parlist['journal'] = parrec.move_id.name
parlist['description'] = parrec.ref
parlist['debit'] = round(parrec.debit,2)
parlist['credit'] = round(parrec.credit,2)
saldo += round(parlist['debit'],2)
parlist['saldo'] = round(saldo,2)
invoice_list.append(parlist)
        # Invoices
for line in lines_invoice:
            # Withholding assumed by the invoice itself
# if line.is_asum and line.account_retiva:
# move = self.env['account.move.line'].search([('move_id','=',line.move_id.id),('account_id','=',line.account_retiva.id)])
# if move:
# invasum = {}
# invasum['id'] = move.id
# invasum['documenttype'] = move.name
# invasum['invoicenumber'] = '' #line.number_seq
# invasum['transacctionnumber'] = line.number_seq
# invasum['date'] = line.date_invoice
# invasum['duedate'] = line.date_due
# invasum['journal'] = line.move_id.name
# invasum['description'] = move.name
# invasum['debit'] = 0.00
# invasum['credit'] = abs(round(move.debit,2))
# saldo += round(invasum['debit'],2) - round(invasum['credit'],2)
# invasum['saldo'] = saldo
# invoice_list.append(invasum)
            # Taxes (withholdings)
deduction = self.env['account.deduction'].search([('id','=',line.deduction_id.id),('state','in',['open','paid']),('emission_date','>=',dateto)])
if deduction:
taxinvoice = self.env['account.invoice.tax'].search([('deduction_id','=',deduction.id),])
for tax in taxinvoice:
taxeslist = {}
taxeslist['id'] = tax.id
taxeslist['documenttype'] = 'Retencion a proveedores' if deduction.type == 'supplier' else '----'
taxeslist['invoicenumber'] = line.number_seq
taxeslist['transacctionnumber'] = deduction.number
taxeslist['date'] = deduction.emission_date
taxeslist['duedate'] = deduction.emission_date
taxeslist['journal'] = deduction.move_id.name
taxeslist['description'] = tax.name
taxeslist['debit'] = abs(round(tax.tax_amount,2))
taxeslist['credit'] = 0.00
saldo += round(taxeslist['debit'] - taxeslist['credit'],2)
taxeslist['saldo'] = saldo
invoice_list.append(taxeslist)
            # Payments
for pay in line.payment_ids.sorted(reverse= True):
if pay.date >= dateto:
paylist = {}
paylist['id'] = pay.id
paylist['documenttype'] = 'Pagos'
paylist['invoicenumber'] = line.number_seq
paylist['transacctionnumber'] = str(pay.id) + ' - ' + pay.reconcile_id.name if pay.reconcile_id else pay.reconcile_partial_id.name
paylist['date'] = pay.date
paylist['duedate'] = pay.date
paylist['journal'] = pay.move_id.name
paylist['description'] = pay.ref
paylist['debit'] = round(pay.debit,2)
paylist['credit'] = round(pay.credit,2)
saldo += round(paylist['debit'] - paylist['credit'],2)
paylist['saldo'] = round(saldo,2)
invoice_list.append(paylist)
        # Provision invoice and its reversal
for lineprov in lines_invoice_prov:
if lineprov.state_provision == 'rever' and lineprov.provrev_id:
if lineprov.provrev_id.date >= dateto:
invlistprov = {}
invlistprov['id'] = lineprov.id
invlistprov['documenttype'] = lineprov.document_type.name + '-' + 'Rev'
invlistprov['invoicenumber'] = lineprov.provrev_id.name
invlistprov['transacctionnumber'] = lineprov.provrev_id.name
invlistprov['date'] = lineprov.provrev_id.date
invlistprov['duedate'] = lineprov.date_due
invlistprov['journal'] = lineprov.provrev_id.name
invlistprov['description'] = lineprov.comment
invlistprov['debit'] = round(lineprov.amount_total,2)
invlistprov['credit'] = 0.00
saldo += round(invlistprov['debit'] - invlistprov['credit'],2)
invlistprov['saldo'] = saldo
invoice_list.append(invlistprov)
return invoice_list
def get_saldo(self,partner, datefrom, dateto, account):
where = []
list = []
invoice_list = []
whereprov = []
if datefrom:
where.append(('date_invoice','>=',datefrom))
whereprov.append(('date_invoice','>=',datefrom))
if dateto:
where.append(('date_invoice','<',dateto))
whereprov.append(('date_invoice','<',dateto))
if where:
where.append(('state','in',['open','paid']))
where.append(('type', 'in', ['in_invoice','in_refund']))
where.append(('partner_id','=',partner))
lines_invoice = self.env['account.invoice'].search(where,order='number_seq asc')
saldo = 0.00
whereprov.append(('type','in',['in_invoice']))
whereprov.append(('partner_id','=',partner))
whereprov.append(('state_provision','in',['prov','rever']))
lines_invoice_prov = self.env['account.invoice'].search(whereprov,order='number_seq asc')
        # Opening-balance journal entries
wherejournal = []
company = self.company_id
account_ids = [account] # [acc.id for acc in company.payable_ids]
# account_ids.append([part.id for part in company.advsuppl_account_id])
wherejournal.append(('account_id','in',account_ids))
if where:
wherejournal.append(('partner_id','=',partner))
journal = self.env['account.journal'].search([('code','=', 'DSIN')])
wherejournal.append(('journal_id','=',journal.id))
moves = self.env['account.move.line'].search(wherejournal, order="partner_id asc, date_created asc")
for mov in moves:
if mov.move_id.date < dateto and mov.move_id.date >= datefrom :
saldo += round(mov.debit - mov.credit,2)
wherereconcile = []
if mov.reconcile_id.id:
wherereconcile.append(('reconcile_id','=',mov.reconcile_id.id))
elif mov.reconcile_partial_id.id:
wherereconcile.append(('reconcile_partial_id','=',mov.reconcile_partial_id.id))
if wherereconcile:
partial_reconcile = self.env['account.move.line'].search(wherereconcile)
amount = 0
for parrec in partial_reconcile:
if parrec.debit > 0 and parrec.move_id.date < dateto and parrec.move_id.date >= datefrom :
saldo += round(parrec.debit,2)
        # Invoices
for line in lines_invoice:
# saldo += round(0.00 - line.amount_total,2)
if line.type == 'in_invoice':
v_taxes = self._get_supplier_iva_taxes(line.id)
saldo += round(0.00 - v_taxes['amount_total_iva'],2)
elif line.type == 'in_refund':
saldo += round(round(line.amount_total,2) - 0.00,2)
            # Taxes (withholdings)
deduction = self.env['account.deduction'].search([('id','=',line.deduction_id.id),('state','in',['open','paid']),('emission_date','<',dateto)])
if deduction:
taxinvoice = self.env['account.invoice.tax'].search([('deduction_id','=',deduction.id),])
for tax in taxinvoice:
saldo += round(tax.tax_amount - 0.00,2)
            # Payments
for pay in line.payment_ids.sorted(reverse= True):
if pay.date < dateto:
saldo += round(pay.debit - pay.credit,2)
        # Provision invoice and its reversal
for lineprov in lines_invoice_prov:
if lineprov.prov_id:
saldo += 0.00 - round(lineprov.amount_total,2)
if lineprov.provrev_id:
if lineprov.provrev_id.date < dateto:
saldo += round(lineprov.amount_total,2) - 0.00
return saldo
@api.one
def load_doc(self, out, data_fname, res_model):
attach_vals = {
'name': data_fname,
'datas_fname': data_fname,
'res_model': res_model,
'datas': out,
'type': 'binary',
'file_type': 'file_type',
}
if self.id:
attach_vals.update( {'res_id': self.id})
self.env['ir.attachment'].create(attach_vals)
def _get_supplier_iva_taxes(self,id):
        # Initialize the values this helper returns
subtotal = tax_iva = amount_other = base_sin_iva = base_iva = 0.0
res = {}
invoice_list = self.env['account.invoice'].search([('id','=',id)])
for invoice in invoice_list:
for line in invoice.invoice_line:
if line.price_subtotal:
for tax in line.invoice_line_tax_id:
value = 0
if tax.child_ids: # Para los que tengan hijos
for c in tax.child_ids:
value += tax.amount * c.amount * line.price_subtotal
else:
value = tax.amount * line.price_subtotal
                        # CSV: VAT on purchase invoices (tax form codes)
if tax.is_iva and not tax.description == '507' and line.invoice_id.type == 'in_invoice':
tax_iva += value # IVA siempre es solo 1
base_iva += line.price_subtotal
elif tax.description == '507' and line.invoice_id.type == 'in_invoice':
base_sin_iva += line.price_subtotal
else:
amount_other += value # Sumo el resto de los taxes
                        # CSV: VAT on sales invoices (tax form codes)
if tax.is_iva and not tax.description == '403' and line.invoice_id.type == 'out_invoice':
tax_iva += value # IVA siempre es solo 1
base_iva += line.price_subtotal
elif tax.description == '403' and line.invoice_id.type == 'out_invoice':
base_sin_iva += line.price_subtotal
subtotal += line.price_subtotal
        res = {
            'base_iva': round(base_iva, 2),
            'base_sin_iva': round(base_sin_iva, 2),
            'amount_iva': round(tax_iva, 2),
            'amount_total_iva': round(subtotal + tax_iva, 2),
            'amount_other': round(amount_other, 2)
        }
return res
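
    # Sketch of a typical call (illustrative; the record values are assumed
    # placeholders, and the 'data'/'txt_filename'/'name' fields written at the
    # end of generar() are expected to be provided by an inheriting model):
    #
    #     wizard = self.env['statement.account.supplier'].create({
    #         'company_id': company.id,
    #         'datefrom': '2016-01-01',
    #         'dateto': '2016-12-31',
    #         'account_id': payable_account.id,
    #     })
    #     wizard.generar()   # builds the XLS and attaches it via load_doc()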
| [
"[email protected]"
] | |
975e35f4d46d96b077ccec902d83a50f0befc10b | 6725eff72a6cf04c9cf62cb6f7f9df6373d5ceb5 | /backend/eplan_18788/settings.py | fb957dea65dc264ef337508f1c3991c3f454f306 | [] | no_license | crowdbotics-apps/eplan-18788 | ae064dc4fc95bb3f2eb2218402cf7375fd6b7273 | 652b38c70d36fe84eebd335dffed30b8e3ade581 | refs/heads/master | 2022-11-16T06:07:53.552260 | 2020-07-10T09:38:39 | 2020-07-10T09:38:39 | 278,595,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,785 | py | """
Django settings for eplan_18788 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'eplan_18788.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'eplan_18788.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
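
# Example local environment (illustrative; the variable names follow the
# env.* lookups above, and the values are placeholders):
#   DEBUG=True SECRET_KEY=dev-secret DATABASE_URL=postgres://user:pass@localhost/db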
| [
"[email protected]"
] | |
3be018c394015f43dc2ee52d2ece2a6651fa046e | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/l06.py | 515136658f604c66165a4ec3da41969bbaff7f8b | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # The payload must be wrapped in standalone quote tokens: " word1 word2 ... "
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: drop the opening and closing quote tokens
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'l06':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
54a2ac39d5f82bb0261de10d4bd2935a611a6881 | e3beccff804b034047dc50e0247c28b1606c7fdb | /bogo_sort.py | 061ff6d79acf27c1b15d6d8a18c97d2dd614c9bf | [] | no_license | Rutrle/algorithms | 0434249a3d9616cc478697c78327f643166db3e7 | bfd5237c6420b84b3e43d321530dc4778fdd79ca | refs/heads/master | 2023-09-01T09:10:20.618467 | 2021-11-02T21:49:37 | 2021-11-02T21:49:37 | 357,696,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | import random
import sys
numbers = [1, 5, 8, 44, 6, 45, 468]
def is_sorted(values):
for index in range(len(values)-1):
if values[index] > values[index+1]:
return False
return True
def bogo_sort(values):
    # Shuffle until sorted: for distinct values the expected number of
    # shuffles is n!, so this is only practical for tiny lists.
    attempts = 0
while not is_sorted(values):
random.shuffle(values)
print(values)
attempts += 1
print(attempts)
return values
print(bogo_sort(numbers))
| [
"[email protected]"
] | |
32199bf70c40c64d9497a96e4c056000b9c9a54f | 18508cea9458b2879017b44e6f18520cd8cf4f6c | /UCMDBPython/src/plugin_ntcmd_file_version.py | be371b707921cbbacd41e47fc3580ff9493e7e73 | [] | no_license | kvt11/dd-git | 7d4935962e06d835ad0023c4abb185876a5a9e77 | 49aafa7081b861c5f6d0e1753b425e78948116d0 | refs/heads/master | 2022-11-23T19:03:19.763423 | 2016-04-04T14:54:18 | 2016-04-04T14:54:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | #coding=utf-8
import file_ver_lib
from plugins import Plugin
class FileVersionInformationPluginByNTCMD(Plugin):
def __init__(self):
Plugin.__init__(self)
def isApplicable(self, context):
client = context.client
if client.isWinOs():
return 1
else:
return 0
def process(self, context):
client = context.client
applicationOsh = context.application.getOsh()
processes = context.application.getProcesses()
for process in processes:
fullFileName = process.executablePath
if fullFileName:
fileVer = file_ver_lib.getWindowsWMICFileVer(client, fullFileName)
if not fileVer:
fileVer = file_ver_lib.getWindowsShellFileVer(client, fullFileName)
if fileVer:
applicationOsh.setAttribute("application_version_number", fileVer)
break
| [
"[email protected]"
] | |
7a8a85dc5a3a16c2d803052a8d902c7eb41278e1 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/rasbt_mlxtend/mlxtend-master/mlxtend/_base/_classifier.py | c28a3668f82e55c395cafb7d08c381bc0104d08e | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 3,243 | py | # Sebastian Raschka 2014-2017
# mlxtend Machine Learning Library Extensions
#
# Base Classifier (Classifier Parent Class)
# Author: Sebastian Raschka <sebastianraschka.com>
#
# License: BSD 3 clause
import numpy as np
class _Classifier(object):
def __init__(self):
pass
def _check_target_array(self, y, allowed=None):
if not np.issubdtype(y[0], int):
raise AttributeError('y must be an integer array.\nFound %s'
% y.dtype)
found_labels = np.unique(y)
if (found_labels < 0).any():
raise AttributeError('y array must not contain negative labels.'
'\nFound %s' % found_labels)
if allowed is not None:
found_labels = tuple(found_labels)
if found_labels not in allowed:
raise AttributeError('Labels not in %s.\nFound %s'
% (allowed, found_labels))
def score(self, X, y):
""" Compute the prediction accuracy
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (true class labels).
Returns
---------
acc : float
The prediction accuracy as a float
between 0.0 and 1.0 (perfect score).
"""
y_pred = self.predict(X)
acc = np.sum(y == y_pred, axis=0) / float(X.shape[0])
return acc
def fit(self, X, y, init_params=True):
"""Learn model from training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
init_params : bool (default: True)
Re-initializes model parameters prior to fitting.
Set False to continue training with weights from
a previous model fitting.
Returns
-------
self : object
"""
self._is_fitted = False
self._check_arrays(X=X, y=y)
self._check_target_array(y)
        if hasattr(self, 'random_seed') and self.random_seed:
self._rgen = np.random.RandomState(self.random_seed)
self._fit(X=X, y=y, init_params=init_params)
self._is_fitted = True
return self
def predict(self, X):
"""Predict targets from X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
target_values : array-like, shape = [n_samples]
Predicted target values.
"""
self._check_arrays(X=X)
if not self._is_fitted:
raise AttributeError('Model is not fitted, yet.')
return self._predict(X)
| [
"[email protected]"
] | |
c0a9380b65fa405b9ecd5afca937d85fc43dff4d | 9be57e13dae005f7138879871cf4deb50bb32d3a | /tests/test_module.py | 6925827ce0e2a141c347c564e0ce65f595c52349 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | shnizzedy/progressivis | 28321e3187b49b9fe034bb1786729a4b15b4a519 | d3e67925253ff3dc34dc72282ac82bb2a9571354 | refs/heads/master | 2021-05-30T21:50:41.702094 | 2016-05-13T07:45:39 | 2016-05-13T07:45:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import unittest
from progressivis import *
class TestProgressiveModule(unittest.TestCase):
def setUp(self):
self.scheduler = Scheduler()
def test_scheduler(self):
self.assertEqual(len(self.scheduler), 0)
def test_module(self):
module = Module(id='a', scheduler=self.scheduler)
self.assertEqual(module.id, 'a')
self.assertEqual(self.scheduler.exists('a'), True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
4e808b55d68fc959b93093eef571a1166e03efdd | d6b99ab3cc7108f4f0cc0be899641ac990e30db9 | /split_string/split_string.py | 2cf9b6dc6a0f81d094a6f334823d09ab0204003e | [] | no_license | AsemAntar/codewars_problems | ef97e8a8058551276cdb943a07474cbeb9353c4d | c0ae0a769e16211c2b8e325d1116a6cebd3be016 | refs/heads/master | 2020-08-10T02:01:12.411030 | 2019-12-15T22:45:20 | 2019-12-15T22:45:20 | 214,229,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | # Author : Asem Antar Abdesamee
# Problem Description:
"""
Complete the solution so that it splits the string into pairs of two characters.
If the string contains an odd number of characters
then it should replace the missing second character of the final pair with an underscore ('_').
Examples:
solution('abc') # should return ['ab', 'c_']
solution('abcdef') # should return ['ab', 'cd', 'ef']
"""
"""
====================================
My Solution
====================================
"""
import re
def solution(s):
sol = []
while s:
if len(s) % 2 == 0:
sol.append(s[:2])
s = s[2:]
else:
s += '_'
sol.append(s[:2])
s = s[2:]
return sol
"""
====================================
Better Solution
====================================
"""
def solution2(s):
return re.findall(".{2}", s + "_")
print(solution2('abcde'))
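# -> ['ab', 'cd', 'e_']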
| [
"[email protected]"
] | |
26e51c4921eb1736c51e781cd828b919a4b4b897 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/1190267/snippet.py | 2688e49f081b03d355fbc37009d8a0b90b919415 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 9,652 | py | #!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Python client library for the Facebook Platform.
This client library is designed to support the Graph API and the official
Facebook JavaScript SDK, which is the canonical way to implement
Facebook authentication. Read more about the Graph API at
http://developers.facebook.com/docs/api. You can download the Facebook
JavaScript SDK at http://github.com/facebook/connect-js/.
If your application is using Google AppEngine's webapp framework, your
usage of this module might look like this:
user = facebook.get_user_from_cookie(self.request.cookies, key, secret)
if user:
graph = facebook.GraphAPI(user["access_token"])
profile = graph.get_object("me")
friends = graph.get_connections("me", "friends")
"""
import cgi
import hashlib
import time
import urllib
# Find a JSON parser
try:
import json
_parse_json = lambda s: json.loads(s)
except ImportError:
try:
import simplejson
_parse_json = lambda s: simplejson.loads(s)
except ImportError:
# For Google AppEngine
from django.utils import simplejson
_parse_json = lambda s: simplejson.loads(s)
class GraphAPI(object):
"""A client for the Facebook Graph API.
See http://developers.facebook.com/docs/api for complete documentation
for the API.
The Graph API is made up of the objects in Facebook (e.g., people, pages,
events, photos) and the connections between them (e.g., friends,
photo tags, and event RSVPs). This client provides access to those
primitive types in a generic way. For example, given an OAuth access
token, this will fetch the profile of the active user and the list
of the user's friends:
graph = facebook.GraphAPI(access_token)
user = graph.get_object("me")
friends = graph.get_connections(user["id"], "friends")
You can see a list of all of the objects and connections supported
by the API at http://developers.facebook.com/docs/reference/api/.
You can obtain an access token via OAuth or by using the Facebook
JavaScript SDK. See http://developers.facebook.com/docs/authentication/
for details.
If you are using the JavaScript SDK, you can use the
get_user_from_cookie() method below to get the OAuth access token
for the active user from the cookie saved by the SDK.
"""
def __init__(self, access_token=None):
self.access_token = access_token
    def get_object(self, id, **args):
        """Fetches the given object from the graph."""
return self.request(id, args)
    def get_objects(self, ids, **args):
        """Fetches all of the given objects from the graph.
We return a map from ID to object. If any of the IDs are invalid,
we raise an exception.
"""
args["ids"] = ",".join(ids)
return self.request("", args)
    def get_connections(self, id, connection_name, **args):
        """Fetches the connections for the given object."""
return self.request(id + "/" + connection_name, args)
def put_object(self, parent_object, connection_name, **data):
"""Writes the given object to the graph, connected to the given parent.
For example,
graph.put_object("me", "feed", message="Hello, world")
writes "Hello, world" to the active user's wall. Likewise, this
will comment on a the first post of the active user's feed:
feed = graph.get_connections("me", "feed")
post = feed["data"][0]
graph.put_object(post["id"], "comments", message="First!")
See http://developers.facebook.com/docs/api#publishing for all of
the supported writeable objects.
Most write operations require extended permissions. For example,
publishing wall posts requires the "publish_stream" permission. See
http://developers.facebook.com/docs/authentication/ for details about
extended permissions.
"""
assert self.access_token, "Write operations require an access token"
return self.request(parent_object + "/" + connection_name, post_args=data)
def put_wall_post(self, message, attachment={}, profile_id="me"):
"""Writes a wall post to the given profile's wall.
We default to writing to the authenticated user's wall if no
profile_id is specified.
attachment adds a structured attachment to the status message being
posted to the Wall. It should be a dictionary of the form:
{"name": "Link name"
"link": "http://www.example.com/",
"caption": "{*actor*} posted a new review",
"description": "This is a longer description of the attachment",
"picture": "http://www.example.com/thumbnail.jpg"}
"""
return self.put_object(profile_id, "feed", message=message, **attachment)
def put_comment(self, object_id, message):
"""Writes the given comment on the given post."""
return self.put_object(object_id, "comments", message=message)
def put_like(self, object_id):
"""Likes the given post."""
return self.put_object(object_id, "likes")
def delete_object(self, id):
"""Deletes the object with the given ID from the graph."""
self.request(id, post_args={"method": "delete"})
def request(self, path, args=None, post_args=None):
"""Fetches the given path in the Graph API.
We translate args to a valid query string. If post_args is given,
we send a POST request to the given path with the given arguments.
"""
if not args: args = {}
if self.access_token:
if post_args is not None:
post_args["access_token"] = self.access_token
else:
args["access_token"] = self.access_token
post_data = None if post_args is None else urllib.urlencode(post_args)
file = urllib.urlopen("https://graph.facebook.com/" + path + "?" +
urllib.urlencode(args), post_data)
try:
response = _parse_json(file.read())
finally:
file.close()
if response.get("error"):
raise GraphAPIError(response["error"]["type"],
response["error"]["message"])
return response
class GraphAPIError(Exception):
def __init__(self, type, message):
Exception.__init__(self, message)
self.type = type
##### NEXT TWO FUNCTIONS PULLED FROM https://github.com/jgorset/facepy/blob/master/facepy/signed_request.py
import base64
import hmac
class SignedRequestError(Exception):
    """Error raised when a signed_request cannot be decoded or verified."""
    pass
def urlsafe_b64decode(str):
"""Perform Base 64 decoding for strings with missing padding."""
l = len(str)
    pl = (4 - l % 4) % 4  # number of '=' characters needed to restore padding
return base64.urlsafe_b64decode(str.ljust(l+pl, "="))
def parse_signed_request(signed_request, secret):
"""
Parse signed_request given by Facebook (usually via POST),
decrypt with app secret.
Arguments:
signed_request -- Facebook's signed request given through POST
    secret -- Application's app_secret required to decrypt signed_request
"""
if "." in signed_request:
esig, payload = signed_request.split(".")
else:
return {}
sig = urlsafe_b64decode(str(esig))
data = _parse_json(urlsafe_b64decode(str(payload)))
if not isinstance(data, dict):
        raise SignedRequestError("Payload is not a JSON object!")
if data["algorithm"].upper() == "HMAC-SHA256":
if hmac.new(secret, payload, hashlib.sha256).digest() == sig:
return data
else:
raise SignedRequestError("Not HMAC-SHA256 encrypted!")
return {}
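# Example usage (hypothetical values; `request` and APP_SECRET are assumptions):
#   data = parse_signed_request(request.POST["signed_request"], APP_SECRET)
#   if data: print data["user_id"]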
def get_user_from_cookie(cookies, app_id, app_secret):
"""Parses the cookie set by the official Facebook JavaScript SDK.
cookies should be a dictionary-like object mapping cookie names to
cookie values.
If the user is logged in via Facebook, we return a dictionary with the
keys "uid" and "access_token". The former is the user's Facebook ID,
and the latter can be used to make authenticated requests to the Graph API.
If the user is not logged in, we return None.
Download the official Facebook JavaScript SDK at
http://github.com/facebook/connect-js/. Read more about Facebook
authentication at http://developers.facebook.com/docs/authentication/.
"""
cookie = cookies.get("fbsr_" + app_id, "")
if not cookie:
return None
response = parse_signed_request(cookie, app_secret)
if not response:
return None
args = dict(
code = response['code'],
client_id = app_id,
client_secret = app_secret,
redirect_uri = '',
)
file = urllib.urlopen("https://graph.facebook.com/oauth/access_token?" + urllib.urlencode(args))
try:
token_response = file.read()
finally:
file.close()
access_token = cgi.parse_qs(token_response)["access_token"][-1]
return dict(
uid = response["user_id"],
access_token = access_token,
)
| [
"[email protected]"
] | |
56e10d0655c2f6f7366ce2a46d971413d855fa76 | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /checkov/common/util/suppression.py | 1240bb00dd13c5e937571f07d45e8b1364fc12da | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 2,199 | py | from __future__ import annotations
import re
from collections.abc import Iterable
from checkov.common.bridgecrew.integration_features.features.policy_metadata_integration import (
integration as metadata_integration,
)
from checkov.common.comment.enum import COMMENT_REGEX
from checkov.common.models.enums import CheckResult
from checkov.common.typing import _CheckResult, _SkippedCheck
def collect_suppressions_for_report(code_lines: list[tuple[int, str]]) -> dict[str, _CheckResult]:
"""Searches for suppressions in a config block to be used in a report"""
suppressions = {}
for _, line in code_lines:
skip_search = re.search(COMMENT_REGEX, line)
if skip_search:
check_result: _CheckResult = {
"result": CheckResult.SKIPPED,
"suppress_comment": skip_search.group(3)[1:] if skip_search.group(3) else "No comment provided",
}
suppressions[skip_search.group(2)] = check_result
return suppressions
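# Illustrative input/output, assuming the usual "checkov:skip" comment form
# matched by COMMENT_REGEX:
#   "# checkov:skip=CKV_AWS_20:public bucket is intentional"
#   -> {"CKV_AWS_20": {"result": CheckResult.SKIPPED,
#                      "suppress_comment": "public bucket is intentional"}}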
def collect_suppressions_for_context(code_lines: Iterable[tuple[int, int | str]]) -> list[_SkippedCheck]:
"""Searches for suppressions in a config block to be used in a context"""
skipped_checks = []
bc_id_mapping = metadata_integration.bc_to_ckv_id_mapping
for line_number, line_text in code_lines:
skip_search = re.search(COMMENT_REGEX, str(line_text))
if skip_search:
skipped_check: _SkippedCheck = {
"id": skip_search.group(2),
"suppress_comment": skip_search.group(3)[1:] if skip_search.group(3) else "No comment provided",
"line_number": line_number
}
# No matter which ID was used to skip, save the pair of IDs in the appropriate fields
if bc_id_mapping and skipped_check["id"] in bc_id_mapping:
skipped_check["bc_id"] = skipped_check["id"]
skipped_check["id"] = bc_id_mapping[skipped_check["id"]]
elif metadata_integration.check_metadata:
skipped_check["bc_id"] = metadata_integration.get_bc_id(skipped_check["id"])
skipped_checks.append(skipped_check)
return skipped_checks
| [
"[email protected]"
] | |
dc832f564f9b36bab6df19320d362a1949d6fd4f | 31e2106bc39a1e99dc4fadb2d597bd7b8cf03de5 | /examples/tutorial/replica_exchange_b.py | a672c2f27d97c1ba3e232559eeb6ad707cb70d27 | [
"MIT"
] | permissive | maxentile/radical.ensemblemd | d483ee27b01f6d41ee5113c1a7aaee599eed4652 | 0ec4b127760d2fee88d4eae1768fecec4bdd6b21 | refs/heads/master | 2021-01-13T07:18:55.256329 | 2016-07-18T17:22:22 | 2016-07-18T17:22:22 | 71,578,966 | 0 | 0 | null | 2016-10-21T15:42:37 | 2016-10-21T15:42:37 | null | UTF-8 | Python | false | false | 12,786 | py | #!/usr/bin/env python
__author__ = "Antons Treikalis <[email protected]>"
__copyright__ = "Copyright 2014, http://radical.rutgers.edu"
__license__ = "MIT"
__example_name__ = "Synchronous Replica Exchange Example with 'remote' \
exchange (generic)."
import os
import sys
import json
import math
import time
import random
import string
import pprint
import optparse
import datetime
from os import path
import radical.pilot
from radical.ensemblemd import Kernel
from radical.ensemblemd import EnsemblemdError
from radical.ensemblemd import SingleClusterEnvironment
from radical.ensemblemd.patterns.replica_exchange import Replica
from radical.ensemblemd.patterns.replica_exchange import ReplicaExchange
#-------------------------------------------------------------------------------
#
class ReplicaP(Replica):
    """Class representing a replica and its associated data.
This will have to be extended by users implementing RE pattern for
a particular kernel and scheme
"""
def __init__(self, my_id, cores=1):
"""Constructor
Arguments:
my_id - integer representing replica's id
cores - number of cores each replica should use
"""
self.id = int(my_id)
self.cores = int(cores)
self.parameter = random.randint(300, 600)
self.cycle = 0
super(ReplicaP, self).__init__(my_id)
class RePattern(ReplicaExchange):
"""In this class are specified details of RE simulation:
- initialization of replicas
- generation of input files
- preparation for MD and exchange steps
- implementation of exchange routines
"""
def __init__(self, workdir_local=None):
"""Constructor
"""
# hardcoded name of the input file base
self.inp_basename = "md_input"
# number of replicas to be launched during the simulation
self.replicas = None
        # number of cycles the simulation will perform
self.nr_cycles = None
self.workdir_local = workdir_local
self.sh_file = 'shared_md_input.dat'
self.shared_urls = []
self.shared_files = []
super(RePattern, self).__init__()
# --------------------------------------------------------------------------
#
def prepare_shared_data(self):
fo = open(self.sh_file, "wb")
for i in range(1,250):
fo.write(str(random.randint(i, 500) + i*2.5) + " ");
fo.write(str(random.choice(string.letters)) + " ");
if i % 10 == 0:
fo.write(str("\n"));
fo.close()
self.shared_files.append(self.sh_file)
url = 'file://%s/%s' % (self.workdir_local, self.sh_file)
self.shared_urls.append(url)
# --------------------------------------------------------------------------
#
def initialize_replicas(self):
"""Initializes replicas and their attributes to default values
"""
try:
self.replicas+1
except:
print "Ensemble MD Toolkit Error: Number of replicas must be \
defined for pattern ReplicaExchange!"
raise
replicas = []
N = self.replicas
for k in range(N):
r = ReplicaP(k)
replicas.append(r)
return replicas
# --------------------------------------------------------------------------
#
def build_input_file(self, replica):
"""Generates dummy input file
Arguments:
        replica - object representing a given replica and its associated
parameters
"""
file_name = self.inp_basename + "_" + \
str(replica.id) + "_" + \
str(replica.cycle) + ".md"
fo = open(file_name, "wb")
for i in range(1,500):
fo.write(str(random.randint(i, 500) + i*2.5) + " ");
if i % 10 == 0:
fo.write(str("\n"));
fo.close()
# --------------------------------------------------------------------------
#
def prepare_replica_for_md(self, replica):
"""Specifies input and output files and passes them to kernel
Arguments:
        replica - object representing a given replica and its associated
parameters
"""
input_name = self.inp_basename + "_" + \
str(replica.id) + "_" + \
str(replica.cycle) + ".md"
output_name = self.inp_basename + "_" + \
str(replica.id) + "_" + \
str(replica.cycle) + ".out"
k = Kernel(name="misc.ccount")
k.arguments = ["--inputfile=" + \
input_name + " " + \
self.sh_file, "--outputfile=" + \
output_name]
# no need to specify shared data here
# everything in shared_files list will be staged in
k.upload_input_data = [input_name]
k.download_output_data = output_name
replica.cycle = replica.cycle + 1
return k
# --------------------------------------------------------------------------
#
def prepare_replica_for_exchange(self, replica):
"""Launches matrix_calculator.py script on target resource in order to
populate columns of swap matrix
Arguments:
        replica - object representing a given replica and its associated
parameters
"""
matrix_col = "matrix_column_{cycle}_{replica}.dat"\
.format(cycle=replica.cycle-1, replica=replica.id )
k = Kernel(name="md.re_exchange")
k.arguments = ["--calculator=matrix_calculator.py",
"--replica_id=" + str(replica.id),
"--replica_cycle=" + str(replica.cycle-1),
"--replicas=" + str(self.replicas),
"--replica_basename=" + self.inp_basename]
k.upload_input_data = "matrix_calculator.py"
k.download_output_data = matrix_col
return k
#---------------------------------------------------------------------------
#
def exchange(self, r_i, replicas, swap_matrix):
"""Given replica r_i returns replica r_i needs to perform an exchange
with
Arguments:
replicas - a list of replica objects
swap_matrix - matrix of dimension-less energies, where each column is
a replica and each row is a state
"""
return random.choice(replicas)
#---------------------------------------------------------------------------
#
def get_swap_matrix(self, replicas, matrix_columns):
"""Creates and populates swap matrix which is used to determine
exchange probabilities
Arguments:
replicas - a list of replica objects
matrix_columns - matrix of energy parameters obtained during the
exchange step
"""
dim = len(replicas)
# init matrix
swap_matrix = [[ 0. for j in range(dim)] for i in range(dim)]
matrix_columns = sorted(matrix_columns)
# checking if matrix columns has enough rows
if (len(matrix_columns) < dim):
print "Ensemble MD Toolkit Error: matrix_columns does not have \
enough rows."
sys.exit()
# checking if matrix columns rows have enough elements
index = 0
for row in matrix_columns:
if (len(row) < dim):
print "Ensemble MD Toolkit Error: matrix_columns row {0} does \
not have enough elements.".format(index)
sys.exit()
index += 1
for r in replicas:
# populating one column at a time
for i in range(len(replicas)):
pos = len(matrix_columns[r.id][i]) - 1
if (matrix_columns[r.id][i][pos].isdigit()):
swap_matrix[i][r.id] = float(matrix_columns[r.id][i])
else:
print "Ensemble MD Toolkit Error: matrix_columns element \
({0},{1}) is not a number.".format(r.id, i)
sys.exit()
return swap_matrix
#---------------------------------------------------------------------------
#
def perform_swap(self, replica_i, replica_j):
"""Performs an exchange of parameters
Arguments:
replica_i - a replica object
replica_j - a replica object
"""
param_i = replica_i.parameter
replica_i.parameter = replica_j.parameter
replica_j.parameter = param_i
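        # e.g. replicas entering with parameters 300 and 450 leave this call
        # holding 450 and 300, respectively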
#---------------------------------------------------------------------------
#
def build_swap_matrix(self, replicas):
"""Creates a swap matrix from matrix_column_x.dat files.
        matrix_column_x.dat - is populated on the target resource and then
transferred back. This file is created for each replica and has data
for one column of swap matrix. In addition to that, this file holds
path to pilot compute unit of the previous run, where reside NAMD output
files for a given replica.
Arguments:
replicas - list of Replica objects
Returns:
swap_matrix - 2D list of lists of dimension-less energies, where each
column is a replica and each row is a state
"""
base_name = "matrix_column"
size = len(replicas)
# init matrix
swap_matrix = [[ 0. for j in range(size)]
for i in range(size)]
for r in replicas:
column_file = base_name + "_" + \
str(r.cycle-1) + "_" + \
str(r.id) + ".dat"
try:
f = open(column_file)
lines = f.readlines()
f.close()
data = lines[0].split()
# populating one column at a time
for i in range(size):
swap_matrix[i][r.id] = float(data[i])
except:
raise
return swap_matrix
# ------------------------------------------------------------------------------
#
if __name__ == "__main__":
# use the resource specified as argument, fall back to localhost
if len(sys.argv) > 2:
print 'Usage:\t%s [resource]\n\n' % sys.argv[0]
sys.exit(1)
elif len(sys.argv) == 2:
resource = sys.argv[1]
else:
resource = 'local.localhost'
try:
with open('%s/config.json'%os.path.dirname(os.path.abspath(__file__))) as data_file:
config = json.load(data_file)
# Create a new static execution context with one resource and a fixed
# number of cores and runtime.
workdir_local = os.getcwd()
cluster = SingleClusterEnvironment(
resource=resource,
cores=1,
walltime=15,
#username=None,
project=config[resource]['project'],
access_schema = config[resource]['schema'],
queue = config[resource]['queue'],
database_url='mongodb://ec2-54-221-194-147.compute-1.amazonaws.com:24242',
database_name='myexps',
)
# Allocate the resources.
cluster.allocate()
# creating RE pattern object
re_pattern = RePattern(workdir_local)
# set number of replicas
re_pattern.replicas = 8
# set number of cycles
re_pattern.nr_cycles = 3
# initializing replica objects
replicas = re_pattern.initialize_replicas()
re_pattern.add_replicas( replicas )
# run RE simulation
cluster.run(re_pattern, force_plugin="replica_exchange.static_pattern_2")
cluster.deallocate()
print "RE simulation finished! Simulation performed {0} cycles for {1} replicas. In your working directory you should".format(re_pattern.nr_cycles, re_pattern.replicas)
print "have {0} md_input_x_y.md files and {0} md_input_x_y.out files where x in {{0,1,2,...{1}}} and y in {{0,1,...{2}}}.".format( (re_pattern.nr_cycles*re_pattern.replicas), (re_pattern.replicas-1), (re_pattern.nr_cycles-1))
print ".md file is replica input file and .out is output file providing number of occurrences of each character."
#-----------------------------------------------------------------------
except EnsemblemdError, er:
print "Ensemble MD Toolkit Error: {0}".format(str(er))
        raise # Just raise the exception again to get the backtrace
| [
"[email protected]"
] | |
6cb5a4e88884421b42513d7f6f813ca06bda2c4b | 9341a1eb44a8ecf9629cd58f746836ce4988acc4 | /Python全栈开发中级/第二模块/第二关课后练习/ATM+Log/__init__.py | 697da9f9492ce176756a86704783a6e6ae4a183c | [] | no_license | liudefang/14_day_training_camp | d846c91759b69b09b185a351c046187582336fbe | e608ceeb1ff47f8f458c1dbfc5974c428d351a2d | refs/heads/master | 2023-03-10T09:20:52.719381 | 2023-02-22T10:32:30 | 2023-02-22T10:32:30 | 128,916,715 | 2 | 0 | null | 2023-01-05T21:57:52 | 2018-04-10T10:40:14 | JavaScript | UTF-8 | Python | false | false | 105 | py | # -*- encoding: utf-8 -*-
# @Time : 2018-05-28 22:44
# @Author : mike.liu
# @File : __init__.py.py | [
"[email protected]"
] | |
5c8b9bbaeea57e3892988d55ad34cbcfa836aba7 | 9d5ae8cc5f53f5aee7247be69142d9118769d395 | /419. Battleships in a Board.py | 0dd34567666bfd5777b15f6d650c4bcb404e31ef | [] | no_license | BITMystery/leetcode-journey | d4c93319bb555a7e47e62b8b974a2f77578bc760 | 616939d1599b5a135747b0c4dd1f989974835f40 | refs/heads/master | 2020-05-24T08:15:30.207996 | 2017-10-21T06:33:17 | 2017-10-21T06:33:17 | 84,839,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py | class Solution(object):
def countBattleships(self, board):
"""
:type board: List[List[str]]
:rtype: int
"""
# Idea: If a point is the head or tail of a ship, only one of its 4 adjacent points is 'X'. Exception: The ship contains only one 'X'.
# If a point is an inner node of a ship, two of its 4 adjacent points are 'X's.
r = 0
length = len(board)
width = len(board[0])
for i in range(0, length):
for j in range(0, width):
if board[i][j] == 'X':
counter = 0
# up
if i > 0 and board[i - 1][j] == 'X':
counter += 1
#down
if i < length - 1 and board[i + 1][j] == 'X':
counter += 1
#left
if j > 0 and board[i][j - 1] == 'X':
counter += 1
#right
if j < width -1 and board[i][j + 1] == 'X':
counter += 1
if counter == 0:
r += 2
elif counter == 1:
r += 1
return r / 2
class Solution_2(object):
def countBattleships(self, board):
"""
:type board: List[List[str]]
:rtype: int
"""
#Idea: Scan the board, once the left and top neighbor of a point are '.'s, this point counts a ship.
r = 0
length = len(board)
width = len(board[0])
for i in range(0, length):
for j in range(0, width):
# Better than solution 1. Only need to judge left and top
if board[i][j] == 'X' and (i == 0 or board[i-1][j] == '.') and (j == 0 or board[i][j-1] == '.'):
r += 1
return r
s = Solution()
print s.countBattleships(['X..X', '...X', '...X'])
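# expected output: 2 (the lone 'X' at the top-left plus the vertical ship in the last column)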
| [
"[email protected]"
] | |
846b82d516567cd220e94d37409b58410512e50e | 2846e9aae639966796395a92bfe8ac06315f22b0 | /leetcode/ino/prob_400/prob_400_nth_digit(2).py | 7f74b0e0dc8cd07d8cb1a46c3033b1ceede09a87 | [] | no_license | sglim/inno-study | 6b8b454da4977be5ffb53d6862f3e8f2177bb077 | 456a3dd62b429037587cd23ed847ac316aa723dd | refs/heads/master | 2020-05-24T07:36:42.717567 | 2017-06-27T02:49:25 | 2017-06-27T02:49:25 | 84,835,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | class Solution(object):
def findNthDigit(self, n):
"""
:type n: int
:rtype: int
"""
if n < 10:
return n
order_idx = 1
temp = n
k = 0
while temp > 0:
k = temp
temp -= order_idx * 9 * 10**(order_idx - 1)
order_idx += 1
order_idx -= 1
# now nth digit is located kth position of order_idx's range
n_include_num = 10**(order_idx - 1) + (k//order_idx) - 1
if k % order_idx != 0:
n_include_num += 1
#target digit is located 'order_idx + 1 - k%order_idx'th from back of n_include_num
back = 0
if k % order_idx == 0:
back = 1
else:
back = (order_idx + 1) - (k % order_idx)
result = 0
for i in range(back):
result = n_include_num % 10
n_include_num //= 10
return result
n = 2147483647
# n = 17
obj = Solution()
print(obj.findNthDigit(n))
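# worked example: for n = 17 the digit stream "1234567891011121314..." has '3'
# (the last digit of 13) in position 17, so findNthDigit(17) returns 3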
# print(2**31-1)
| [
"[email protected]"
] | |
4da63feb93d7c27541efed91f449f67ea88a4fc2 | e906fe8237e5b55b7bef1f7a87884c5924ccd8b1 | /contactmps/context_processors.py | 98c239bbb478844afc111d5142b05c0474145d13 | [
"MIT"
] | permissive | OpenUpSA/contact-mps | ac9a88ef166769d6305e213f3d77191f385c962a | 63d7f86e1b6c9319a4d0344a6125cd22770f34c7 | refs/heads/master | 2022-12-11T07:22:20.942567 | 2020-01-15T13:11:59 | 2020-01-15T13:11:59 | 93,042,651 | 0 | 2 | MIT | 2022-12-08T02:08:08 | 2017-06-01T09:52:56 | JavaScript | UTF-8 | Python | false | false | 750 | py | from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
def general(request):
""" Add some useful settings into the template helpers.
"""
info = {
'BASE_URL': "https://%s" % get_current_site(request).domain,
}
ga_tracking_id = getattr(settings, 'GOOGLE_ANALYTICS_ID', False)
if not settings.DEBUG and ga_tracking_id:
info['GOOGLE_ANALYTICS_ID'] = ga_tracking_id
return info
def is_mobile(request):
useragent = request.META.get('HTTP_USER_AGENT', '').lower()
mobiles = [
'ipad',
'ipod',
'iphone',
'android',
'blackberry',
]
return {
'is_mobile': any(mobile in useragent for mobile in mobiles),
}
| [
"[email protected]"
] | |
198758c611b2f754df74f3b1587d1c8ef5e8c7fd | ec1f8cdbf52bcc5516a833e02ac99301a1664ed9 | /wordclasses/wctool.py | 41aa852ee84d3a9b2979e3e11015d5b6c7da6751 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | senarvi/theanolm | 8fe85dcf07358a331807b9002a56b6089d5f0ff3 | 9904faec19ad5718470f21927229aad2656e5686 | refs/heads/master | 2023-06-24T10:39:21.985241 | 2023-06-12T06:55:26 | 2023-06-12T06:55:26 | 42,454,187 | 95 | 37 | Apache-2.0 | 2020-11-05T11:22:31 | 2015-09-14T14:35:54 | Python | UTF-8 | Python | false | false | 6,849 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import argparse
import logging
from time import time
from theanolm.backend import TextFileType
from wordclasses import TheanoBigramOptimizer, NumpyBigramOptimizer
from theanolm.vocabulary import Vocabulary
from theanolm.vocabulary import compute_word_counts, BigramStatistics
from wordclasses.functions import is_scheduled
def save(optimizer, output_file, output_format):
"""Writes the current classes to a file.
If the output file is seekable, first rewinds and truncates the file.
:type optimizer: BigramOptimizer
:param optimizer: save the current state of this optimizer
:type output_file: file object
:param output_file: a file or stream where to save the classes
:type output_format: str
:param output_format: either "classes" or "srilm-classes" - selects the
output file format
"""
if output_file.seekable():
output_file.seek(0)
output_file.truncate()
for word, class_id, prob in optimizer.words():
if output_format == 'classes':
output_file.write('{} {}\n'.format(word, class_id))
elif output_format == 'srilm-classes':
output_file.write('CLASS-{:05d} {} {}\n'.format(class_id, prob, word))
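    # Example output lines (illustrative):
    #   "classes"        -> "dog 42"
    #   "srilm-classes"  -> "CLASS-00042 0.125 dog"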
def main():
parser = argparse.ArgumentParser(prog='wctool')
argument_group = parser.add_argument_group("files")
argument_group.add_argument(
'--training-set', metavar='FILE', type=TextFileType('r'),
nargs='+', required=True,
help='text or .gz files containing training data (one sentence per '
'line)')
argument_group.add_argument(
'--vocabulary', metavar='FILE', type=TextFileType('r'), default=None,
help='text or .gz file containing a list of words to include in class '
'forming, and possibly their initial classes')
argument_group.add_argument(
'--vocabulary-format', metavar='FORMAT', type=str, default='words',
help='vocabulary format, one of "words" (one word per line, default), '
'"classes" (word and class ID per line), "srilm-classes" (class '
'name, membership probability, and word per line)')
argument_group.add_argument(
'--output-file', metavar='FILE', type=TextFileType('w'), default='-',
help='where to write the word classes (default stdout)')
argument_group.add_argument(
'--output-format', metavar='FORMAT', type=str, default='srilm-classes',
help='format of the output file, one of "classes" (word and class ID '
'per line), "srilm-classes" (default; class name, membership '
'probability, and word per line)')
argument_group.add_argument(
'--output-frequency', metavar='N', type=int, default='1',
help='save classes N times per optimization iteration (default 1)')
argument_group = parser.add_argument_group("optimization")
argument_group.add_argument(
'--num-classes', metavar='N', type=int, default=2000,
help='number of classes to form, if vocabulary is not specified '
'(default 2000)')
argument_group.add_argument(
'--method', metavar='NAME', type=str, default='bigram-theano',
help='method for creating word classes, one of "bigram-theano", '
'"bigram-numpy" (default "bigram-theano")')
argument_group = parser.add_argument_group("logging and debugging")
argument_group.add_argument(
'--log-file', metavar='FILE', type=str, default='-',
help='path where to write log file (default is standard output)')
argument_group.add_argument(
'--log-level', metavar='LEVEL', type=str, default='info',
help='minimum level of events to log, one of "debug", "info", "warn" '
'(default "info")')
argument_group.add_argument(
'--log-interval', metavar='N', type=int, default=1000,
help='print statistics after every Nth word; quiet if less than one '
'(default 1000)')
args = parser.parse_args()
log_file = args.log_file
log_level = getattr(logging, args.log_level.upper(), None)
if not isinstance(log_level, int):
raise ValueError("Invalid logging level requested: " + args.log_level)
log_format = '%(asctime)s %(funcName)s: %(message)s'
if args.log_file == '-':
logging.basicConfig(stream=sys.stdout, format=log_format, level=log_level)
else:
logging.basicConfig(filename=log_file, format=log_format, level=log_level)
if args.vocabulary is None:
word_counts = compute_word_counts(args.training_set)
vocabulary = Vocabulary.from_word_counts(word_counts,
args.num_classes)
for subset_file in args.training_set:
subset_file.seek(0)
else:
vocabulary = Vocabulary.from_file(args.vocabulary,
args.vocabulary_format)
print("Number of words in vocabulary:", vocabulary.num_shortlist_words())
print("Number of word classes:", vocabulary.num_classes())
print("Number of normal word classes:", vocabulary.num_normal_classes)
logging.info("Reading word unigram and bigram statistics.")
statistics = BigramStatistics(args.training_set, vocabulary)
if args.method == 'bigram-theano':
optimizer = TheanoBigramOptimizer(statistics, vocabulary)
elif args.method == 'bigram-numpy':
optimizer = NumpyBigramOptimizer(statistics, vocabulary)
else:
raise ValueError("Invalid method requested: " + args.method)
iteration = 1
while True:
logging.info("Starting iteration %d.", iteration)
num_words = 0
num_moves = 0
for word in vocabulary.words():
start_time = time()
num_words += 1
if optimizer.move_to_best_class(word):
num_moves += 1
duration = time() - start_time
if (args.log_interval >= 1) and \
(num_words % args.log_interval == 0):
logging.info("[%d] (%.1f %%) of iteration %d -- moves = %d, cost = %.2f, duration = %.1f ms",
num_words,
num_words / vocabulary.num_shortlist_words() * 100,
iteration,
num_moves,
optimizer.log_likelihood(),
duration * 100)
if is_scheduled(num_words,
args.output_frequency,
vocabulary.num_shortlist_words()):
save(optimizer, args.output_file, args.output_format)
if num_moves == 0:
break
iteration += 1
logging.info("Optimization finished.")
save(optimizer, args.output_file, args.output_format)
| [
"[email protected]"
] | |
2038dd0e6d0049b70a0b0d8ef36745acc98d4064 | fe826833d207ced7b01d8aef4922da58614846ca | /demo/NavierStokesDrivenCavity.py | 7202e6a95247bd4863a6e921199545fc994f8544 | [
"BSD-2-Clause"
] | permissive | snytav/shenfun | ce56d912a38beef3f4df3072708a1f9aa5370e75 | 67844cb75e21488d7ab43bf0caa21dfbdc057395 | refs/heads/master | 2022-12-14T21:21:44.895648 | 2020-09-16T13:40:54 | 2020-09-16T13:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,818 | py | r"""Solve Navier-Stokes equations for the lid driven cavity using a coupled
formulation
The equations are in strong form
.. math::
    \nu\nabla^2 u - \nabla p &= (u \cdot \nabla) u \\
    \nabla \cdot u &= 0 \\
    u(x, y=1) = (1, 0) \, &\text{ or }\, u(x, y=1) = ((1-x)^2(1+x)^2, 0) \\
    u(x, y=-1) &= 0 \\
    u(x=\pm 1, y) &= 0
In addition we require :math:`\int p \, d\Omega = 0`, which is achieved by
fixing the coefficient :math:`\hat{p}_{0, 0} = 0`.
We use a tensor product space with a composite Legendre basis for the Dirichlet
space and a regular Legendre basis for the pressure space.
To remove all nullspaces we use a P_{N} x P_{N-2} basis, with P_{N-2} for the
pressure.
"""
import os
import sys
import time
import numpy as np
from scipy.sparse.linalg import splu
import sympy
from shenfun import *
assert comm.Get_size() == 1, "Two non-periodic directions only have solver implemented for serial"
Re = 10.
nu = 2./Re
alfa = 0.2 # underrelaxation factor
N = (46, 46)
family = 'Chebyshev'
#family = 'Legendre'
quad = 'GC'
x = sympy.symbols('x', real='True')
D0X = FunctionSpace(N[0], family, quad=quad, bc=(0, 0))
#D1Y = FunctionSpace(N[1], family, quad=quad, bc=(0, 1))
D1Y = FunctionSpace(N[1], family, quad=quad, bc=(0, (1-x)**2*(1+x)**2))
D0Y = FunctionSpace(N[1], family, quad=quad, bc=(0, 0))
PX = FunctionSpace(N[0], family, quad=quad)
PY = FunctionSpace(N[1], family, quad=quad)
# Create tensor product spaces with different combination of bases
V1 = TensorProductSpace(comm, (D0X, D1Y))
V0 = TensorProductSpace(comm, (D0X, D0Y))
P = TensorProductSpace(comm, (PX, PY), modify_spaces_inplace=True)
# To get a P_N x P_{N-2} space, just pick the first N-2 items of the pressure basis
# Note that this effectively sets P_N and P_{N-1} to zero, but still the basis uses
# the same quadrature points as the Dirichlet basis, which is required for the inner
# products.
PX.slice = lambda: slice(0, PX.N-2)
PY.slice = lambda: slice(0, PY.N-2)
# Create vector space for velocity
W1 = VectorSpace([V1, V0])
# Create mixed space for total solution
VQ = CompositeSpace([W1, P]) # for velocity and pressure
# Create padded spaces for nonlinearity
V1p = V1.get_dealiased((1.5, 1.5))
V0p = V0.get_dealiased((1.5, 1.5))
#V1p = V1.get_dealiased(dealias_direct=True)
#V0p = V0.get_dealiased(dealias_direct=True)
#V1p = V1 # Or do not use dealiasing at all. Makes very little difference here
#V0p = V0
W1p = VectorSpace([V1p, V0p])
W0p = VectorSpace([V0p, V0p])
QTp = TensorSpace([W1p, W0p]) # for uiuj
up = TrialFunction(VQ)
vq = TestFunction(VQ)
u, p = up
v, q = vq
# Assemble blocks of the complete block matrix
if family.lower() == 'legendre':
A00 = inner(grad(v), -nu*grad(u))
A01 = inner(div(v), p)
else:
A00 = inner(v, nu*div(grad(u)))
A01 = inner(v, -grad(p))
A10 = inner(q, div(u))
# Extract the boundary matrices
bc_mats = extract_bc_matrices([A00, A01, A10])
# Create Block matrix
M = BlockMatrix(A00+A01+A10)
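# Schematically, M represents the saddle point system
#   [ A00  A01 ] [u]   [b_u]
#   [ A10   0  ] [p] = [b_p]
# with A00 the viscous block, A01 the pressure gradient and A10 the divergence.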
# Create Function to hold solution
uh_hat = Function(VQ).set_boundary_dofs()
ui_hat = uh_hat[0]
# New solution (iterative)
uh_new = Function(VQ).set_boundary_dofs()
ui_new = uh_new[0]
# Compute the constant contribution to rhs due to nonhomogeneous boundary conditions
bh_hat0 = Function(VQ)
BM = BlockMatrix(bc_mats)
bh_hat0 = BM.matvec(-uh_hat, bh_hat0)
bi_hat0 = bh_hat0[0]
# Create regular work arrays for right hand side. (Note that bc part will not be used so we can use Q)
bh_hat = Function(VQ)
# Create arrays to hold velocity vector solution
ui = Array(W1)
uip = Array(W1p)
# Create work arrays for nonlinear part
uiuj = Array(QTp)
uiuj_hat = Function(QTp)
def compute_rhs(ui_hat, bh_hat):
global uip, uiuj, uiuj_hat, W1p
bh_hat.fill(0)
uip = W1p.backward(ui_hat, uip)
uiuj = outer(uip, uip, uiuj)
uiuj_hat = uiuj.forward(uiuj_hat)
bi_hat = bh_hat[0]
bi_hat = inner(v, div(uiuj_hat), output_array=bi_hat)
#bi_hat = inner(grad(v), -uiuj_hat, output_array=bi_hat)
bh_hat += bh_hat0
return bh_hat
uh_hat, Ai = M.solve(bh_hat0, u=uh_hat, constraints=((2, 0, 0),), return_system=True) # Constraint for component 2 of mixed space
Alu = splu(Ai)
uh_new[:] = uh_hat
converged = False
count = 0
max_count = 1000
if 'pytest' in os.environ:
max_count = 1
t0 = time.time()
while not converged:
count += 1
bh_hat = compute_rhs(ui_hat, bh_hat)
uh_new = M.solve(bh_hat, u=uh_new, constraints=((2, 0, 0),), Alu=Alu) # Constraint for component 2 of mixed space
error = np.linalg.norm(ui_hat-ui_new)
uh_hat[:] = alfa*uh_new + (1-alfa)*uh_hat
converged = abs(error) < 1e-11 or count >= max_count
if count % 1 == 0:
print('Iteration %d Error %2.4e' %(count, error))
print('Time ', time.time()-t0)
# Move solution to regular Function
up = Array(VQ)
up = uh_hat.backward(up)
u_, p_ = up
if 'pytest' in os.environ: sys.exit(0)
# Postprocessing
# Solve streamfunction
r = TestFunction(V0)
s = TrialFunction(V0)
S = inner(r, div(grad(s)))
h = inner(r, -curl(ui_hat))
H = la.SolverGeneric2ND(S)
phi_h = H(h)
phi = phi_h.backward()
# Compute vorticity
PX.slice = lambda: slice(0, PX.N)
PY.slice = lambda: slice(0, PY.N)
w_h = Function(P)
w_h = project(curl(ui_hat), P, output_array=w_h)
#p0 = np.array([[0.], [0.]])
#print(w_h.eval(p0)*2)
# Find minimal streamfunction value and position
# by gradually zooming in on mesh
W = 101
converged = False
xmid, ymid = 0, 0
dx = 1
psi_old = 0
count = 0
y, x = np.meshgrid(np.linspace(ymid-dx, ymid+dx, W), np.linspace(xmid-dx, xmid+dx, W))
points = np.vstack((x.flatten(), y.flatten()))
pp = phi_h.eval(points).reshape((W, W))
while not converged:
yr, xr = np.meshgrid(np.linspace(ymid-dx, ymid+dx, W), np.linspace(xmid-dx, xmid+dx, W))
points = np.vstack((xr.flatten(), yr.flatten()))
pr = phi_h.eval(points).reshape((W, W))
xi, yi = pr.argmin()//W, pr.argmin()%W
psi_min, xmid, ymid = pr.min()/2, xr[xi, yi], yr[xi, yi]
err = abs(psi_min-psi_old)
converged = err < 1e-12 or count > 10
psi_old = psi_min
dx = dx/4.
print("%d %d " %(xi, yi) +("%+2.7e "*4) %(xmid, ymid, psi_min, err))
count += 1
import matplotlib.pyplot as plt
#f = open('plot_u_y_Ghia{}.csv'.format(int(Re)))
#g = np.loadtxt(f, skiprows=1, delimiter=',')
#plt.figure()
#y = 2*(g[:, 0]-0.5)
#plt.plot(y, g[:, 1], 'r+')
X = V0.local_mesh(True)
#x = np.vstack([np.zeros(N[0]), X[1][0]])
#res = ui_hat[0].eval(x)
#plt.plot(x[1], res)
#res2 = ui_hat[0].eval(np.vstack([np.zeros(len(y)), y]))
#plt.plot(y, res2, 'bs', mfc='None')
plt.figure()
plt.contourf(X[0], X[1], p_, 100)
plt.figure()
plt.quiver(X[0], X[1], u_[0], u_[1])
plt.figure()
plt.spy(M.diags())
plt.figure()
plt.contourf(X[0], X[1], u_[0], 100)
plt.figure()
plt.contourf(X[0], X[1], u_[1], 100)
#plt.figure()
#plt.contour(x, y, pp, 100)
#plt.title('Streamfunction')
plt.show()
| [
"[email protected]"
] | |
dd6415024aa3cbcb67c72fdd6d5982cb3f90a182 | 5234bc430c83d616a8214d7f77c2c081543b6b26 | /src/Python/801-900/824.GoatLatin.py | 3a2bc8c0e7db0df5b73e1507777865a28111ca71 | [
"Apache-2.0"
] | permissive | AveryHuo/PeefyLeetCode | 3e749b962cadfdf10d7f7b1ed21c5fafc4342950 | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | refs/heads/master | 2022-04-26T06:01:18.547761 | 2020-04-25T09:55:46 | 2020-04-25T09:55:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py |
class Solution:
def toGoatLatin(self, S):
"""
:type S: str
:rtype: str
"""
yuanyin = {'a', 'e', 'i', 'o', 'u', 'I', 'E', 'A', 'O', 'U'}
words = S.split(' ')
for i in range(len(words)):
if words[i][0] in yuanyin:
words[i] += "ma" + "a" * (i + 1)
else:
words[i] = words[i][1:] + words[i][0] + "ma" + "a" * (i + 1)
return ' '.join(words)
if __name__ == '__main__':
solution = Solution()
print(solution.toGoatLatin("I speak Goat Latin"))
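    # -> "Imaa peaksmaaa oatGmaaaa atinLmaaaaa"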
print(solution.toGoatLatin("The quick brown fox jumped over the lazy dog"))
else:
pass
| [
"[email protected]"
] | |
47bf83a160bfc51995106dc38a302578407f955b | c31d440a92b33ad49c6da9c2e2646f4796fe1d0c | /oneflow_onnx/x2oneflow/handler.py | a880dd21084c9f6712768918b9f5ac7e3f8b4384 | [] | no_license | jiangjiajun/oneflow_convert_tools | c4ec79fed35f2f4489039b419e0f7d7f0877c467 | 96696edd940d58187573d7531404e6b5054e3d56 | refs/heads/main | 2023-04-15T03:21:23.828413 | 2021-04-16T09:49:24 | 2021-04-16T09:49:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,967 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import inspect
import os
import shutil
from onnx import defs
import oneflow as flow
import oneflow_api
import oneflow
class BackendHandler:
"""
All operator handler MUST put decorator @onnx_op to register corresponding op.
"""
ONNX_OP = None
DOMAIN = defs.ONNX_DOMAIN
VERSION = 0
SINCE_VERSION = 0
PARTIAL_SUPPORT = False
PS_DESCRIPTION = ""
ONEFLOW_BLOBNAME_MAP = {}
ONEFLOW_CODE_GEN = []
OP_OUTPUS = []
@classmethod
def check_cls(cls):
if not cls.ONNX_OP:
common.logger.warning(
"{} doesn't have ONNX_OP. "
"Please use BackendHandler.onnx_op decorator to register ONNX_OP.".format(
cls.__name__
)
)
@classmethod
def handle(cls, node, tensor_dict, **kwargs):
""" Main method in handler. It will find corresponding versioned handle method,
        whose name format is `version_%d`. So the prefix `version_` is reserved in oneflow-onnx.
        DON'T use it for any other purpose.
:param node: NodeProto for backend.
:param kwargs: Other args.
:return: OneFlowNode for backend.
"""
ver_handle = getattr(cls, "version_{}".format(cls.SINCE_VERSION), None)
if ver_handle:
return ver_handle(node, tensor_dict, **kwargs)
raise ValueError(
'node "{}" of version {} is not supported'.format(
node.op_type, cls.SINCE_VERSION
)
)
@classmethod
def get_versions(cls):
""" Get all support versions.
:return: Version list.
"""
versions = []
for k, v in inspect.getmembers(cls, inspect.ismethod):
if k.startswith("version_"):
versions.append(int(k.replace("version_", "")))
return versions
@staticmethod
def onnx_op(op):
return BackendHandler.property_register("ONNX_OP", op)
@staticmethod
def flow_func(func):
return BackendHandler.property_register("FLOW_FUNC", func)
@staticmethod
def domain(d):
return BackendHandler.property_register("DOMAIN", d)
@staticmethod
def partial_support(ps):
return BackendHandler.property_register("PARTIAL_SUPPORT", ps)
@staticmethod
def ps_description(psd):
return BackendHandler.property_register("PS_DESCRIPTION", psd)
@staticmethod
def property_register(name, value):
def deco(cls):
setattr(cls, name, value)
return cls
return deco
FLOW_FUNC = None
WEIGHT_SAVE_DIR = None
@classmethod
def copy_variable_file(cls, src_var_name, dst_var_name):
dst_dir_name = os.path.join(cls.WEIGHT_SAVE_DIR, dst_var_name)
if not os.path.exists(dst_dir_name):
os.makedirs(dst_dir_name)
shutil.copyfile(
os.path.join(cls.WEIGHT_SAVE_DIR, src_var_name, "out"),
os.path.join(dst_dir_name, "out"),
)
@classmethod
def get_attrs_processor_param(cls):
""" Get param for attrs processor.
:return: Dict.
"""
return {}
@classmethod
def _process_attrs(cls, attrs):
""" Private method for processing attrs.
Param for this processor got from `get_attrs_processor_param`.
Param is dict contains two key: `default` and `raname`.
First add default value to attrs if key does not exist.
Second rename key to new key.
For example:
attrs = {"keep_dims": True}
param = {"default": {"axis": 1},
"rename": {"keep_dims": "keepdims"}}
processed_attrs = {"axis": "1", "keepdims": True}
:param attrs: Process target attrs.
:return: Processed attrs.
"""
param = {"rename": {}, "default": {}}
param.update(cls.get_attrs_processor_param())
for k, v in param["default"].items():
attrs.setdefault(k, v)
for k, new_k in param["rename"].items():
if k in attrs:
attrs[new_k] = attrs.pop(k)
return attrs
@classmethod
def run_onnx_node(
cls,
node,
tensor_dict,
flow_func=None,
inputs=None,
attrs=None,
name="",
**kwargs
):
""" Helper method to make tensor.
:param node: OnnxNode object.
:param flow_func: Callable OneFlow function. Default is cls.FLOW_FUNC.
:param inputs: Inputs tensor. Default is got from node.inputs.
:param attrs: Attributes. Default is node.attrs.
:param name: Node name.
:param kwargs: Other args.
:return: Tensor.
"""
if flow_func is None:
flow_func = cls.FLOW_FUNC
if inputs is None:
inputs = [tensor_dict.get(inp, None) for inp in node.input_tensor_names]
if attrs is None:
attrs = copy.deepcopy(node.attrs)
if name != "":
attrs["name"] = name
for inp in node.input_tensor_names:
if tensor_dict[inp] not in cls.ONEFLOW_BLOBNAME_MAP:
cls.ONEFLOW_BLOBNAME_MAP[tensor_dict[inp]] = inp
cls.OP_OUTPUS = []
for oup in node.output_tensor_names:
cls.OP_OUTPUS.append(oup)
y = cls._run_flow_func(flow_func, inputs, attrs)
        if isinstance(y, list):
            for i, x in enumerate(cls.OP_OUTPUS):
                if y[i] not in cls.ONEFLOW_BLOBNAME_MAP:
                    cls.ONEFLOW_BLOBNAME_MAP[y[i]] = x
else:
if y not in cls.ONEFLOW_BLOBNAME_MAP:
cls.ONEFLOW_BLOBNAME_MAP[y] = cls.OP_OUTPUS[0]
return y
@classmethod
def _run_flow_func(cls, flow_func, inputs, attrs):
""" Run Oneflow function.
Use only acceptable attributes of function from attrs.
:param flow_func: OneFlow function.
:param inputs: Inputs.
:param attrs: Attributes.
:return: Tensor.
"""
params = list(inspect.signature(flow_func).parameters.keys())
attrs = cls._process_attrs(attrs)
attrs = {p: v for p, v in attrs.items() if p in params}
kwargs = dict(zip(params, inputs))
ambiguous_arguments = any(
kwargs.get(p) is not None and v is not None for p, v in attrs.items()
)
if ambiguous_arguments:
raise TypeError("Ambiguous arguments for {}()".format(flow_func.__name__))
kwargs.update((p, v) for p, v in attrs.items() if v is not None)
pre_name = ''
if len(cls.OP_OUTPUS) == 1:
pre_name = cls.OP_OUTPUS[0] + ' = '
else:
for i in range(len(cls.OP_OUTPUS) - 1):
pre_name = pre_name + '{}, '.format(cls.OP_OUTPUS[i])
pre_name = pre_name + '{} = '.format(cls.OP_OUTPUS[len(cls.OP_OUTPUS) - 1])
if str(flow_func).split()[1] != 'api_get_variable' and (pre_name + cls.code_gen(flow_func, kwargs)) not in cls.ONEFLOW_CODE_GEN:
cls.ONEFLOW_CODE_GEN.append(pre_name + cls.code_gen(flow_func, kwargs))
return flow_func(**kwargs)
@classmethod
def code_gen(cls, flow_fun, kwargs):
def import_func(func):
flag = 0
if hasattr(flow, func):
flag = 1
elif hasattr(flow.math, func):
flag = 2
elif hasattr(flow.layers, func):
flag = 3
elif hasattr(flow.nn, func):
flag = 4
elif func == "api_get_variable":
return str("flow.get_variable")
if flag == 0:
raise NotImplementedError("can not import this func:{} from oneflow".format(func))
elif flag == 1:
return str("flow." + func)
elif flag == 2:
return str("flow.math." + func)
elif flag == 3:
return str("flow.layers." + func)
elif flag == 4:
return str("flow.nn." + func)
func = str(flow_fun).split()
func = func[1]
func = import_func(func)
func += '('
for k, v in kwargs.items():
func += str(k) + '='
if type(v) == list:
new_v = []
for x in v:
if type(x) == oneflow_api.LazyConsistentBlob:
new_v.append(cls.ONEFLOW_BLOBNAME_MAP[x])
else:
new_v.append(x)
v = new_v
func += '['
for x in v:
func += str(x) + ', '
func += '], '
elif type(v) == oneflow_api.LazyConsistentBlob:
v = cls.ONEFLOW_BLOBNAME_MAP[v]
func += str(v) + ', '
else:
func += str(v) + ', '
func += ')\n'
return func
domain = BackendHandler.domain
onnx_op = BackendHandler.onnx_op
flow_func = BackendHandler.flow_func
partial_support = BackendHandler.partial_support
ps_description = BackendHandler.ps_description
oneflow_blobname_map = BackendHandler.ONEFLOW_BLOBNAME_MAP
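# Illustrative handler registration (hypothetical example, not part of this
# module: the "Add" op and the flow.math.add pairing are assumptions):
#
# @onnx_op("Add")
# @flow_func(flow.math.add)
# class Add(BackendHandler):
#     @classmethod
#     def version_7(cls, node, tensor_dict, **kwargs):
#         return cls.run_onnx_node(node, tensor_dict, **kwargs)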
oneflow_code_gen = BackendHandler.ONEFLOW_CODE_GEN | [
"[email protected]"
] | |
e33a1aba6d98fe98d0dc48d2d88c120aa113de68 | 3f6c16ea158a8fb4318b8f069156f1c8d5cff576 | /.PyCharm2019.1/system/python_stubs/-1850396913/pyexpat.py | d8a525a88b3a33d69b9d4060d8306489c2477a02 | [] | no_license | sarthak-patidar/dotfiles | 08494170d2c0fedc0bbe719cc7c60263ce6fd095 | b62cd46f3491fd3f50c704f0255730af682d1f80 | refs/heads/master | 2020-06-28T23:42:17.236273 | 2019-10-01T13:56:27 | 2019-10-01T13:56:27 | 200,369,900 | 0 | 0 | null | 2019-08-03T12:56:33 | 2019-08-03T11:53:29 | Shell | UTF-8 | Python | false | false | 7,337 | py | # encoding: utf-8
# module pyexpat
# from (built-in)
# by generator 1.147
""" Python wrapper for Expat parser. """
# imports
import pyexpat.errors as errors # <module 'pyexpat.errors'>
import pyexpat.model as model # <module 'pyexpat.model'>
# Variables with simple values
EXPAT_VERSION = 'expat_2.2.5'
native_encoding = 'UTF-8'
XML_PARAM_ENTITY_PARSING_ALWAYS = 2
XML_PARAM_ENTITY_PARSING_NEVER = 0
XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE = 1
# functions
def ErrorString(*args, **kwargs): # real signature unknown
""" Returns string error for given number. """
pass
def ParserCreate(*args, **kwargs): # real signature unknown
""" Return a new XML parser object. """
pass
# classes
class ExpatError(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
error = ExpatError
class XMLParserType(object):
""" XML parser """
def ExternalEntityParserCreate(self, *args, **kwargs): # real signature unknown
""" Create a parser for parsing an external entity based on the information passed to the ExternalEntityRefHandler. """
pass
def GetBase(self, *args, **kwargs): # real signature unknown
""" Return base URL string for the parser. """
pass
def GetInputContext(self, *args, **kwargs): # real signature unknown
"""
Return the untranslated text of the input that caused the current event.
If the event was generated by a large amount of text (such as a start tag
for an element with many attributes), not all of the text may be available.
"""
pass
def Parse(self, *args, **kwargs): # real signature unknown
"""
Parse XML data.
`isfinal' should be true at end of input.
"""
pass
def ParseFile(self, *args, **kwargs): # real signature unknown
""" Parse XML data from file-like object. """
pass
def SetBase(self, *args, **kwargs): # real signature unknown
""" Set the base URL for the parser. """
pass
def SetParamEntityParsing(self, *args, **kwargs): # real signature unknown
"""
Controls parsing of parameter entities (including the external DTD subset).
Possible flag values are XML_PARAM_ENTITY_PARSING_NEVER,
XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE and
XML_PARAM_ENTITY_PARSING_ALWAYS. Returns true if setting the flag
was successful.
"""
pass
def UseForeignDTD(self, *args, **kwargs): # real signature unknown
"""
Allows the application to provide an artificial external subset if one is not specified as part of the document instance.
This readily allows the use of a 'default' document type controlled by the
application, while still getting the advantage of providing document type
information to the parser. 'flag' defaults to True if not provided.
"""
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7ffb5f337048>, 'find_spec': <classmethod object at 0x7ffb5f337080>, 'find_module': <classmethod object at 0x7ffb5f3370b8>, 'create_module': <classmethod object at 0x7ffb5f3370f0>, 'exec_module': <classmethod object at 0x7ffb5f337128>, 'get_code': <classmethod object at 0x7ffb5f337198>, 'get_source': <classmethod object at 0x7ffb5f337208>, 'is_package': <classmethod object at 0x7ffb5f337278>, 'load_module': <classmethod object at 0x7ffb5f3372b0>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
expat_CAPI = None # (!) real value is '<capsule object "pyexpat.expat_CAPI" at 0x7ffb5c5df630>'
features = [
(
'sizeof(XML_Char)',
1,
),
(
'sizeof(XML_LChar)',
1,
),
(
'XML_DTD',
0,
),
(
'XML_CONTEXT_BYTES',
1024,
),
(
'XML_NS',
0,
),
]
version_info = (
2,
2,
5,
)
__spec__ = None # (!) real value is "ModuleSpec(name='pyexpat', loader=<class '_frozen_importlib.BuiltinImporter'>, origin='built-in')"
| [
"[email protected]"
] | |
1e4bb90e0f5856138c8ab6c6996f0479a227feb7 | 28541d61368a14a0d5003db4cc07fed21b40c41f | /Chapter-4/depth_search2.py | e0dc1ae6ff91822c949a2e637ca6c0eb3ca7512a | [] | no_license | eizin6389/python_algorithm | 390861f9342ce907f2cda0b45b84d364bcba7541 | abf3588ed97a343b6559eb5d69156708d42bc243 | refs/heads/master | 2022-12-06T20:48:49.470312 | 2020-08-14T13:29:26 | 2020-08-14T13:29:26 | 282,905,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | tree = [[1,2],[3,4],[5,6],[7,8],[9,10],[11,12],[13,14],[],[],[],[],[],[],[],[]]
def search(pos):
for i in tree[pos]:
search(i)
print(pos, end=' ')
search(0)
| [
"[email protected]"
] | |
82e54a6fd231e57a350d7ae8277a496efb65f8b6 | 0aa7255bf5df6b11ad929ec313019d734e67afb8 | /LMS/blog/models/blog_tag.py | a62cc44a40ac7f850e5d91dc6384f1bac7a19239 | [] | no_license | arafat08007/Learning-management-system-by-jaki | c4e3f34061b527a09cdbc86d5ec8547074774189 | 928bdf4b8a0f408a17fa0c3d9b8bb6d77ef285e8 | refs/heads/master | 2022-11-16T06:04:22.016667 | 2020-06-19T09:23:01 | 2020-06-19T09:23:01 | 273,452,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from django.db import models
# # Create your models here.
class Tag(models.Model):
tag_name = models.CharField(max_length=50, unique=True)
def __str__(self):
return self.tag_name
| [
"[email protected]"
] | |
883b04e8faa0d0655b54ddd6386e4fedc823eb87 | 1734fd26a9adf7d2580f8bd981babda861944ebd | /snippets/plot.py | 33a0a5553e81ca7a1279684804877763af55723c | [] | no_license | tangzhuochen/Python_ML_Code | 420f4d80552a901b41e368e4e66a06f51ea1b29f | b418fd6a431a77838447ab4736bdf24019276309 | refs/heads/master | 2020-03-28T11:44:50.853941 | 2018-02-08T06:59:31 | 2018-02-08T06:59:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,110 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 08:57:13 2015
@author: shifeng
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation,ๅฏผๅ
ฅirisๆฐๆฎ๏ผๅๆฐๆฎๅๅค
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]#ๅปๆไบlabelไธบ2๏ผlabelๅช่ฝไบๅ๏ผๆๅฏไปฅใ
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
#ๅ็ฑป๏ผๅROCๅๆ
# Run classifier with cross-validation and plot ROC curves
#ไฝฟ็จ6ๆไบคๅ้ช่ฏ๏ผๅนถไธ็ปROCๆฒ็บฟ
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)#ๆณจๆ่ฟ้๏ผprobability=True,้่ฆ๏ผไธ็ถ้ขๆต็ๆถๅไผๅบ็ฐๅผๅธธใๅฆๅคrbfๆ ธๆๆๆดๅฅฝไบใ
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
#้่ฟ่ฎญ็ปๆฐๆฎ๏ผไฝฟ็จsvm็บฟๆงๆ ธๅปบ็ซๆจกๅ๏ผๅนถๅฏนๆต่ฏ้่ฟ่กๆต่ฏ๏ผๆฑๅบ้ขๆตๅพๅ
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# print set(y[train]) #set([0,1]) ๅณlabelๆไธคไธช็ฑปๅซ
# print len(X[train]),len(X[test]) #่ฎญ็ป้ๆ84ไธช๏ผๆต่ฏ้ๆ16ไธช
# print "++",probas_ #predict_proba()ๅฝๆฐ่พๅบ็ๆฏๆต่ฏ้ๅจlaelๅ็ฑปๅซไธ็็ฝฎไฟกๅบฆ๏ผ
# #ๅจๅชไธช็ฑปๅซไธ็็ฝฎไฟกๅบฆ้ซ๏ผๅๅไธบๅช็ฑป
# Compute ROC curve and area the curve
#้่ฟroc_curve()ๅฝๆฐ๏ผๆฑๅบfprๅtpr๏ผไปฅๅ้ๅผ
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr) #ๅฏนmean_tprๅจmean_fprๅค่ฟ่กๆๅผ๏ผ้่ฟscipyๅ
่ฐ็จinterp()ๅฝๆฐ
mean_tpr[0] = 0.0 #ๅๅงๅคไธบ0
roc_auc = auc(fpr, tpr)
#็ปๅพ๏ผๅช้่ฆplt.plot(fpr,tpr),ๅ้roc_aucๅชๆฏ่ฎฐๅฝauc็ๅผ๏ผ้่ฟauc()ๅฝๆฐ่ฝ่ฎก็ฎๅบๆฅ
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
#็ปๅฏน่ง็บฟ
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv) #ๅจmean_fpr100ไธช็น๏ผๆฏไธช็นๅคๆๅผๆๅผๅคๆฌกๅๅนณๅ
mean_tpr[-1] = 1.0 #ๅๆ ๆๅไธไธช็นไธบ๏ผ1,1๏ผ
mean_auc = auc(mean_fpr, mean_tpr) #่ฎก็ฎๅนณๅAUCๅผ
#็ปๅนณๅROCๆฒ็บฟ
#print mean_fpr,len(mean_fpr)
#print mean_tpr
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show() | [
"[email protected]"
] | |
6e8a2025251aa202713cc2049aa715e891ee297c | 2c50ed6522f5c58f7be0416e702ec0d73127246f | /tests/test_model_e2e.py | 982be9523b521d2ab07a423c8bd28eb6dfb5cfa2 | [
"Apache-2.0"
] | permissive | wzy810103882/detectron2 | 5a5ca7f3c88e7972ddc379ab81e315075a7a2c0d | ca38df54206a78742d02a8bd572390cebcc91c86 | refs/heads/master | 2020-09-22T13:49:45.656257 | 2019-12-01T22:52:24 | 2019-12-01T22:52:24 | 225,226,643 | 0 | 0 | Apache-2.0 | 2019-12-01T20:32:49 | 2019-12-01T20:32:49 | null | UTF-8 | Python | false | false | 2,643 | py | # -*- coding: utf-8 -*-
import unittest
import torch
import detectron2.model_zoo as model_zoo
from detectron2.modeling import build_model
from detectron2.structures import BitMasks, Boxes, Instances
from detectron2.utils.events import EventStorage
from detectron2.config import get_cfg
def get_model_zoo(config_path):
"""
Like model_zoo.get, but do not load any weights (even pretrained)
"""
cfg_file = model_zoo.get_config_file(config_path)
cfg = get_cfg()
cfg.merge_from_file(cfg_file)
return build_model(cfg)
def create_model_input(img, inst=None):
if inst is not None:
return {"image": img, "instances": inst}
else:
return {"image": img}
def get_empty_instance(h, w):
inst = Instances((h, w))
inst.gt_boxes = Boxes(torch.rand(0, 4))
inst.gt_classes = torch.tensor([]).to(dtype=torch.int64)
inst.gt_masks = BitMasks(torch.rand(0, h, w))
return inst
class MaskRCNNE2ETest(unittest.TestCase):
def setUp(self):
self.model = get_model_zoo("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
def test_empty_data(self):
inst = [get_empty_instance(200, 250), get_empty_instance(200, 249)]
# eval
self.model.eval()
self.model(
[
create_model_input(torch.rand(3, 200, 250)),
create_model_input(torch.rand(3, 200, 249)),
]
)
# training
self.model.train()
with EventStorage():
losses = self.model(
[
create_model_input(torch.rand(3, 200, 250), inst[0]),
create_model_input(torch.rand(3, 200, 249), inst[1]),
]
)
sum(losses.values()).backward()
del losses
class RetinaNetE2ETest(unittest.TestCase):
def setUp(self):
self.model = get_model_zoo("COCO-Detection/retinanet_R_50_FPN_1x.yaml")
def test_empty_data(self):
inst = [get_empty_instance(200, 250), get_empty_instance(200, 249)]
# eval
self.model.eval()
self.model(
[
create_model_input(torch.rand(3, 200, 250)),
create_model_input(torch.rand(3, 200, 249)),
]
)
# training
self.model.train()
with EventStorage():
losses = self.model(
[
create_model_input(torch.rand(3, 200, 250), inst[0]),
create_model_input(torch.rand(3, 200, 249), inst[1]),
]
)
sum(losses.values()).backward()
del losses
| [
"[email protected]"
] | |
33454d8a473d9356ae6386b4a2337be5edca2700 | b35f80114ad96928ccce44d40840177e0b5158aa | /dfvfs/encoding/decoder.py | 1ec6014e57a2ce8159dd18307c1cfe296256ff2c | [
"Apache-2.0"
] | permissive | ryanmjones/dfvfs | 7b62bab127cb201e679331fa808ec79e8ef03bd9 | 29ae5baddbf285260a596a67a199d0f5077214c1 | refs/heads/master | 2020-03-29T23:47:49.363000 | 2018-09-03T11:43:03 | 2018-09-03T11:43:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # -*- coding: utf-8 -*-
"""The decoder interface."""
from __future__ import unicode_literals
import abc
class Decoder(object):
"""Decoder interface."""
@abc.abstractmethod
def Decode(self, encoded_data):
"""Decodes the encoded data.
Args:
encoded_data (byte): encoded data.
Returns:
tuple(bytes, bytes): decoded data and remaining encoded data.
"""
| [
"[email protected]"
] | |
bd9d719ca44cc85dcaf2e828f9cfb1f1854d7bc8 | aa44a2a7dec257687eb67ed109ca7727ac09d343 | /polls/migrations/0001_initial.py | 22cb82fe892f4aa5ffdda8e008b1f0fd8e12ee2e | [] | no_license | mitshel/mbrc_poll | 3f63bc2e0aa18d14eefc4e4583a84613de7d9e81 | 694cc8b084394feca178bdc2bd9ecfc1f58d9906 | refs/heads/master | 2020-04-05T23:17:08.152738 | 2015-09-16T17:32:14 | 2015-09-16T17:32:14 | 40,611,735 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='polls',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='polls_Answers',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('answer', models.CharField(max_length=512)),
],
),
migrations.CreateModel(
name='polls_AnswersResults',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('open_answer', models.CharField(max_length=1024)),
('closed_answer', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='polls_Questions',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('question', models.CharField(max_length=1024)),
('poll', models.ForeignKey(to='polls.polls')),
],
),
migrations.CreateModel(
name='polls_Results',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('poll', models.ForeignKey(to='polls.polls')),
],
),
migrations.AddField(
model_name='polls_answersresults',
name='poll_result',
field=models.ForeignKey(to='polls.polls_Results'),
),
migrations.AddField(
model_name='polls_answersresults',
name='question',
field=models.ForeignKey(to='polls.polls_Questions'),
),
migrations.AddField(
model_name='polls_answers',
name='question',
field=models.ForeignKey(to='polls.polls_Questions'),
),
]
| [
"[email protected]"
] | |
93d81bbe417a0112b72a86bf91c615abb9e27a37 | 47bd686ab04d8f6daba2097875dfefdba967d598 | /01_baekjoon/83_problem_3052.py | af9e73d95822c7b61f0dd7150058852dbd9f71bf | [] | no_license | EmjayAhn/DailyAlgorithm | 9633638c7cb7064baf26126cbabafd658fec3ca8 | acda1917fa1a290fe740e1bccb237d83b00d1ea4 | refs/heads/master | 2023-02-16T17:04:35.245512 | 2023-02-08T16:29:51 | 2023-02-08T16:29:51 | 165,942,743 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | import sys
inputs = []
for _ in range(10):
input_number = int(sys.stdin.readline())
inputs.append(input_number%42)
print(len(list(set(inputs))))
| [
"[email protected]"
] | |
6f80c3f5e025ec85ced638609306fc2465839e96 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /dms_write_1/replication-task-assessment-run_cancel.py | d153b39c7a14410bbcb8fb410169d26887be325c | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/cancel-replication-task-assessment-run.html
if __name__ == '__main__':
"""
delete-replication-task-assessment-run : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/delete-replication-task-assessment-run.html
describe-replication-task-assessment-runs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/describe-replication-task-assessment-runs.html
start-replication-task-assessment-run : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/start-replication-task-assessment-run.html
"""
parameter_display_string = """
# replication-task-assessment-run-arn : Amazon Resource Name (ARN) of the premigration assessment run to be canceled.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("dms", "cancel-replication-task-assessment-run", "replication-task-assessment-run-arn", add_option_dict)
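# Editor's note (hedged): the wrapper above ultimately issues something
# equivalent to the documented AWS CLI call, e.g.:
# aws dms cancel-replication-task-assessment-run \
#     --replication-task-assessment-run-arn <arn>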
| [
"[email protected]"
] | |
bfad1f787200f149256f169c0dea8aaf4e2cdb55 | 596e92d0d484b6e7eee6d322e72e52748fdeaa5d | /sportsdata/nhl_stats/api_client.py | a7663abbf3b998432f1d1266f063551c21b205cd | [] | no_license | scottypate/sportsdata | f5f61ddc7eb482883f93737c6ce73dd814ed4336 | a07955ab50bf4fff1ce114ed9895095ff770c473 | refs/heads/main | 2023-08-18T16:51:56.452678 | 2021-10-22T12:44:08 | 2021-10-22T12:44:08 | 420,062,350 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 25,092 | py | # coding: utf-8
"""
NHL v3 Stats
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import datetime
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from sportsdata.nhl_stats.configuration import Configuration
import sportsdata.nhl_stats.models
from sportsdata.nhl_stats import rest
class ApiClient(object):
"""Generic API client for Swagger client library builds.
Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates.
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long, # noqa: F821
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None):
if configuration is None:
configuration = Configuration()
self.configuration = configuration
self.pool = ThreadPool()
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Swagger-Codegen/1.0.0/python'
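        # Editor's illustration (hedged sketch; the header name below is an
        # assumption, not taken from this file):
        # client = ApiClient(header_name='Ocp-Apim-Subscription-Key',
        #                    header_value='<subscription key>')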
def __del__(self):
self.pool.close()
self.pool.join()
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self, resource_path, method, path_params=None,
query_params=None, header_params=None, body=None, post_params=None,
files=None, response_type=None, auth_settings=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
url = self.configuration.host + resource_path
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
self.last_response = response_data
return_data = response_data
if _preload_content:
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status,
response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is swagger model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `swagger_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in six.iteritems(obj.swagger_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in six.iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(sportsdata.nhl_stats.models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, async_req=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout)
else:
thread = self.pool.apply_async(self.__call_api, (resource_path,
method, path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
_return_http_data_only,
collection_formats,
_preload_content, _request_timeout))
return thread
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
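        # Editor's illustration (hedged): with params={'id': [1, 2, 3]} and
        # collection_formats={'id': 'csv'} this yields [('id', '1,2,3')];
        # with 'multi' it yields [('id', 1), ('id', 2), ('id', 3)].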
def prepare_post_parameters(self, post_params=None, files=None):
"""Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
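        # Editor's illustration (hedged):
        # select_header_accept(['application/xml', 'application/json'])
        # returns 'application/json'; without a JSON option the list is
        # returned joined with ', '.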
def select_header_content_type(self, content_types):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
response_data = response.data
with open(path, "wb") as f:
if isinstance(response_data, str):
# change str to bytes so we can write it
response_data = response_data.encode('utf-8')
f.write(response_data)
else:
f.write(response_data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return a original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datatime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __hasattr(self, object, name):
return name in object.__class__.__dict__
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if not klass.swagger_types and not self.__hasattr(klass, 'get_real_child_model'):
return data
kwargs = {}
if klass.swagger_types is not None:
for attr, attr_type in six.iteritems(klass.swagger_types):
if (data is not None and
klass.attribute_map[attr] in data and
isinstance(data, (list, dict))):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
if (isinstance(instance, dict) and
klass.swagger_types is not None and
isinstance(data, dict)):
for key, value in data.items():
if key not in klass.swagger_types:
instance[key] = value
if self.__hasattr(instance, 'get_real_child_model'):
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
return instance
| [
"[email protected]"
] | |
9ea1502d9147fa1af905973a61f5bc952aea401c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_014/ch9_2020_03_02_19_39_46_703363.py | 5edc1af73d79ab1e4a46e92006127db38bf7623c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | def calcula_volume_da_esfera (R):
    v = 4/3 * (3.14 * R**3)  # volume of a sphere: (4/3) * pi * R**3
    return v
R = 2
print(calcula_volume_da_esfera(R))  # call the function; printing the bare name only shows the function object | [
"[email protected]"
] | |
3ea0595fb5b419bbeb72ea4652dfeab1bd2e2b1a | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/uhd_restpy/testplatform/sessions/ixnetwork/topology/pppoxserversessions_34f51eaa47353aae9b360c64589d7c32.py | 3e819b57836a62b085cb72205b218a6011b47d87 | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,857 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class PppoxServerSessions(Base):
"""PPPoX Server Sessions.
The PppoxServerSessions class encapsulates a required pppoxServerSessions resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'pppoxServerSessions'
_SDM_ATT_MAP = {
'ChapName': 'chapName',
'ChapSecret': 'chapSecret',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'DiscoveredClientsMacs': 'discoveredClientsMacs',
'DiscoveredRemoteSessionIds': 'discoveredRemoteSessionIds',
'DiscoveredRemoteTunnelIds': 'discoveredRemoteTunnelIds',
'DiscoveredSessionIds': 'discoveredSessionIds',
'DiscoveredTunnelIPs': 'discoveredTunnelIPs',
'DiscoveredTunnelIds': 'discoveredTunnelIds',
'DomainList': 'domainList',
'EnableDomainGroups': 'enableDomainGroups',
'Name': 'name',
'PapPassword': 'papPassword',
'PapUser': 'papUser',
'ServerIpv4Addresses': 'serverIpv4Addresses',
'ServerIpv6Addresses': 'serverIpv6Addresses',
'SessionInfo': 'sessionInfo',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(PppoxServerSessions, self).__init__(parent, list_op)
@property
def Tag(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
if self._properties.get('Tag', None) is not None:
return self._properties.get('Tag')
else:
return Tag(self)
@property
def ChapName(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): User name when CHAP Authentication is being used
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ChapName']))
@property
def ChapSecret(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Secret when CHAP Authentication is being used
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ChapSecret']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def DiscoveredClientsMacs(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): The discovered remote MAC address.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredClientsMacs'])
@property
def DiscoveredRemoteSessionIds(self):
# type: () -> List[int]
"""
Returns
-------
- list(number): The negotiated session ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredRemoteSessionIds'])
@property
def DiscoveredRemoteTunnelIds(self):
# type: () -> List[int]
"""
Returns
-------
- list(number): The negotiated tunnel ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredRemoteTunnelIds'])
@property
def DiscoveredSessionIds(self):
# type: () -> List[int]
"""
Returns
-------
- list(number): The negotiated session ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredSessionIds'])
@property
def DiscoveredTunnelIPs(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): The discovered remote tunnel IP.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredTunnelIPs'])
@property
def DiscoveredTunnelIds(self):
# type: () -> List[int]
"""
Returns
-------
- list(number): The negotiated tunnel ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredTunnelIds'])
@property
def DomainList(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Configure domain group settings
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DomainList']))
@property
def EnableDomainGroups(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable domain groups
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableDomainGroups']))
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def PapPassword(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Password when PAP Authentication is being used
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PapPassword']))
@property
def PapUser(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): User name when PAP Authentication is being used
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PapUser']))
@property
def ServerIpv4Addresses(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): IPv4 Server Address. Each PPPoX Server Session will display the v4 address from the PPPoX Server it belongs to.
"""
return self._get_attribute(self._SDM_ATT_MAP['ServerIpv4Addresses'])
@property
def ServerIpv6Addresses(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): IPv6 Server Address. Each PPPoX Server Session will display the v6 address from the PPPoX Server it belongs to.
"""
return self._get_attribute(self._SDM_ATT_MAP['ServerIpv6Addresses'])
@property
def SessionInfo(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[cLS_CFG_REJ_AUTH | cLS_CHAP_PEER_DET_FAIL | cLS_CHAP_PEER_RESP_BAD | cLS_CODE_REJ_IPCP | cLS_CODE_REJ_IPV6CP | cLS_CODE_REJ_LCP | cLS_ERR_PPP_NO_BUF | cLS_ERR_PPP_SEND_PKT | cLS_LINK_DISABLE | cLS_LOC_IPADDR_BROADCAST | cLS_LOC_IPADDR_CLASS_E | cLS_LOC_IPADDR_INVAL_ACKS_0 | cLS_LOC_IPADDR_INVAL_ACKS_DIFF | cLS_LOC_IPADDR_LOOPBACK | cLS_LOC_IPADDR_PEER_MATCH_LOC | cLS_LOC_IPADDR_PEER_NO_GIVE | cLS_LOC_IPADDR_PEER_NO_HELP | cLS_LOC_IPADDR_PEER_NO_TAKE | cLS_LOC_IPADDR_PEER_REJ | cLS_LOOPBACK_DETECT | cLS_NO_NCP | cLS_NONE | cLS_PAP_BAD_PASSWD | cLS_PEER_DISCONNECTED | cLS_PEER_IPADDR_MATCH_LOC | cLS_PEER_IPADDR_PEER_NO_SET | cLS_PPOE_AC_SYSTEM_ERROR | cLS_PPOE_GENERIC_ERROR | cLS_PPP_DISABLE | cLS_PPPOE_PADI_TIMEOUT | cLS_PPPOE_PADO_TIMEOUT | cLS_PPPOE_PADR_TIMEOUT | cLS_PROTO_REJ_IPCP | cLS_PROTO_REJ_IPv6CP | cLS_TIMEOUT_CHAP_CHAL | cLS_TIMEOUT_CHAP_RESP | cLS_TIMEOUT_IPCP_CFG_REQ | cLS_TIMEOUT_IPV6CP_CFG_REQ | cLS_TIMEOUT_IPV6CP_RA | cLS_TIMEOUT_LCP_CFG_REQ | cLS_TIMEOUT_LCP_ECHO_REQ | cLS_TIMEOUT_PAP_AUTH_REQ]): Logs additional information about the session state
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionInfo'])
def update(self, Name=None):
# type: (str) -> PppoxServerSessions
"""Updates pppoxServerSessions resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def CloseIpcp(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the closeIpcp operation on the server.
Close IPCP for selected PPPoX Server Sessions items.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
closeIpcp(async_operation=bool)
-------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
closeIpcp(SessionIndices=list, async_operation=bool)
----------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
closeIpcp(SessionIndices=string, async_operation=bool)
------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('closeIpcp', payload=payload, response_object=None)
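        # Editor's illustration (hedged): a typical call closes IPCP on a
        # subset of sessions, e.g.
        # pppox_server_sessions.CloseIpcp(SessionIndices='1-4;6')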
def CloseIpv6cp(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the closeIpv6cp operation on the server.
Close IPv6CP for selected PPPoX Severs Sessions items.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
closeIpv6cp(async_operation=bool)
---------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
closeIpv6cp(SessionIndices=list, async_operation=bool)
------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
closeIpv6cp(SessionIndices=string, async_operation=bool)
--------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('closeIpv6cp', payload=payload, response_object=None)
def get_device_ids(self, PortNames=None, ChapName=None, ChapSecret=None, DomainList=None, EnableDomainGroups=None, PapPassword=None, PapUser=None):
"""Base class infrastructure that gets a list of pppoxServerSessions device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- ChapName (str): optional regex of chapName
- ChapSecret (str): optional regex of chapSecret
- DomainList (str): optional regex of domainList
- EnableDomainGroups (str): optional regex of enableDomainGroups
- PapPassword (str): optional regex of papPassword
- PapUser (str): optional regex of papUser
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
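        # Editor's illustration (hedged): the regex parameters narrow the
        # returned ids, e.g. get_device_ids(PapUser='corp_.*') keeps only
        # sessions whose PAP user name matches.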
| [
"[email protected]"
] | |
b9d8ab974796272d2f381fa35d1a618abf7f072d | 758f1ad9c287c74e57fa7a4f8d03aba8d9f776ab | /host/knobui/list.py | 3936fdb4caea0387b3222bc6750f7be8a61a6517 | [] | no_license | cnvogelg/knobterm | 6ef50bc479a64d5ff1729265f447b40c5d5dfd00 | a731d1a0f1f85a0ed17f3b0df5175e151f82608f | refs/heads/master | 2021-01-21T22:26:53.052986 | 2013-01-01T16:38:40 | 2013-01-01T16:38:40 | 5,993,835 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | from label import Label
from consts import *
class List:
def __init__(self, x, y, w, title, entries=None):
self.x = x
self.y = y
self.w = w
        self.h = (len(entries) if entries else 0) + 2  # tolerate entries=None
x += 2
w -= 4
self.tlabel = Label(x,y,w,title, align=Label.ALIGN_CENTER, fg=COLOR_LIGHT_GREY)
self.labels = []
y += 1
self.lx = x
self.ly = y
self.lw = w
if entries != None:
self.add_entries(entries)
def add_entries(self, entries):
y = self.ly
for e in entries:
l = Label(self.lx,y,self.lw,e)
self.labels.append(l)
y += 1
self.ly = y
    def add_entry(self, entry):
        l = Label(self.lx, self.ly, self.lw, entry)
        self.labels.append(l)  # store the label so draw() actually renders it
        self.ly += 1
def draw(self, gc):
gc.set_color_fg(COLOR_LIGHT_GREY)
gc.draw_border(1, self.x, self.y, self.w-2, self.h-2)
self.tlabel.draw(gc)
for l in self.labels:
l.draw(gc)
def get_label(self, i):
return self.labels[i]
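# Hedged usage sketch (editor addition): typical construction and drawing,
# assuming a graphics context `gc` from the surrounding knobterm UI code.
# lst = List(0, 0, 20, "Menu", entries=["one", "two"])
# lst.draw(gc)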
| [
"[email protected]"
] | |
b5a4113c696eea49ed3ad204ff79189f7aa46c03 | 210e1cffcd8a705c2a8a1485ed5532b9169f5d10 | /whoville/cloudbreak/models/custom_container_request.py | dbe93467c0216ad39351277fd6aa353b5d0e87eb | [
"Apache-2.0"
] | permissive | mikchaos/whoville | 2a45bc6636d448733d8d2368ac88a980cf6954ea | 6eabaea4b74ac0b632c03db8252590131c6ce63b | refs/heads/master | 2020-04-19T08:53:04.430990 | 2019-01-29T05:01:57 | 2019-01-29T05:01:57 | 168,092,002 | 0 | 0 | Apache-2.0 | 2019-01-29T05:00:06 | 2019-01-29T05:00:06 | null | UTF-8 | Python | false | false | 4,034 | py | # coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CustomContainerRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'definitions': 'dict(str, str)'
}
attribute_map = {
'definitions': 'definitions'
}
def __init__(self, definitions=None):
"""
CustomContainerRequest - a model defined in Swagger
"""
self._definitions = None
if definitions is not None:
self.definitions = definitions
@property
def definitions(self):
"""
Gets the definitions of this CustomContainerRequest.
:return: The definitions of this CustomContainerRequest.
:rtype: dict(str, str)
"""
return self._definitions
@definitions.setter
def definitions(self, definitions):
"""
Sets the definitions of this CustomContainerRequest.
:param definitions: The definitions of this CustomContainerRequest.
:type: dict(str, str)
"""
self._definitions = definitions
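        # Editor's illustration (hedged): definitions maps container names to
        # image identifiers, e.g.
        # req = CustomContainerRequest(definitions={'ambari-server': 'my/image:1.0'})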
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CustomContainerRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
237be374caae6ca01296cb6efbdcd60b11fc1254 | c326c6e95c5ec945f534a89e3a24e791991f45f1 | /oscar/apps/partner/migrations/0008_auto__del_abstractstockalert__del_field_stockalert_abstractstockalert_.py | a9d9350d25a0114266c69b299b2ea9024c8b4a78 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | michaelBenin/django-oscar | 69ae279febae8d1896b9c0ba9b29d98439611b3e | f45d38b5d3ffa10756d95c625fb90a27185ce1e1 | refs/heads/master | 2021-01-18T03:56:51.054708 | 2013-07-10T13:50:15 | 2013-07-10T13:50:15 | 11,319,250 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,932 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
        # Drop the old 'StockAlert' table and rename the 'AbstractStockAlert'
        # table to take its place (collapsing the abstract base into one model)
db.drop_table('partner_stockalert')
db.rename_table('partner_abstractstockalert', 'partner_stockalert')
def backwards(self, orm):
raise Exception("This migration cannot be reversed")
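        # Editor's note (hedged): South applies forwards() via
        # "python manage.py migrate partner"; because the table is renamed in
        # place, backwards() deliberately refuses to run.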
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'partner.partner': {
'Meta': {'object_name': 'Partner'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"})
},
'partner.stockalert': {
'Meta': {'ordering': "('-date_created',)", 'object_name': 'StockAlert'},
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '128'}),
'stockrecord': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alerts'", 'to': "orm['partner.StockRecord']"}),
'threshold': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'partner.stockrecord': {
'Meta': {'unique_together': "(('partner', 'partner_sku'),)", 'object_name': 'StockRecord'},
'cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'low_stock_threshold': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_allocated': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['partner.Partner']"}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'price_currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'price_retail': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'stockrecord'", 'unique': 'True', 'to': "orm['catalogue.Product']"})
}
}
complete_apps = ['partner']
| [
"[email protected]"
] | |
600d6005c001b142a95537db39711a46551329a9 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3_neat/16_0_3_stormshadow1896_test.py | 3f76200143e9cb10b787cfb85cc80e4209134e9f | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,174 | py | def check(num):
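    # Trial division: return the smallest non-trivial divisor of num,
    # or -1 if none is found up to sqrt(num) (num is then prime).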
for i in range( 2, int(num**.5) + 2 ):
if num % i == 0:
            return i
return -1
def create_num( num, base ):
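    # Reinterpret the decimal digits of num as digits in `base`,
    # e.g. create_num(101, 2) == 5.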
i = 1
temp = 0
while num > 0:
temp = temp + (num % 10) * i
i = i*base
num = num / 10
return temp
def valid(num):
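    # A candidate is valid only if it contains just the digits 0 and 1
    # and its last digit is 1.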
a = str(num)
for i in range(2,10):
if str(i) in a:
return False
if num % 10 == 1:
return True
else: return False
found = 0
a = 1000000000000001
# a = 100001
# print len(str(a))
print "Case #1:"
while found < 50:
factors = []
if valid(a) :
for bases in range(2, 11):
num = create_num(a, bases)
# print num,
# if valid(a):
# print "yes"
# print len(set(str(num))) == 2
# print num
res = check(num)
# print res
if res == -1:
break
else:
factors.append(res)
if len(factors) == 9:
print a,
for e in factors:print e,
# print factors
found = found + 1
print
# print
a = a + 1
| [
"[[email protected]]"
] | |
81ca69ae2548272b52a98916125018505f90c491 | d755c825cacbb60e9a23234fbaaf93de48c6a058 | /others/permutations.py | a183388454c2dd1f1a4ad70a22c8ae4c383a2f96 | [] | no_license | Subhash3/CodeChef | d82e4df751e6dd1e07205871b9e2c0b1202d0506 | 5301b0e4555aac55a72f175dfba8f786b4ea7bbd | refs/heads/master | 2020-08-04T20:17:18.339602 | 2020-01-21T16:10:12 | 2020-01-21T16:10:12 | 212,267,130 | 2 | 3 | null | 2019-10-15T12:07:44 | 2019-10-02T06:07:41 | Python | UTF-8 | Python | false | false | 269 | py | #!/usr/bin/env python3
import math
for i in range(int(input())) :
n_k = input().split()
n = int(n_k[0])
k = int(n_k[1])
integers = input().split()
integers = list(map(int, integers))
r = integers.count(0)
a = math.factorial(r)
print(a)
| [
"[email protected]"
] | |
c6888ea8295a4174a130e4b3ac4e16938370ffc9 | f4dd7ae9af786a396c42c3cc4a2126ab7d7e9cb8 | /tests/test_transformer/test_refcnt/test_refcnt_optimizer.py | 3fb998e7c54a00dad269e0741fcfd4b50370de05 | [
"Apache-2.0"
] | permissive | hpplinux/utensor_cgen | 1e9b85b8a457117763313b8924d9696e0b99a120 | d892b728d24321bc751552667b9722633d17c574 | refs/heads/master | 2020-04-19T16:13:31.891523 | 2019-02-19T02:27:44 | 2019-02-19T02:27:44 | 168,297,950 | 0 | 0 | null | 2019-01-30T07:20:53 | 2019-01-30T07:20:52 | null | UTF-8 | Python | false | false | 591 | py | from utensor_cgen.ir import uTensorGraph
from utensor_cgen.transformer import RefCntOptimizer
def test_refcnt_optimizer(refgraph_tuple):
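    # The fixture supplies a graph_def, the expected per-op reference counts,
    # and the graph's output nodes; after the optimizer pass every listed op
    # should carry the expected ref counts under the transformer's namescope.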
(graph_def, refcnt_ans, output_nodes)= refgraph_tuple
ugraph = uTensorGraph(graph_def, output_nodes)
transformer = RefCntOptimizer()
ugraph = transformer.transform(ugraph)
for node_name in ugraph.topo_order:
if node_name in refcnt_ans:
op_info = ugraph.ops_info[node_name]
refcnts = op_info.op_attr["%s__ref_counts" % transformer.KWARGS_NAMESCOPE]
assert refcnts == refcnt_ans[node_name]
| [
"[email protected]"
] | |
7bc4aca3786342718ac19d3cf036d249e8b025a1 | e6d862a9df10dccfa88856cf16951de8e0eeff2b | /VMS/core/python-aiohttp/api_server/models/person.py | 47ed159f8bd6e87bf16eaad232b627a25976f722 | [] | no_license | AllocateSoftware/API-Stubs | c3de123626f831b2bd37aba25050c01746f5e560 | f19d153f8e9a37c7fb1474a63c92f67fc6c8bdf0 | refs/heads/master | 2022-06-01T07:26:53.264948 | 2020-01-09T13:44:41 | 2020-01-09T13:44:41 | 232,816,845 | 0 | 0 | null | 2022-05-20T21:23:09 | 2020-01-09T13:34:35 | C# | UTF-8 | Python | false | false | 5,846 | py | # coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from api_server.models.base_model_ import Model
from api_server.models.link import Link
from api_server import util
class Person(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, id: str=None, universal_id: str=None, first_name: str=None, surname: str=None, email: str=None, telephone_number: str=None, links: List[Link]=None):
"""Person - a model defined in OpenAPI
:param id: The id of this Person.
:param universal_id: The universal_id of this Person.
:param first_name: The first_name of this Person.
:param surname: The surname of this Person.
:param email: The email of this Person.
:param telephone_number: The telephone_number of this Person.
:param links: The links of this Person.
"""
self.openapi_types = {
'id': str,
'universal_id': str,
'first_name': str,
'surname': str,
'email': str,
'telephone_number': str,
'links': List[Link]
}
self.attribute_map = {
'id': 'id',
'universal_id': 'universalId',
'first_name': 'firstName',
'surname': 'surname',
'email': 'email',
'telephone_number': 'telephoneNumber',
'links': 'links'
}
self._id = id
self._universal_id = universal_id
self._first_name = first_name
self._surname = surname
self._email = email
self._telephone_number = telephone_number
self._links = links
@classmethod
def from_dict(cls, dikt: dict) -> 'Person':
"""Returns the dict as a model
:param dikt: A dict.
:return: The Person of this Person.
"""
return util.deserialize_model(dikt, cls)
@property
def id(self):
"""Gets the id of this Person.
identifier of the person (worker) within the VMS
:return: The id of this Person.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Person.
identifier of the person (worker) within the VMS
:param id: The id of this Person.
:type id: str
"""
self._id = id
@property
def universal_id(self):
"""Gets the universal_id of this Person.
Global identifer, such as National Insurance number (where known), or other identifier or composite identifier that may be used for matching purposes.
:return: The universal_id of this Person.
:rtype: str
"""
return self._universal_id
@universal_id.setter
def universal_id(self, universal_id):
"""Sets the universal_id of this Person.
Global identifer, such as National Insurance number (where known), or other identifier or composite identifier that may be used for matching purposes.
:param universal_id: The universal_id of this Person.
:type universal_id: str
"""
self._universal_id = universal_id
@property
def first_name(self):
"""Gets the first_name of this Person.
:return: The first_name of this Person.
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""Sets the first_name of this Person.
:param first_name: The first_name of this Person.
:type first_name: str
"""
self._first_name = first_name
@property
def surname(self):
"""Gets the surname of this Person.
:return: The surname of this Person.
:rtype: str
"""
return self._surname
@surname.setter
def surname(self, surname):
"""Sets the surname of this Person.
:param surname: The surname of this Person.
:type surname: str
"""
self._surname = surname
@property
def email(self):
"""Gets the email of this Person.
:return: The email of this Person.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this Person.
:param email: The email of this Person.
:type email: str
"""
self._email = email
@property
def telephone_number(self):
"""Gets the telephone_number of this Person.
:return: The telephone_number of this Person.
:rtype: str
"""
return self._telephone_number
@telephone_number.setter
def telephone_number(self, telephone_number):
"""Sets the telephone_number of this Person.
:param telephone_number: The telephone_number of this Person.
:type telephone_number: str
"""
self._telephone_number = telephone_number
@property
def links(self):
"""Gets the links of this Person.
Array of HATEOAS-style references that may be followed by the client. This may include a 'worker.profile' URL, which will return an HTML page representing the worker profile within the VMS.
:return: The links of this Person.
:rtype: List[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this Person.
Array of HATEOAS-style references that may be followed by the client. This may include a 'worker.profile' URL, which will return an HTML page representing the worker profile within the VMS.
:param links: The links of this Person.
:type links: List[Link]
"""
self._links = links
| [
"[email protected]"
] | |
d302aaee20a1f34b79c85492e6c660c1b7a60229 | f443a7ab85f6eb99cc2466f147843faed0c2efd8 | /fivelayerssoftmax.py | 0488df1ccb7e1f73182d270249eb104271bca015 | [] | no_license | chaichai1997/deeplearning-tensorflow | 40d09a3c13518e0634ffebc1260add7b6fab80b9 | 1d75b759868ad0775ab8432d4828dc2448fcb882 | refs/heads/master | 2020-06-30T07:24:17.398378 | 2019-08-04T02:33:13 | 2019-08-04T02:33:13 | 200,765,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,961 | py | # -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import math
logs_path = 'log_simple_stats_5_layers_sigmoid'
batch_size = 100
learning_rate = 0.5
training_epochs = 10
mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
"""
ๆๅปบ็ฝ็ปๆถๆ๏ผๅณๆๅปบๅพ
"""
# xไธบ28*28็ๅพๅ
X = tf.placeholder(tf.float32, [None, 784], name="input")
# yไธบ10ไธชๅ
็ด ๅผ ้็ปๆ็ไธ็ปดๆฐๆฎ
Y_ = tf.placeholder(tf.float32, [None, 10])
L = 200 # ็ฌฌไธๅฑ็ฅ็ปๅ
ๆฐ็ฎ
M = 100 # ็ฌฌไบๅฑ
N = 60 # ็ฌฌไธๅฑ
O = 30 # ็ฌฌๅๅฑ
# ๅๆฐๅฎไน
W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1))
B1 = tf.Variable(tf.zeros([L]))
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.zeros([M]))
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.zeros([N]))
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.zeros([O]))
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))
# flatten the input into a one-dimensional tensor
# (note: dropout_radio below is declared but never fed, so dropout stays unused)
dropout_radio = tf.placeholder(tf.float32)
XX = tf.reshape(X, [-1, 784])
Y1 = tf.nn.sigmoid(tf.matmul(XX, W1) + B1)
# Y1 = tf.nn.relu(tf.matmul(Y1, W2) + B2)
# Y1d = tf.nn.dropout(Y1, dropout_radio)
Y2 = tf.nn.sigmoid(tf.matmul(Y1, W2) + B2)
# Y2 = tf.nn.relu(tf.matmul(Y1, W2) + B2)
Y3 = tf.nn.sigmoid(tf.matmul(Y2, W3) + B3)
# Y3 = tf.nn.relu(tf.matmul(Y2, W3) + B3)
Y4 = tf.nn.sigmoid(tf.matmul(Y3, W4) + B4)
# Y4 = tf.nn.relu(tf.matmul(Y3, W4) + B4)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)
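# softmax_cross_entropy_with_logits expects the raw logits (not Y) for
# numerical stability; Y itself is only needed for the accuracy computation.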
# loss function
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
learning_rate = 0.003
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
for epoch in range(training_epochs):
batch_count = int(mnist.train.num_examples/batch_size)
for i in range(batch_count):
batch_x, batch_y = mnist.train.next_batch(batch_size)
_, summary = sess.run([train_step, summary_op],
feed_dict={X: batch_x,
Y_: batch_y})
writer.add_summary(summary,
epoch * batch_count + i)
print("Epoch: ", epoch)
print("Accuracy: ", accuracy.eval(feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))
print("done") | [
"[email protected]"
] | |
b6c1a3f1e0ce006d9453defe233f1062e31be1a6 | 4b6ab4d9d8a3e35def45633149cab03a1430ecfb | /my_pytest/my_pytest/urls.py | ee611711be63735dd44762c4a628969b678fef31 | [
"MIT"
] | permissive | Anych/pytest_project | d8d7b2f45a05f1042db5195f5d01586d0866510f | 6ed90b3688212d0b5f035b0d9761f2cf5f99a82f | refs/heads/main | 2023-05-20T13:56:56.103966 | 2021-06-13T20:33:29 | 2021-06-13T20:33:29 | 375,334,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | from django.contrib import admin
from django.urls import path, include
from companies.urls import companies_router
from companies.views import send_company_email
urlpatterns = [
path("admin/", admin.site.urls),
path("", include(companies_router.urls)),
path("send-email", send_company_email),
]
| [
"[email protected]"
] | |
b6742b1202739ec60419f8f23681a7bb827e3aa0 | 8fbcb5eb7a527b700486ec161c09225dfdd30bbb | /Actividades/AC20/14632152_11633905.py | 47b2a52023588612eeeb6b43f2633acc9a7ee741 | [] | no_license | rjherrera/IIC2233 | 388eae95ed9a32e2de7239f4c99277b2c01f5223 | 6637ff92ee225092b7a598d765012153c39713ee | refs/heads/master | 2021-01-21T14:38:50.939337 | 2015-12-10T22:21:32 | 2015-12-10T22:21:32 | 95,313,145 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,535 | py | from PyQt4 import QtGui, uic
from calc_financiero import calcular_jub
form = uic.loadUiType("hexa.ui")
class MainWindow(form[0], form[1]):
def __init__(self):
super().__init__()
self.setupUi(self)
pixmap = QtGui.QPixmap('logo_argentum.png')
self.label_15.setPixmap(pixmap)
self.label_15.resize(self.label_15.sizeHint())
self.label_15.move(90, 0)
self.label_16.setScaledContents(True)
self.label_16.setPixmap(QtGui.QPixmap('logo_hexa.png'))
self.label_16.resize(100, 30)
self.label_14.move(105, 68)
        # monthly contribution
self.lineEdit.textChanged.connect(self.calcular)
self.lineEdit_2.textChanged.connect(self.calcular)
        # years
self.lineEdit_4.textChanged.connect(self.calcular)
self.lineEdit_5.textChanged.connect(self.calcular)
self.lineEdit_6.textChanged.connect(self.calcular)
# combobox
self.comboBox.currentIndexChanged.connect(self.calcular)
    # Complete the creation of the interface #
def calcular(self):
""" Completar esta funciรณn para calcular los cambios de los datos
en tiempo real segรบn el input del usuario. """
        # monthly contribution
ingreso = self.lineEdit
porcentaje = self.lineEdit_2
error = False
if ingreso.text() and porcentaje.text():
try:
porcentaje_valor = float(porcentaje.text())
res = int(ingreso.text()) * porcentaje_valor / 100
if not 0 <= porcentaje_valor <= 100:
raise ValueError
self.label_2.setText('$%f' % res)
self.statusbar.showMessage('')
error = False
except:
if not error:
self.statusbar.showMessage(
                    'Ingreso y/o porcentaje inválidos.')
error = True
        # pension years
edad_jubilacion = self.lineEdit_5
esperanza = self.lineEdit_6
if edad_jubilacion.text() and esperanza.text():
try:
res = int(esperanza.text()) - int(edad_jubilacion.text())
self.label_5.setText('%d' % res)
self.statusbar.showMessage('')
error = False
except:
if not error:
                    self.statusbar.showMessage('Edades inválidas.')
error = True
        # final calculation
edad_actual = self.lineEdit_4
seleccion = self.comboBox.itemText(self.comboBox.currentIndex())
if (ingreso.text() and porcentaje.text() and edad_actual.text() and
edad_jubilacion.text() and esperanza.text() and seleccion):
try:
res = calcular_jub(int(ingreso.text()),
float(porcentaje.text()),
int(edad_actual.text()),
int(edad_jubilacion.text()),
int(esperanza.text()),
seleccion)
self.label_13.setText(res)
self.statusbar.showMessage('')
error = False
except:
if not error:
self.statusbar.showMessage(
'Error en los datos ingresados.')
error = True
if __name__ == '__main__':
app = QtGui.QApplication([])
form = MainWindow()
form.show()
app.exec_()
| [
"[email protected]"
] | |
ee98f96da82d17f2de26d2ede7aa9b13a7b0fa32 | ef914133e0ade675ae201f7895c50d819180951b | /facebook_real_network_DB.py | 2945ea9efb10ccc0b748bfb8e060265939ebc6e9 | [] | no_license | vpahari/biconn | b094d6e7e6270f7601fde7de2f4d4528cd80aa20 | fd2259dfeb73a39bbdd4e616700f912cec8f17cf | refs/heads/master | 2021-06-01T18:54:09.477458 | 2020-09-22T14:49:48 | 2020-09-22T14:49:48 | 136,077,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,833 | py | import networkx as nx
import networkit as nk
import random
import sys
import math
from functools import reduce
import csv
from operator import itemgetter
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import pickle
import igraph as ig
import numpy as np
import os
import itertools
import pandas as pd
def get_name_WS(initial_name, dim, size, nei, p, SEED,radius):
return initial_name + "_dim_" + str(dim) + "_size_" + str(size) + "_nei_" + str(nei) + "_p_" + str(p) + "_SEED_" + str(SEED) + "_radius_" + str(radius) + "_" + ".pickle"
def get_name_ER(initial_name, N, k, SEED,radius):
return initial_name + "_N_" + str(N) + "_k_" + str(k) + "_SEED_" + str(SEED) + "_radius_" + str(radius) + "_" + ".pickle"
def get_name_SF(initial_name,N,k,exp_out,SEED,radius):
return initial_name + "_N_" + str(N) + "_k_" + str(k) + "_expout_" + str(exp_out) + "_SEED_" + str(SEED) + "_radius_" + str(radius) + "_" + ".pickle"
def make_WS_graph(dim,size,nei,p,SEED):
N = size ** dim
random.seed(SEED)
igG = ig.Graph.Watts_Strogatz(dim,size,nei,p)
allEdges = igG.get_edgelist()
fixed_G = nx.Graph()
listOfNodes = [i for i in range(N)]
fixed_G.add_nodes_from(listOfNodes)
fixed_G.add_edges_from(allEdges)
G_nk = nk.nxadapter.nx2nk(fixed_G)
return G_nk
def make_SF_Graph(N,k,exp_out,SEED):
random.seed(SEED)
num_edges = int((N * k) / 2)
igG = ig.Graph.Static_Power_Law(N,num_edges,exp_out)
allEdges = igG.get_edgelist()
fixed_G = nx.Graph()
listOfNodes = [i for i in range(N)]
fixed_G.add_nodes_from(listOfNodes)
fixed_G.add_edges_from(allEdges)
G_nk = nk.nxadapter.nx2nk(fixed_G)
return G_nk
def make_ER_Graph(N,k,SEED):
G_nx = nx.erdos_renyi_graph(N, k/(N-1), seed = SEED)
G_nk = nk.nxadapter.nx2nk(G_nx)
return G_nk
def make_realworldnetwork(file_name):
df = pd.read_excel(file_name)
file_len = len(df.source)
total_nodes = df.source[file_len - 1]
listOfNodes = [i for i in range(total_nodes)]
fixed_G = nx.Graph()
fixed_G.add_nodes_from(listOfNodes)
for i in range(file_len):
source = int(df.source[i])
target = int(df.target[i])
fixed_G.add_edge(source, target)
G_nk = nk.nxadapter.nx2nk(fixed_G)
return G_nk
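# Attack routines below. Naming (inferred from the code): DA = degree attack
# using the initial ranking, ADA = adaptive degree attack (recomputed after
# every removal), BA/ABA = (adaptive) betweenness attack, RA = random attack.
# GC and SGC denote the sizes of the largest and second-largest components.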
def DA_attack(G_copy,num_nodes_to_remove):
G = copy_graph(G_copy)
GC_List = []
original_degree_list = []
adaptive_degree_list = []
GC_List.append(get_GC(G))
degree = nk.centrality.DegreeCentrality(G)
degree.run()
degree_sequence = degree.ranking()
random.shuffle(degree_sequence)
degree_sequence.sort(key = itemgetter(1), reverse = True)
for i in range(num_nodes_to_remove):
node_to_remove = degree_sequence[i][0]
original_degree = degree_sequence[i][1]
adaptive_degree_list.append(G.degree(node_to_remove))
original_degree_list.append(original_degree)
G.removeNode(node_to_remove)
GC_List.append(get_GC(G))
return (GC_List, original_degree_list,adaptive_degree_list)
def ADA_attack(G_copy,num_nodes_to_remove):
G = copy_graph(G_copy)
GC_List = []
SGC_List = []
num_comp_List = []
    (GC, SGC, num_comp, _) = get_GC_SGC_number_of_components(G)  # helper returns a 4-tuple; avg size unused here
GC_List.append(GC)
SGC_List.append(SGC)
num_comp_List.append(num_comp)
degree_list = []
for i in range(num_nodes_to_remove):
degree = nk.centrality.DegreeCentrality(G)
degree.run()
degree_sequence = degree.ranking()
random.shuffle(degree_sequence)
degree_sequence.sort(key = itemgetter(1), reverse = True)
node_to_remove = degree_sequence[0][0]
degree_list.append(G.degree(node_to_remove))
G.removeNode(node_to_remove)
        (GC, SGC, num_comp, _) = get_GC_SGC_number_of_components(G)
GC_List.append(GC)
SGC_List.append(SGC)
num_comp_List.append(num_comp)
return (GC_List, SGC_List, num_comp_List, degree_list)
def BA_attack(G_copy,num_nodes_to_remove):
G = copy_graph(G_copy)
GC_List = []
GC_List.append(get_GC(G))
between = nk.centrality.DynBetweenness(G)
between.run()
between_sequence = between.ranking()
random.shuffle(between_sequence)
between_sequence.sort(key = itemgetter(1), reverse = True)
for i in range(num_nodes_to_remove):
node_to_remove = between_sequence[i][0]
G.removeNode(node_to_remove)
GC_List.append(get_GC(G))
return GC_List
def ABA_attack(G_copy,num_nodes_to_remove):
G = copy_graph(G_copy)
GC_List = []
SGC_List = []
num_comp_List = []
    (GC, SGC, num_comp, _) = get_GC_SGC_number_of_components(G)
GC_List.append(GC)
SGC_List.append(SGC)
num_comp_List.append(num_comp)
for i in range(num_nodes_to_remove):
between = nk.centrality.DynBetweenness(G)
between.run()
between_sequence = between.ranking()
between_sequence.sort(key = itemgetter(1), reverse = True)
node_to_remove = between_sequence[0][0]
G.removeNode(node_to_remove)
        (GC, SGC, num_comp, _) = get_GC_SGC_number_of_components(G)
GC_List.append(GC)
SGC_List.append(SGC)
num_comp_List.append(num_comp)
return (GC_List, SGC_List, num_comp_List)
def RA_attack(G_copy,num_nodes_to_remove):
G = copy_graph(G_copy)
GC_List = []
SGC_List = []
num_comp_List = []
    (GC, SGC, num_comp, _) = get_GC_SGC_number_of_components(G)
GC_List.append(GC)
SGC_List.append(SGC)
num_comp_List.append(num_comp)
all_nodes = random.sample(list(G.nodes()),num_nodes_to_remove)
for i in all_nodes:
G.removeNode(i)
        (GC, SGC, num_comp, _) = get_GC_SGC_number_of_components(G)
GC_List.append(GC)
SGC_List.append(SGC)
num_comp_List.append(num_comp)
return (GC_List, SGC_List, num_comp_List)
def big_RA_attack(G_copy,num_nodes_to_remove,num_sims):
big_GC_List = []
big_SGC_List = []
big_numComp_List = []
for i in range(num_sims):
(GC_List, SGC_List, num_comp_List) = RA_attack(G_copy,num_nodes_to_remove)
big_GC_List.append(GC_List)
big_SGC_List.append(SGC_List)
big_numComp_List.append(num_comp_List)
avg_list_GC = get_avg_list(big_GC_List)
avg_list_SGC = get_avg_list(big_SGC_List)
avg_list_numComp = get_avg_list(big_numComp_List)
return (avg_list_GC, avg_list_SGC, avg_list_numComp)
def get_betweenness_score(G, node):
between = nk.centrality.DynBetweenness(G)
between.run()
return between.score(node)
def get_degree_score(G,node):
return G.degree(node)
def get_coreness_score(G,node):
coreness = nk.centrality.CoreDecomposition(G)
coreness.run()
partition = coreness.getPartition()
core_number = partition.subsetOf(node)
return core_number
def get_betweenness_score_list(G, node_list):
between = nk.centrality.DynBetweenness(G)
between.run()
final_list = []
for node in node_list:
final_list.append(between.score(node))
return final_list
def get_degree_score_list(G,node_list):
final_list = []
for node in node_list:
final_list.append(G.degree(node))
return final_list
def get_coreness_score_list(G,node_list):
coreness = nk.centrality.CoreDecomposition(G)
coreness.run()
final_list = []
partition = coreness.getPartition()
for node in node_list:
final_list.append(partition.subsetOf(node))
return final_list
def add_into_set(s,new_s):
for i in new_s:
s.add(i)
return s
def take_out_list(dBall, ball):
new_list = []
for i in dBall:
if i in ball:
continue
new_list.append(i)
return new_list
#change this such that the neighbors are diff
def get_dBN(G,node,radius):
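    # Grow a ball of the given radius around `node` by breadth-first search:
    # `ball` collects every node within the radius, while `dBall` ends up
    # holding only the frontier, i.e. the nodes at exactly that distance.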
dBall = set([node])
ball = set([node])
for i in range(radius):
neighbor = []
for j in dBall:
for n in G.neighbors(j):
if n in ball:
continue
neighbor.append(n)
ball = add_into_set(ball,neighbor)
dBall = set(neighbor.copy())
return (list(dBall),list(ball))
def get_all_dBN(G,radius):
all_nodes = get_GC_nodes(G)
dict_nodes_dBall = {}
dict_nodes_ball = {}
dict_nodes_x_i = {}
for n in all_nodes:
(dBall,ball) = get_dBN(G,n,radius)
dict_nodes_dBall[n] = len(dBall)
dict_nodes_ball[n] = len(ball)
dict_nodes_x_i[n] = len(dBall) / len(ball)
return (dict_nodes_dBall,dict_nodes_ball,dict_nodes_x_i)
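# x_i = |dBall| / |ball| is a surface-to-volume score: the attack below
# removes first the frontier of the node whose ball has the smallest
# non-zero x_i.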
def make_partitions(dict_nodes_x_i, step_size):
counter = 0
values_list = list(dict_nodes_x_i.values())
num_partitions = int(1 / step_size)
all_values = [0 for i in range(num_partitions)]
for i in values_list:
box_to_put = int(i / step_size)
if box_to_put == num_partitions:
all_values[-1] = all_values[-1] + 1
continue
all_values[box_to_put] = all_values[box_to_put] + 1
return all_values
def get_all_same_x_i(sorted_list,x_i_value):
node_list = []
for i in sorted_list:
if i[1] == x_i_value:
node_list.append(i[0])
return node_list
def get_largest_dball(dball_dict,node_list):
largest_dball = 0
largest_node = 0
for i in node_list:
print(dball_dict[i])
if dball_dict[i] > largest_dball:
largest_dball = dball_dict[i]
largest_node = i
return largest_node
def get_random_dball(node_list):
return random.choice(node_list)
def dict_to_sorted_list(d):
new_list = list(d.items())
final_list = sorted(new_list, key = itemgetter(1))
final_list_no_0 = list(filter(lambda x : x[1] != 0, final_list))
if len(final_list_no_0) != 0:
x_i_value = final_list_no_0[0][1]
nodes_list = get_all_same_x_i(final_list_no_0, x_i_value)
return nodes_list
else:
return final_list_no_0
def get_GC_nodes(G):
comp = nk.components.DynConnectedComponents(G)
comp.run()
all_comp = comp.getComponents()
all_comp.sort(key = len)
return all_comp[-1]
def get_GC(G):
comp = nk.components.DynConnectedComponents(G)
comp.run()
all_comp_sizes = comp.getComponentSizes()
all_values = list(all_comp_sizes.values())
all_values.sort()
return all_values[-1]
def get_avg_comp_size(all_val):
avg = sum(all_val) / len(all_val)
return avg
def get_GC_SGC_number_of_components(G):
comp = nk.components.DynConnectedComponents(G)
comp.run()
all_comp_sizes = comp.getComponentSizes()
all_values = list(all_comp_sizes.values())
all_values.sort()
if len(all_values) == 1:
return (all_values[-1], 0,1, all_values[-1])
else:
avg_comp_size = get_avg_comp_size(all_values)
return (all_values[-1],all_values[-2],len(all_values),avg_comp_size)
def copy_graph(G):
G_copy = G.copyNodes()
edges = G.edges()
for (i,j) in edges:
G_copy.addEdge(i,j)
return G_copy
#dball, vball, degree, betweenness, coreness
def dBalls_attack(G_copy,radius):
G = copy_graph(G_copy)
GC_List = []
SGC_List = []
num_comp_List = []
size_dball = []
size_ball = []
degree_list_mainNode = []
betweenness_list_mainNode = []
coreness_list_mainNode = []
degree_list_removedNode = []
betweenness_list_removedNode = []
coreness_list_removedNode = []
counter = 0
counter_list = []
    (GC, SGC, num_comp, _) = get_GC_SGC_number_of_components(G)
GC_List.append(GC)
SGC_List.append(SGC)
num_comp_List.append(num_comp)
counter_list.append(counter)
num_nodes_to_remove = G.numberOfNodes()
while counter < num_nodes_to_remove:
print(counter)
(dict_nodes_dBall,dict_nodes_ball,dict_nodes_x_i) = get_all_dBN(G,radius)
list_to_remove = dict_to_sorted_list(dict_nodes_x_i)
if len(list_to_remove) == 0:
break
node = get_random_dball(list_to_remove)
(dBall,ball) = get_dBN(G,node,radius)
combined_list = [node] + dBall
between_list = get_betweenness_score_list(G,combined_list)
degree_list = get_degree_score_list(G,combined_list)
coreness_list = get_coreness_score_list(G,combined_list)
degree_list_mainNode.append(degree_list[0])
betweenness_list_mainNode.append(between_list[0])
coreness_list_mainNode.append(coreness_list[0])
degree_list_removedNode += degree_list[1:]
betweenness_list_removedNode += between_list[1:]
coreness_list_removedNode += coreness_list[1:]
size_dball.append(len(dBall))
size_ball.append(len(ball))
#print(dBall)
#print(ball)
for i in dBall:
G.removeNode(i)
counter += 1
        (GC, SGC, num_comp, _) = get_GC_SGC_number_of_components(G)
GC_List.append(GC)
SGC_List.append(SGC)
num_comp_List.append(num_comp)
counter_list.append(counter)
return (GC_List,SGC_List,num_comp_List,counter_list,size_dball,size_ball,degree_list_mainNode,betweenness_list_mainNode,coreness_list_mainNode,degree_list_removedNode,betweenness_list_removedNode,coreness_list_removedNode)
def get_degree_dict(G):
all_nodes = list(G.nodes())
final_dict = {}
for i in all_nodes:
final_dict[i] = G.degree(i)
return final_dict
def get_all_dBN_allNodes(G,radius):
all_nodes = list(G.nodes())
dict_nodes_dBall = {}
dict_nodes_ball = {}
dict_nodes_x_i = {}
for n in all_nodes:
(dBall,ball) = get_dBN(G,n,radius)
dict_nodes_dBall[n] = len(dBall)
dict_nodes_ball[n] = len(ball)
dict_nodes_x_i[n] = len(dBall) / len(ball)
return (dict_nodes_dBall,dict_nodes_ball,dict_nodes_x_i)
def dBalls_attack_adapt(G_copy,radius):
G = copy_graph(G_copy)
GC_List = []
SGC_List = []
num_comp_List = []
avg_comp_size_List = []
size_dball = []
size_ball = []
degree_list_mainNode = []
betweenness_list_mainNode = []
coreness_list_mainNode = []
degree_list_removedNode = []
betweenness_list_removedNode = []
coreness_list_removedNode = []
counter = 0
counter_list = []
(GC,SGC,num_comp,avg_comp_size) = get_GC_SGC_number_of_components(G)
GC_List.append(GC)
SGC_List.append(SGC)
num_comp_List.append(num_comp)
avg_comp_size_List.append(avg_comp_size)
counter_list.append(counter)
original_degree_dict = get_degree_dict(G)
original_degree_main_node = []
original_degree_removed_node = []
num_nodes_to_remove = G.numberOfNodes()
(dict_nodes_dBall,dict_nodes_ball,original_dict_nodes_x_i) = get_all_dBN_allNodes(G,radius)
original_xi_values = []
num_nodes_to_remove = G.numberOfNodes()
while counter < num_nodes_to_remove:
print(counter)
(dict_nodes_dBall,dict_nodes_ball,dict_nodes_x_i) = get_all_dBN(G,radius)
list_to_remove = dict_to_sorted_list(dict_nodes_x_i)
if len(list_to_remove) == 0:
break
node = get_random_dball(list_to_remove)
(dBall,ball) = get_dBN(G,node,radius)
original_xi_values.append(original_dict_nodes_x_i[node])
size_dball.append(len(dBall))
size_ball.append(len(ball))
combined_list = [node] + dBall
original_degree_main_node.append(original_degree_dict[node])
for i in dBall:
original_degree_removed_node.append(original_degree_dict[i])
#between_list = get_betweenness_score_list(G,combined_list)
degree_list = get_degree_score_list(G,combined_list)
#coreness_list = get_coreness_score_list(G,combined_list)
degree_list_mainNode.append(degree_list[0])
#betweenness_list_mainNode.append(between_list[0])
#coreness_list_mainNode.append(coreness_list[0])
degree_list_removedNode += degree_list[1:]
#betweenness_list_removedNode += between_list[1:]
#coreness_list_removedNode += coreness_list[1:]
for i in dBall:
G.removeNode(i)
counter += 1
print(GC)
(GC,SGC,num_comp,avg_comp_size) = get_GC_SGC_number_of_components(G)
GC_List.append(GC)
SGC_List.append(SGC)
num_comp_List.append(num_comp)
avg_comp_size_List.append(avg_comp_size)
counter_list.append(counter)
return (GC_List, SGC_List, num_comp_List, avg_comp_size_List, counter_list,size_dball,size_ball,degree_list_mainNode,degree_list_removedNode,original_degree_main_node,original_degree_removed_node, original_xi_values)
def dict_to_sorted_list_NA(d):
new_list = list(d.items())
random.shuffle(new_list)
final_list = sorted(new_list, key = itemgetter(1))
return final_list
def get_avg_list(big_list):
counter = 0
size_of_list = len(big_list[0])
avg_list = []
while counter < size_of_list:
index_list = list(map(lambda x : x[counter], big_list))
avg = sum(index_list) / len(index_list)
avg_list.append(avg)
counter += 1
return avg_list
def turn_lists_together(GC_List,num_nodes_removed):
final_list = []
pointer = 0
counter = 0
for i in num_nodes_removed:
diff = i - counter
for j in range(diff):
final_list.append(GC_List[pointer])
counter += 1
pointer += 1
return final_list
def random_ball_removal(G_copy,radius,num_nodes_to_remove):
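    # Remove the frontier of a ball around a random GC node at each step; if no
    # non-empty frontier is found for more than 0.1*N consecutive tries, fall
    # back to deleting uniformly random nodes for the remaining budget.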
G = copy_graph(G_copy)
counter = 0
GC_list = []
size_dball = []
size_ball = []
continue_counter = 0
N = G.numberOfNodes()
while counter < num_nodes_to_remove:
if continue_counter > (0.1 * N):
all_nodes = list(G.nodes())
node_sample = random.sample(all_nodes,(num_nodes_to_remove - counter))
for i in node_sample:
G.removeNode(i)
counter += 1
GC_list.append(get_GC(G))
break
print(counter)
all_nodes = get_GC_nodes(G)
node = random.choice(all_nodes)
(dBall,ball) = get_dBN(G,node,radius)
if len(dBall) == 0:
continue_counter += 1
continue
size_dball.append(len(dBall))
size_ball.append(len(ball))
for i in dBall:
G.removeNode(i)
counter += 1
GC_list.append(get_GC(G))
continue_counter = 0
return (GC_list,size_dball,size_ball)
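# NOTE: the big_sim* helpers below call perc_process_dBalls* routines that are
# not defined in this file (presumably from an earlier revision); they are
# dead code as far as the main script at the bottom is concerned.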
def big_sim(N,k,SEED,radius,perc_to_remove,num_sims):
big_GC_List = []
big_size_dball = []
big_size_ball = []
big_dg_list = []
for i in range(num_sims):
G_nx = nx.erdos_renyi_graph(N, k/(N-1), seed = SEED * (i+1))
G_nk = nk.nxadapter.nx2nk(G_nx)
num_nodes_to_remove = int(perc_to_remove * N)
(GC_List,size_dball,size_ball,dg_list) = perc_process_dBalls(G_nk,radius,num_nodes_to_remove)
GC_List_to_append = GC_List[:num_nodes_to_remove]
big_GC_List.append(GC_List_to_append)
big_size_dball.append(size_dball)
big_size_ball.append(size_ball)
big_dg_list.append(dg_list)
return (big_GC_List,big_size_dball,big_size_ball,big_dg_list)
def big_sim_dball(N,k,SEED,radius,perc_to_remove,num_sims):
big_GC_List = []
big_size_dball = []
big_size_ball = []
big_dg_list = []
for i in range(num_sims):
G_nx = nx.erdos_renyi_graph(N, k/(N-1), seed = SEED * (i+1))
G_nk = nk.nxadapter.nx2nk(G_nx)
num_nodes_to_remove = int(perc_to_remove * N)
(GC_List,size_dball,size_ball,dg_list) = perc_process_dBalls_bigDBalls(G_nk,radius,num_nodes_to_remove)
GC_List_to_append = GC_List[:num_nodes_to_remove]
big_GC_List.append(GC_List_to_append)
big_size_dball.append(size_dball)
big_size_ball.append(size_ball)
big_dg_list.append(dg_list)
return (big_GC_List,big_size_dball,big_size_ball,big_dg_list)
def big_sim_SF(N,k,exp_out,radius,perc_to_remove,num_sims):
big_GC_List = []
big_size_ball = []
big_size_dball = []
big_dg_list = []
for i in range(num_sims):
G_nk = make_SF_Graph(N,k,exp_out)
num_nodes_to_remove = int(perc_to_remove * N)
(GC_List,size_dball,size_ball,degree_list) = perc_process_dBalls(G_nk,radius,num_nodes_to_remove)
GC_List_to_append = GC_List[:num_nodes_to_remove]
big_GC_List.append(GC_List_to_append)
big_size_ball.append(size_ball)
big_size_dball.append(size_dball)
big_dg_list.append(degree_list)
return (big_GC_List,big_size_dball,big_size_ball,big_dg_list)
def big_sim_changing_radius(G,start_radius,end_radius):
big_GC_List = []
big_counter_list = []
curr_radius = start_radius
while curr_radius <= end_radius:
(GC_List,size_dball,size_ball,degree_list,counter_list) = perc_process_dBalls_track_balls(G,curr_radius)
big_GC_List.append(GC_List)
big_counter_list.append(counter_list)
curr_radius += 1
return (big_GC_List,big_counter_list)
def get_results_NA(G, radius):
N = G.numberOfNodes()
    (GC_list_DA, _, _) = DA_attack(G, int(N * 0.99))
    GC_list_BA = BA_attack(G, int(N * 0.99))
    (GC_list_RAN, _, _) = big_RA_attack(G, int(N * 0.99), 20)
    # dBalls_attack_NA was undefined and G_copy was out of scope here;
    # dBalls_attack is the closest existing routine and its first element
    # is the GC series.
    GC_List_DB = dBalls_attack(G, radius)[0]
return (GC_list_BA, GC_list_DA, GC_list_RAN, GC_List_DB)
def get_result(G, radius):
N = G.numberOfNodes()
(GC_List_DB, SGC_List_DB,num_comp_List_DB,counter_list,size_dball,size_ball,degree_list_mainNode,betweenness_list_mainNode,coreness_list_mainNode,degree_list_removedNode,betweenness_list_removedNode,coreness_list_removedNode) = dBalls_attack(G,radius)
return (GC_List_DB, SGC_List_DB,num_comp_List_DB,counter_list,size_dball,size_ball,degree_list_mainNode,betweenness_list_mainNode,coreness_list_mainNode,degree_list_removedNode,betweenness_list_removedNode,coreness_list_removedNode)
def get_airport_graph():
reader = open("routes.txt", "r")
line_string = reader.readlines()
counter = 0
dict_airport_node = {}
print(len(line_string))
G = nx.Graph()
double_edges = []
for line in line_string:
split_list = line.split(",")
source = split_list[2]
target = split_list[4]
if source not in dict_airport_node:
dict_airport_node[source] = counter
G.add_node(counter)
counter += 1
if target not in dict_airport_node:
dict_airport_node[target] = counter
G.add_node(counter)
counter += 1
source_node = dict_airport_node[source]
target_node = dict_airport_node[target]
if G.has_edge(source_node, target_node):
print(source, target)
double_edges.append((source, target))
continue
else:
G.add_edge(source_node, target_node)
G_nk = nk.nxadapter.nx2nk(G)
return G_nk
def get_facebook_graph():
reader = open("facebook_combined.txt", "r")
line_string = reader.readlines()
counter = 0
G = nx.Graph()
nodes_list = [i for i in range(4039)]
G.add_nodes_from(nodes_list)
for line in line_string:
split_list = line.split(" ")
print(split_list)
source = int(split_list[0])
target = int(split_list[1])
if not G.has_edge(source, target):
G.add_edge(source, target)
G_nk = nk.nxadapter.nx2nk(G)
return G_nk
radius = int(sys.argv[1])
adaptive_type = "ADAPT"
graph_type = "FACEBOOK"
G = get_facebook_graph()
print(G.numberOfNodes())
print(G.numberOfEdges())
N = G.numberOfNodes()
E = G.numberOfEdges()
k = (2 * E) / N
SEED = 0
print(N)
print(E)
print(k)
(GC_List, SGC_List, num_comp_List,avg_comp_size_List, counter_list,size_dball,size_ball,degree_list_mainNode,degree_list_removedNode,original_degree_main_node,original_degree_removed_node, original_xi_values) = dBalls_attack_adapt(G,radius)
init_name_GC_DB = adaptive_type + "SGCattackDB_" + graph_type + "_GC"
init_name_dball = adaptive_type + "SGCattackDB_" + graph_type + "_DBALL"
init_name_ball = adaptive_type + "SGCattackDB_" + graph_type + "_BALL"
init_name_CL = adaptive_type + "SGCattackDB_" + graph_type + "_CL"
init_name_deg_mainNode = adaptive_type + "SGCattackDB_" + graph_type + "_degMainNode"
init_name_deg_removedNode = adaptive_type + "SGCattackDB_" + graph_type + "_degRemovedNode"
init_name_SGC_DB = adaptive_type + "SGCattackDB_" + graph_type + "_SGC"
init_name_numComp_DB = adaptive_type + "SGCattackDB_" + graph_type + "_numberOfComponents"
init_name_avgSize_DB = adaptive_type + "SGCattackDB_" + graph_type + "_avgComponents"
init_name_original_degree_main_node = adaptive_type + "SGCattackDB_" + graph_type + "_originalDegreeMainNode"
init_name_original_degree_removed_node = adaptive_type + "SGCattackDB_" + graph_type + "_originalDegreeRemovedNode"
init_name_original_xi_values = adaptive_type + "SGCattackDB_" + graph_type + "_originalXIValues"
GC_List_DB_name = get_name_ER(init_name_GC_DB, N, k, SEED,radius)
CL_name = get_name_ER(init_name_CL, N, k, SEED,radius)
dBall_name = get_name_ER(init_name_dball, N, k, SEED,radius)
ball_name = get_name_ER(init_name_ball, N, k, SEED,radius)
SGC_DB_name = get_name_ER(init_name_SGC_DB, N, k, SEED, radius)
numComp_DB_name = get_name_ER(init_name_numComp_DB, N, k, SEED, radius)
avgSize_DB_name = get_name_ER(init_name_avgSize_DB, N, k, SEED, radius)
deg_mainNode_name = get_name_ER(init_name_deg_mainNode, N, k, SEED,radius)
deg_removedNode_name = get_name_ER(init_name_deg_removedNode, N, k, SEED,radius)
original_xi_values_name = get_name_ER(init_name_original_xi_values, N, k, SEED,radius)
with open(GC_List_DB_name,'wb') as handle:
pickle.dump(GC_List, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(CL_name,'wb') as handle:
pickle.dump(counter_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(dBall_name,'wb') as handle:
pickle.dump(size_dball, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(ball_name,'wb') as handle:
pickle.dump(size_ball, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(deg_mainNode_name,'wb') as handle:
pickle.dump(degree_list_mainNode, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(deg_removedNode_name,'wb') as handle:
pickle.dump(degree_list_removedNode, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(SGC_DB_name,'wb') as handle:
pickle.dump(SGC_List, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(numComp_DB_name,'wb') as handle:
pickle.dump(num_comp_List, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(avgSize_DB_name,'wb') as handle:
pickle.dump(avg_comp_size_List, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(original_xi_values_name,'wb') as handle:
pickle.dump(original_xi_values, handle, protocol=pickle.HIGHEST_PROTOCOL)
| [
"[email protected]"
] | |
5d90c7c8469e0b914663c45f3b039bfd1e2d49f4 | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/3/ising2d_microstates_20200505224503.py | 7e59314942b34605b3e6386332a64b58176c61ce | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,266 | py | # -*- coding: utf-8 -*-
from __future__ import division
import os
from time import time
import datetime
import collections
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import pandas as pd
script_dir = os.path.dirname(os.path.abspath(__file__))
def save_csv(data, data_headers=None, data_index=None, file_name=None,
relevant_info=None, print_data=True):
"""
    Usage: data must contain lists that will be the columns of a CSV file saved
           under the name file_name. relevant_info adds comments in the first
           lines of the file.
    Receives:
        data: array of arrays, shape=(nx,ny) -> each column becomes a column of the file.
        data_headers: numpy array, shape=(ny,) -> column names
        data_index: numpy array, shape=(nx,) -> row names
        file_name: str -> name of the file in which the data will be saved.
        relevant_info: list of str -> information added as comments in the
                                      first lines. Each element of this list
                                      is added as a new line.
        print_data: bool -> decides whether the saved data is printed on screen.
    Returns:
        data_pdDF: pd.DataFrame -> the data as a "pandas DataFrame".
        Saves a file with the data and the relevant information in its first lines.
"""
data_pdDF = pd.DataFrame(data, columns=data_headers, index=data_index)
    # Name the file so that it is saved in the folder that contains the
    # script using it
if file_name==None:
now = datetime.datetime.now()
        # full path for this script
file_name = (script_dir + '/' + 'csv-file-%.4d%.2d%.2d%.2d%.2d%.2d.csv'
%(now.year,now.month,now.day,now.hour,
now.minute,now.second))
    # Create the CSV file and add the relevant comments given as input
if relevant_info:
        # Add the relevant information in the first lines
with open(file_name,mode='w') as file_csv:
for info in list(relevant_info):
file_csv.write('# '+info+'\n')
file_csv.close()
        # Use pandas to write the data in CSV format.
with open(file_name,mode='a') as file_csv:
data_pdDF.to_csv(file_csv)
file_csv.close()
else:
with open(file_name,mode='w') as file_csv:
data_pdDF.to_csv(file_csv)
file_csv.close()
    # Print the data on screen.
if print_data==True:
print(data_pdDF)
return data_pdDF
def ising_microstates(L=2):
    # system size
N = L * L
t_0 = time()
    # Array that explicitly stores all the microstates (2**N in total);
    # each row of the array represents a different microstate
microstates = np.array([[0] * N] * 2**N)
    # First half of the microstates.
    # At each step, for the i-th spin, half of the microstates are strategically
    # filled with 1 and the other half with -1, which yields every possible
    # configuration exactly once.
for i in range(N):
index_factor = int(2**N / 2**(i+1))
for j in range(2**i):
microstates[j*index_factor : (j+1)*index_factor, i] = (-1)**j
    # The second half of the microstates are the states opposite to those of
    # the first half
microstates[int((2 ** N) / 2):,:] = - np.copy(microstates[:int((2 ** N) / 2), :])
t_1 = time()
comp_time = t_1 - t_0
print('\n----------------------------------------------------------\n'
+ 'Explicit microstates: L = %d --> computation time = %.3f \n'%(L,comp_time)
+ '----------------------------------------------------------\n')
return microstates
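# For instance, with N = 2 spins the filling above produces [1, 1] and
# [1, -1] in the first half, and their negations [-1, -1] and [-1, 1] in the
# second half: all 2**N configurations exactly once.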
def ising_neighbours(L=2):
"""
    neighbours of spin i in the format {i: (right, down, left, up)}
    right = index of the previous 'row' * number of spins per row
            + relative position within the row of the spin to the right,
            taking periodic boundary conditions into account (modulo L)
    down = spin position + number of spins per row, taking periodic
           boundary conditions into account (modulo N)
    left = index of the previous 'row' * number of spins per row
           + relative position within the row of the spin to the left,
           taking periodic boundary conditions into account (modulo L)
    up = spin position - number of spins per row, taking periodic
         boundary conditions into account (modulo N)
"""
N = L * L
ngbrs = {i: ((i//L)*L + (i+1)%L, (i+L) % N,
(i//L)*L + (i-1)%L, (i-L) % N) for i in range(N)}
return ngbrs
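# Worked example: for L = 2 (N = 4) the table is
#   {0: (1, 2, 1, 2), 1: (0, 3, 0, 3), 2: (3, 0, 3, 0), 3: (2, 1, 2, 1)},
# each neighbour appearing twice because of the periodic boundaries.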
def ising_energy(microstates, ngbrs, J=1, save_data=False, data_file_name=None):
t_0 = time()
energies = []
N = len(ngbrs)
L = int(N**0.5)
for microstate_j in microstates:
energy_j = 0
for i in range(N):
for ngbr in ngbrs[i]:
energy_j -= microstate_j[i] * microstate_j[ngbr]
energies.append(energy_j)
    # The loop counts each energy contribution twice, so a factor of 0.5
    # corrects for it; the exchange-integral factor J is also applied.
energies = 0.5 * J * np.array(energies)
    # Save the energy data
if save_data:
if not data_file_name:
data_file_name = 'ising-energy-data-L_%d.csv'%(L)
data_file_name = script_dir + '/' + data_file_name
relevant_info = ['2D Ising energies: all microstates. L=%d.'%L]
headers = ['i-th microstate energy']
save_csv(energies, data_headers=headers, file_name=data_file_name, relevant_info=relevant_info)
t_1 = time()
comp_time = t_1-t_0
print('\n--------------------------------------------------------\n'
+ 'Explicit energies: L = %d --> computation time = %.3f \n'%(L,comp_time)
+ '--------------------------------------------------------\n')
return energies
def ising_microstate_plot(config, show_plot=True, save_plot=False, plot_file_name=None):
L = int(len(config)**0.5)
bw_cmap = colors.ListedColormap(['black', 'white'])
fig, ax = plt.subplots(1, 1)
ax.imshow(config.reshape(L,L), cmap=bw_cmap, extent=(0,L,L,0), aspect='equal')
ax.xaxis.set_ticks_position('top')
ax.set_xticks(range(0,L+1))
ax.set_yticks(range(0,L+1))
plt.tight_layout()
if save_plot:
if not plot_file_name:
now = datetime.datetime.now()
plot_file_name = 'ising-config-plot-L_%d.pdf'%(L)
plot_file_name = script_dir + '/' + plot_file_name
plt.savefig(plot_file_name)
if show_plot:
plt.show()
return
def ising_energy_plot(energies, L, show_plot=True, save_plot=False, plot_file_Name=None):
x_lim = [0, 0, 10, 20, 30, 55, 80]
plt.xlim(-1*x_lim[L],x_lim[L])
    plt.hist(energies, bins=L**3+1, label='Histograma energías\nIsing $L\\times L=%d$'%(L*L))
plt.xlabel('$E$')
plt.ylabel('Frecuencia')
plt.legend(loc='best', fancybox=True, framealpha=0.5)
plt.tight_layout()
if save_plot:
if not plot_file_Name:
plot_file_Name = 'ising-energy-plot-L_%d.pdf'%(L)
plot_file_Name = script_dir + '/' + plot_file_Name
plt.savefig(plot_file_Name)
if show_plot:
plt.show()
plt.close()
return
def energy_data_to_frequencies(energy_data_file_name):
energy_data_file_name = script_dir + '/' + energy_data_file_name
energy_data = pd.read_csv(energy_data_file_name, index_col=0, comment='#')
energy_data = energy_data.to_numpy()
    # count how many microstates share each energy (the original Counter was
    # never fed any data, which is assumed to be an oversight)
    energy_omegas = collections.Counter(energy_data.flatten())
energy_omegas = np.array([[energy, freq] for energy, freq in energy_omegas.items()])
return list(energy_omegas.transpose())
| [
"[email protected]"
] | |
65e0ac96fa03bc4c5d8b73113cbf7696a1137df2 | 958685165bfeb4122cc3473659a6d0c89c5cae95 | /crea8s_warehouse/stock.py | 784d425ff8259ded6339e857f54c984518363ce3 | [] | no_license | tringuyen17588/OpenERP-7.0 | 44efee7735af65d960c5adb4b03a1a329f5c4a57 | 2486261e4d351d4f444ec31e74c6b0e36ed2fb82 | refs/heads/master | 2021-01-10T02:45:24.320726 | 2016-02-19T06:05:21 | 2016-02-19T06:05:21 | 52,064,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,556 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
import time
import datetime
from datetime import timedelta
class product_product(osv.osv):
_inherit = "product.product"
def get_product_available(self, cr, uid, ids, context=None):
""" Finds whether product is available or not in particular warehouse.
@return: Dictionary of values
"""
if context is None:
context = {}
location_obj = self.pool.get('stock.location')
warehouse_obj = self.pool.get('stock.warehouse')
shop_obj = self.pool.get('sale.shop')
states = context.get('states',[])
what = context.get('what',())
if not ids:
ids = self.search(cr, uid, [])
res = {}.fromkeys(ids, 0.0)
if not ids:
return res
if context.get('shop', False):
warehouse_id = shop_obj.read(cr, uid, int(context['shop']), ['warehouse_id'])['warehouse_id'][0]
if warehouse_id:
context['warehouse'] = warehouse_id
if context.get('warehouse', False):
lot_id = warehouse_obj.read(cr, uid, int(context['warehouse']), ['lot_stock_id'])['lot_stock_id'][0]
if lot_id:
context['location'] = lot_id
if context.get('location', False):
if type(context['location']) == type(1):
location_ids = [context['location']]
elif type(context['location']) in (type(''), type(u'')):
location_ids = location_obj.search(cr, uid, [('name','ilike',context['location'])], context=context)
else:
location_ids = context['location']
else:
location_ids = []
        # Recompute the quantities, restricted to the warehouses this user may access
users_br = self.pool.get('res.users').browse(cr, uid, uid)
wids = [x.id for x in users_br.warehouse_ids]
if not wids:
company_id = users_br.company_id.id
wids = warehouse_obj.search(cr, uid, [('company_id', '=', company_id)], context=context)
if not wids:
return res
for w in warehouse_obj.browse(cr, uid, wids, context=context):
location_ids.append(w.lot_stock_id.id)
# build the list of ids of children of the location given by id
if context.get('compute_child',True):
child_location_ids = location_obj.search(cr, uid, [('location_id', 'child_of', location_ids)])
location_ids = child_location_ids or location_ids
# this will be a dictionary of the product UoM by product id
product2uom = {}
uom_ids = []
for product in self.read(cr, uid, ids, ['uom_id'], context=context):
product2uom[product['id']] = product['uom_id'][0]
uom_ids.append(product['uom_id'][0])
# this will be a dictionary of the UoM resources we need for conversion purposes, by UoM id
uoms_o = {}
for uom in self.pool.get('product.uom').browse(cr, uid, uom_ids, context=context):
uoms_o[uom.id] = uom
results = []
results2 = []
from_date = context.get('from_date',False)
to_date = context.get('to_date',False)
date_str = False
date_values = False
where = [tuple(location_ids),tuple(location_ids),tuple(ids),tuple(states)]
if from_date and to_date:
date_str = "date>=%s and date<=%s"
where.append(tuple([from_date]))
where.append(tuple([to_date]))
elif from_date:
date_str = "date>=%s"
date_values = [from_date]
elif to_date:
date_str = "date<=%s"
date_values = [to_date]
if date_values:
where.append(tuple(date_values))
prodlot_id = context.get('prodlot_id', False)
prodlot_clause = ''
if prodlot_id:
prodlot_clause = ' and prodlot_id = %s '
where += [prodlot_id]
# TODO: perhaps merge in one query.
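        # Hedged summary inferred from the two queries below (not from OpenERP
        # docs): the 'in' query sums moves entering the location set, the
        # 'out' query sums moves leaving it; both group by (product_id,
        # product_uom) so amounts can be UoM-converted before netting into res.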
if 'in' in what:
# all moves from a location out of the set to a location in the set
cr.execute(
'select sum(product_qty), product_id, product_uom '\
'from stock_move '\
'where location_id NOT IN %s '\
'and location_dest_id IN %s '\
'and product_id IN %s '\
'and state IN %s ' + (date_str and 'and '+date_str+' ' or '') +' '\
+ prodlot_clause +
'group by product_id,product_uom',tuple(where))
results = cr.fetchall()
if 'out' in what:
# all moves from a location in the set to a location out of the set
cr.execute(
'select sum(product_qty), product_id, product_uom '\
'from stock_move '\
'where location_id IN %s '\
'and location_dest_id NOT IN %s '\
'and product_id IN %s '\
'and state in %s ' + (date_str and 'and '+date_str+' ' or '') + ' '\
+ prodlot_clause +
'group by product_id,product_uom',tuple(where))
results2 = cr.fetchall()
# Get the missing UoM resources
uom_obj = self.pool.get('product.uom')
uoms = map(lambda x: x[2], results) + map(lambda x: x[2], results2)
if context.get('uom', False):
uoms += [context['uom']]
uoms = filter(lambda x: x not in uoms_o.keys(), uoms)
if uoms:
uoms = uom_obj.browse(cr, uid, list(set(uoms)), context=context)
for o in uoms:
uoms_o[o.id] = o
        # TOCHECK: if the product's UoM was changed later, older stock move lines still carry the old UoM.
context.update({'raise-exception': False})
# Count the incoming quantities
for amount, prod_id, prod_uom in results:
amount = uom_obj._compute_qty_obj(cr, uid, uoms_o[prod_uom], amount,
uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)
res[prod_id] += amount
# Count the outgoing quantities
for amount, prod_id, prod_uom in results2:
amount = uom_obj._compute_qty_obj(cr, uid, uoms_o[prod_uom], amount,
uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)
res[prod_id] -= amount
return res
product_product() | [
"[email protected]"
] | |
9132da0e6d4babe5a123bb4b0ccc7a8bb1cb97e0 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_117/505.py | 1738dd39c2a21f537d1057b58316f37a1bfb3b13 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | #!/usr/bin/python3
from common import *
def testcase(x):
n, m = readintegers()
a = [0] * n
for i in range(n):
a[i] = readintegers()
row_maxes = [0] * n
col_maxes = [0] * m
for i in range(n):
for j in range(m):
if a[i][j] > row_maxes[i]:
row_maxes[i] = a[i][j]
if a[i][j] > col_maxes[j]:
col_maxes[j] = a[i][j]
possible = True
for i in range(n):
for j in range(m):
if a[i][j] < row_maxes[i] and a[i][j] < col_maxes[j]:
possible = False
if possible:
writeline("Case #%d: YES" % x)
else:
writeline("Case #%d: NO" % x)
run_tests(testcase)
| [
"[email protected]"
] | |
6801cdfd546e4d6267fe06cd6e03cbe17529a639 | bde607d5c75179861cd1bae62fa40861b984ee4b | /datalive/datalive_cust_veh/migrations/0038_insurance_insurance_accident_phone.py | 17b494fe3e8628616fe39944ae984d06b99775da | [] | no_license | simba999/Geofence-project | 1658f1473b1b2a554607596872448928c1ccac77 | 7c01b55ff0ff3537fd63ea10182b12c5e1f107fa | refs/heads/master | 2021-03-31T00:51:57.811563 | 2018-03-08T19:22:12 | 2018-03-08T19:22:12 | 124,434,299 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-12-12 21:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('datalive_cust_veh', '0037_insurance_address'),
]
operations = [
migrations.AddField(
model_name='insurance',
name='insurance_accident_phone',
field=models.CharField(blank=True, max_length=14, null=True),
),
]
| [
"[email protected]"
] | |
718d72ed56398bf36ff79851a900d5fc1dc3117c | 1864af9eda58307024acbf7fe5d5f2f39f435e44 | /module_1/python/reverse_list.py | c8c318922bfd95015188f65af585098c10945cc5 | [] | no_license | vprusso/6-Weeks-to-Interview-Ready | c393bbfe071d97cba12f0f0668e53a25fb25986f | 8105e1b20bf450a03a9bb910f344fc140e5ba703 | refs/heads/master | 2021-08-11T04:48:34.252178 | 2020-08-09T22:54:55 | 2020-08-09T22:54:55 | 210,997,768 | 6 | 2 | null | 2019-09-26T04:12:44 | 2019-09-26T04:12:44 | null | UTF-8 | Python | false | false | 1,578 | py | """
Title: Reverse linked list.
Problem: Reverse a singly linked list.
Execution: python reverse_list.py
"""
import unittest
class ListNode:
"""Basic node class for linked list."""
def __init__(self, x):
self.val = x
self.next = None
def reverse_list_iterative(head: ListNode) -> ListNode:
"""Function for iteratively reversing singly linked list."""
prev = None
curr = head
while curr:
next_temp = curr.next
curr.next = prev
prev = curr
curr = next_temp
return prev
def reverse_list_recursive(head: ListNode) -> ListNode:
"""Function for recursively reversing singly linked list."""
if head is None or head.next is None:
return head
p = reverse_list_recursive(head.next)
head.next.next = head
head.next = None
return p
def print_list(head: ListNode) -> list:
"""Print linked list elements."""
output_list = []
while head:
output_list.append(head.val)
head = head.next
return output_list
class TestReverseList(unittest.TestCase):
"""Unit test for reverse list."""
def test_1(self):
"""Test for 1->2->3->4->5."""
input_1 = ListNode(1)
input_1.next = ListNode(2)
input_1.next.next = ListNode(3)
input_1.next.next.next = ListNode(4)
input_1.next.next.next.next = ListNode(5)
list_output_iterative_1 = print_list(reverse_list_iterative(input_1))
self.assertEqual(list_output_iterative_1, [5, 4, 3, 2, 1])
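    def test_2(self):
        """Hedged extra check (not in the original file): recursive reversal."""
        input_2 = ListNode(1)
        input_2.next = ListNode(2)
        input_2.next.next = ListNode(3)
        list_output_recursive_2 = print_list(reverse_list_recursive(input_2))
        self.assertEqual(list_output_recursive_2, [3, 2, 1])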
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
831fa7afce258eaefe2d09b668885866549fd4c9 | 99aa9b2be5199bf1b2f670bc9bb1a5bc7cec1c89 | /BFS_topological/L207_Course_schedule.py | 5fdbab7f22cbf7d4e144ae1cf2db55a5f348e331 | [] | no_license | SimonFans/LeetCode | 5196e85dec886b18cb2350419a4a2ae3c751966c | 0a34a19bb0979d58b511822782098f62cd86b25e | refs/heads/master | 2023-02-08T00:49:30.916655 | 2023-01-31T06:32:32 | 2023-01-31T06:32:32 | 145,938,196 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,585 | py | There are a total of n courses you have to take, labeled from 0 to n-1.
# Some courses may have prerequisites; for example, to take course 0 you have
# to first take course 1, which is expressed as a pair: [0,1].
# Given the total number of courses and a list of prerequisite pairs, is it
# possible for you to finish all courses?
# Example 1:
# Input: 2, [[1,0]]
# Output: true
# Explanation: There are a total of 2 courses to take.
# To take course 1 you should have finished course 0. So it is possible.
# Example 2:
# Input: 2, [[1,0],[0,1]]
# Output: false
# Explanation: There are a total of 2 courses to take.
# To take course 1 you should have finished course 0, and to take course 0 you
# should also have finished course 1. So it is impossible.
# Note:
# The input prerequisites is a graph represented by a list of edges, not
# adjacency matrices. Read more about how a graph is represented.
# You may assume that there are no duplicate edges in the input prerequisites.
import collections
class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
# initialize 0 for each course
inDegree=[0 for i in range(numCourses+1)]
# create a course & its pre-course mapping
Map={}
# loop to let all course that need pre-course +=1, others no need are 0
# Map find relationship, exp: 1->4, 2->[4,5]...
for i in range(len(prerequisites)):
inDegree[prerequisites[i][0]]+=1
if prerequisites[i][1] not in Map:
Map[prerequisites[i][1]]=[prerequisites[i][0]]
else:
Map[prerequisites[i][1]].append(prerequisites[i][0])
# find all courses not need pre-course, append to queue
queue=collections.deque()
for i in range(numCourses):
if inDegree[i]==0:
queue.append(i) #queue: [0,1,2]
#queue.popleft()
# BFS starts
while len(queue):
course=queue.popleft()
subcourses=Map.get(course,[]) # [4], [4,5]
for k in range(len(subcourses)):
if inDegree[subcourses[k]]!=0:
inDegree[subcourses[k]]-=1
if inDegree[subcourses[k]]==0:
queue.append(subcourses[k])
        # Any course still holding a nonzero in-degree sits on a cycle and can
        # never be taken; only if none remains is the schedule feasible.
        for i in range(numCourses):
            if inDegree[i] != 0:
                return False
        return True
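if __name__ == '__main__':
    # Hedged usage sketch, not part of the original solution file.
    sol = Solution()
    print(sol.canFinish(2, [[1, 0]]))          # expected: True
    print(sol.canFinish(2, [[1, 0], [0, 1]]))  # expected: False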
| [
"[email protected]"
] | |
44deee25bd721ba3b1e5f31587692afa7f3bce16 | 3c73609eea12d6784ffc0be5acc6994cda19dc57 | /Codeforces Difficulty 500-700/749ABachgoldProb.py | 4cefb4124f4f1af645594d719bbdb2a464837732 | [] | no_license | TanveshT/Competitive-Programming | 0cf7a8ebc20a74cb6fd8505e67fbfec5bac6b8c2 | 47acc0a2af2711c86bb0da06e961677a8ec1e7d3 | refs/heads/master | 2022-12-19T01:44:46.033633 | 2020-09-25T06:57:23 | 2020-09-25T06:57:23 | 258,095,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | n = int(input())
# Use as many 2s as possible; when n is odd, swap one 2 for a single 3
# ((n - 3) / 2 twos plus one 3). Either way the answer has n // 2 primes,
# and joining a list avoids the stray leading space that string
# concatenation produced when n == 3.
parts = ['2'] * (n // 2) if n % 2 == 0 else ['2'] * ((n - 2) // 2) + ['3']
print(len(parts))
print(' '.join(parts)) | [
"[email protected]"
] | |
7c68a69b658aa325154cbd035c1f844f20806ee6 | 48a7b266737b62da330170ca4fe4ac4bf1d8b663 | /molsysmt/form/pytraj_Trajectory/to_molsysmt_Topology.py | 296fd312de31fa18840f59e7be11dc3efb032788 | [
"MIT"
] | permissive | uibcdf/MolSysMT | ddab5a89b8ec2377f383884c5169d147cab01322 | c3d713ba63db24eb8a2426115cf8d9cb3665d225 | refs/heads/main | 2023-08-08T15:04:16.217967 | 2023-08-04T05:49:56 | 2023-08-04T05:49:56 | 137,937,243 | 15 | 3 | MIT | 2023-06-04T20:27:06 | 2018-06-19T19:38:44 | Python | UTF-8 | Python | false | false | 379 | py | from molsysmt._private.digestion import digest
@digest(form='pytraj.Trajectory')
def to_molsysmt_Topology(item, atom_indices='all'):
from . import to_pytraj_Topology
    from ..pytraj_Topology import to_molsysmt_Topology as pytraj_Topology_to_molsysmt_Topology
tmp_item = to_pytraj_Topology(item)
tmp_item = pytraj_Topology_to_molsysmt_Topology(tmp_item, atom_indices=atom_indices)
return tmp_item
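# Hedged usage sketch (assumes pytraj is installed; the file names below are
# placeholders, not from the MolSysMT sources):
#   import pytraj as pt
#   traj = pt.load('trajectory.nc', top='system.parm7')
#   topology = to_molsysmt_Topology(traj)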
| [
"[email protected]"
] | |
8c7a3e6b0fb2244bf9a051a5e0fd52af9b05a262 | 3449e5511dc8da19fc841af767dbe8d216e26ffb | /mmServer/api/views/transaction.py | d862b5b36594d84932e91b3afcccfc5f6ca247cd | [] | no_license | erikwestra/mm-server | 8ba2af0ee7acd372949589b6f8d429099a38ea58 | bead1ad439541211e33fdc60264a869f18a99ae9 | refs/heads/master | 2021-01-10T21:14:23.636707 | 2015-05-27T21:22:54 | 2015-05-27T21:22:54 | 28,573,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,226 | py | """ mmServer.api.views.transaction
    This module implements the "transaction" endpoint for the mmServer.api
application.
"""
import logging
from django.http import *
from django.views.decorators.csrf import csrf_exempt
from django.db.models import Q
from django.conf import settings
from django.utils import timezone
import simplejson as json
from mmServer.shared.models import *
from mmServer.shared.lib import utils, transactionHandler, encryption
from mmServer.shared.lib import rippleInterface
#############################################################################
logger = logging.getLogger(__name__)
#############################################################################
@csrf_exempt
def endpoint(request):
""" Respond to the "/api/transaction" endpoint.
This view function simply selects an appropriate handler based on the
HTTP method.
"""
try:
if request.method == "GET":
return transaction_GET(request)
elif request.method == "POST":
return transaction_POST(request)
else:
return HttpResponseNotAllowed(["GET", "POST"])
except:
return utils.exception_response()
#############################################################################
def transaction_GET(request):
""" Respond to the "GET /api/transaction" API request.
This is used to check the current status of a pending transaction.
"""
if not utils.has_hmac_headers(request):
return HttpResponseForbidden()
# Get the request parameters.
if "global_id" in request.GET:
global_id = request.GET['global_id']
else:
return HttpResponseBadRequest("Missing 'global_id' parameter.")
if "transaction_id" in request.GET:
transaction_id = request.GET['transaction_id']
else:
return HttpResponseBadRequest("Missing 'transaction_id' parameter.")
# Get the user's profile, and check the HMAC authentication details.
try:
profile = Profile.objects.get(global_id=global_id)
except Profile.DoesNotExist:
return HttpResponseBadRequest("There is no profile for that global ID")
if not utils.check_hmac_authentication(request, profile.account_secret):
return HttpResponseForbidden()
# Get the desired Transaction record.
try:
transaction = Transaction.objects.get(id=transaction_id)
except Transaction.DoesNotExist:
return HttpResponseNotFound()
# Check that this user is the creator of this transaction.
if transaction.created_by.global_id != global_id:
return HttpResponseBadRequest("You didn't create this transaction!")
# If the transaction is pending, ask the Ripple network for the current
# transaction status.
cur_status = transaction.status
if cur_status == Transaction.STATUS_PENDING:
transactionHandler.check_pending_ripple_transaction(transaction)
# If the transaction went through, update the affected account balances.
if (cur_status == Transaction.STATUS_PENDING and
transaction.status == Transaction.STATUS_SUCCESS):
transactionHandler.update_account_balance(transaction.debit_account)
transactionHandler.update_account_balance(transaction.credit_account)
# Calculate the response to send back to the caller.
response = {'status' : Transaction.STATUS_MAP[transaction.status]}
if transaction.status == Transaction.STATUS_FAILED:
if transaction.error != None:
response['error'] = transaction.error
# Finally, send back the response.
return HttpResponse(json.dumps(response),
mimetype="application/json")
#############################################################################
def transaction_POST(request):
""" Respond to the "POST /api/transaction" API request.
This is used to submit a transaction for processing.
"""
if not utils.has_hmac_headers(request):
return HttpResponseForbidden()
# Get the request parameters from the body of our request.
if request.META['CONTENT_TYPE'] != "application/json":
return HttpResponseBadRequest()
params = json.loads(request.body)
if "global_id" in params:
global_id = params['global_id']
else:
return HttpResponseBadRequest("Missing 'global_id' value.")
if "ripple_account" in params:
ripple_account = params['ripple_account']
else:
return HttpResponseBadRequest("Missing 'ripple_account' value.")
if "type" in params:
if params['type'] == "DEPOSIT":
trans_type = Transaction.TYPE_DEPOSIT
elif params['type'] == "WITHDRAWAL":
trans_type = Transaction.TYPE_WITHDRAWAL
else:
return HttpResponseBadRequest("Invalid 'type' value.")
else:
return HttpResponseBadRequest("Missing 'type' value.")
if "amount" in params:
try:
amount_in_drops = int(params['amount'])
except ValueError:
return HttpResponseBadRequest("Invalid 'amount' value.")
else:
return HttpResponseBadRequest("Missing 'amount' value.")
if "description" in params:
description = params['description']
else:
description = None
# Get the user's profile, and check the HMAC authentication details.
try:
profile = Profile.objects.get(global_id=global_id)
except Profile.DoesNotExist:
return HttpResponseBadRequest("There is no profile for that global ID")
if not utils.check_hmac_authentication(request, profile.account_secret):
return HttpResponseForbidden()
# Get the user's Account record. If it doesn't exist, create one.
try:
account = Account.objects.get(type=Account.TYPE_USER,
global_id=global_id)
except Account.DoesNotExist:
account = Account()
account.type = Account.TYPE_USER
account.global_id = global_id
account.balance_in_drops = 0
account.save()
# Get the MessageMe Ripple Holding Account record, creating it if it
# doesn't exist.
try:
holding_account = Account.objects.get(
type=Account.TYPE_RIPPLE_HOLDING)
except Account.DoesNotExist:
holding_account = Account()
holding_account.global_id = None
holding_account.type = Account.TYPE_RIPPLE_HOLDING
holding_account.balance_in_drops = 0
holding_account.save()
# Create the Transaction record for this transaction. Note that we assume
# that the transaction will be pending, but may change this if an error
# occurs.
transaction = Transaction()
transaction.timestamp = timezone.now()
transaction.created_by = account
transaction.status = Transaction.STATUS_PENDING #initially
transaction.type = trans_type
transaction.amount_in_drops = amount_in_drops
transaction.ripple_transaction_hash = None # initially
transaction.message = None
transaction.description = description
transaction.error = None # initially
error = None # initially.
if trans_type == Transaction.TYPE_DEPOSIT:
# A deposit transfers the funds from the Ripple holding account to the
# user's MessageMe account.
transaction.debit_account = holding_account
transaction.credit_account = account
# The user is making a deposit -> create a Ripple transaction to
# transfer the funds into the MessageMe Ripple Holding Account. We
# don't actually credit the user's MessageMe account until this
# transaction has been confirmed in the Ripple ledger.
ripple_transaction = {
'TransactionType' : "Payment",
'Account' : ripple_account,
'Destination' : settings.RIPPLE_HOLDING_ACCOUNT,
'Amount' : str(amount_in_drops)
}
# Ask the Ripple network to sign our transaction, using the user's
# account secret.
response = rippleInterface.request("sign",
tx_json=ripple_transaction,
secret=profile.account_secret,
fee_mult_max=1000000)
if response == None:
error = "Ripple server failed to respond when signing " \
+ "the transaction"
elif response['status'] != "success":
error = "Ripple server error signing transaction: %s" \
% response['error']
# Now attempt to submit the transaction to the Ripple ledger.
if error == None:
tx_blob = response['result']['tx_blob']
response = rippleInterface.request("submit",
tx_blob=tx_blob,
fail_hard=True)
if response == None:
error = "Ripple server failed to respond when submitting " \
+ "transaction"
elif response['status'] != "success":
error = "Ripple server error submittting transaction: " \
+ response['error']
if error == None:
transaction.ripple_transaction_hash = \
response['result']['tx_json']['hash']
elif trans_type == Transaction.TYPE_WITHDRAWAL:
# The user is attempting to withdraw some funds from their MessageMe
# account. In this case, we only allow the withdrawal if there are
# sufficient funds in their account. We debit the user's MessageMe
# account right away, and then credit the funds again if the Ripple
# transaction fails.
if account.balance_in_drops < amount_in_drops:
error = "Insufficient funds"
if error == None:
# A withdrawal transfers the funds from the user's MessageMe
# account back to the Ripple holding account.
transaction.debit_account = account
transaction.credit_account = holding_account
# Attempt to create a Ripple transaction transferring the funds
# from the MessageMe Ripple Holding Account, back into the user's
# Ripple account.
ripple_transaction = {
'TransactionType' : "Payment",
'Account' : settings.RIPPLE_HOLDING_ACCOUNT,
'Destination' : ripple_account,
'Amount' : str(amount_in_drops)
}
# Ask the Ripple network to sign our transaction, using the user's
# account secret.
holding_account_secret = settings.RIPPLE_HOLDING_ACCOUNT_SECRET
response = rippleInterface.request("sign",
tx_json=ripple_transaction,
secret=holding_account_secret,
fee_mult_max=1000000)
if response == None:
error = "Ripple server failed to respond when signing " \
+ "the transaction"
elif response['status'] != "success":
error = "Ripple server error signing transaction: %s" \
% response['error']
# Now attempt to submit the transaction to the Ripple ledger.
if error == None:
tx_blob = response['result']['tx_blob']
response = rippleInterface.request("submit",
tx_blob=tx_blob,
fail_hard=True)
if response == None:
error = "Ripple server failed to respond when " \
+ "submitting transaction"
elif response['status'] != "success":
error = "Ripple server error submittting transaction: " \
+ response['error']
if error == None:
transaction.ripple_transaction_hash = \
response['result']['tx_json']['hash']
# If our attempt to sign and submit the Ripple transaction failed, mark
# our internal transaction as having failed right away.
if error != None:
transaction.status = Transaction.STATUS_FAILED
transaction.error = error
# Save our transaction, and get the internal ID for the transaction.
transaction.save()
transaction_id = transaction.id
# Finally, return our response back to the caller.
response = {
'status' : Transaction.STATUS_MAP[transaction.status],
'transaction_id' : transaction_id
}
if transaction.status == Transaction.STATUS_FAILED:
response['error'] = transaction.error
return HttpResponse(json.dumps(response),
mimetype="application/json")
| [
"[email protected]"
] | |
26f76611c215781c36e6cc4a244cdd1c6af2e16b | a8e2c66b3ebadfc17ee9aee197b3f466534cee16 | /ytn11/tp/tp/middlewares.py | 1fc214e18a4f61ea17d33486d46c0fa18bf80064 | [] | no_license | yintiannong/98kar | 49b6db186a4543a7c50671df990bb491846c1a98 | 3863529f57e9d2d9bc1bdf8188916e25ad289db0 | refs/heads/master | 2022-01-07T05:49:31.566453 | 2019-05-22T07:04:45 | 2019-05-22T07:04:45 | 187,794,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,589 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class TpSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class TpDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
ad3bccd5f5c0938d4ef0c702293960cec7e513ff | 45bd50e1e63346f77a46c2a75d9bda8c19fe97de | /Basic Concept Zoning World Prelim 2 2019/Test Scripts/Zoning/ZoningShow.py | fad8d27b7d260c9f54284db7440e4057d7a3785f | [] | no_license | Hackin7/Space-Through-CoSpace | 40a5d8fad18c392dc3f2ae6f3f83f8ae290166a8 | d93541e8f0cd8b331ca8a179617ca4204ab43e3b | refs/heads/master | 2022-11-13T01:35:49.052934 | 2020-06-29T08:54:36 | 2020-06-29T08:54:36 | 222,223,987 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,671 | py | from tkinter import *
from tkinter.filedialog import askopenfilename
from PIL import Image
from PIL import ImageTk
from PIL import ImageGrab
from ctypes import windll #Makes app DPI Aware
user32 = windll.user32
user32.SetProcessDPIAware()
###Drawing Path
text_ids=[]
import sys,os
from sys import stdin, stdout
sys.path.append('../Python_Libraries/')
from ImageCalculation import *
#from PreprocessedMap import data
from hppToMap import readMapData
data = readMapData("../../World2/w2map.hpp")
def imageObj(im, pixels):
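    """Overlay the parsed path onto a fresh image rebuilt from `data`.
    Hedged reading: `stack`, `cppInput` and the other helpers are assumed to
    come from ImageCalculation's star-import above."""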
print(stack)
lines = cppInput(stack[0],stack[1],stack[2],stack[3])
coordinates = inputToCoordinates(lines)
newMap = data#switchYValues(mapData(im.size, pixels))
newImg, newPixels = convertBack(newMap)
addCoordinates(coordinates, newPixels,(100,100,0))
return newImg
if __name__ == "__main__":
root = Tk()
im, pixels = convertBack(data)
widths, heights = im.size
img=ImageTk.PhotoImage(im)
#setting up a tkinter canvas with scrollbars
frame = Frame(root, bd=2, relief=SUNKEN,height=heights,width=widths)
frame.grid_rowconfigure(0, weight=1)
frame.grid_columnconfigure(0, weight=1)
canvas = Canvas(frame, bd=0)
canvas.grid(row=0, column=0, sticky=N+S+E+W)
frame.pack(fill=BOTH,expand=1)
#adding the image
canvasImage = canvas.create_image(0,0,image=img,anchor="nw")
# canvas.config(scrollregion=canvas.bbox(ALL))
zoneMapping = {}
zones = []
with open("zone.txt") as f:
for line in f:
if " " in line:
x,y,node=map(int,line.split())
zoneMapping[x,y] = node
x = x/(360/widths)
y = (heights-y)/(270/heights)
if node not in zones:
zones.append(node)
text_id = canvas.create_text(x-15,y-10,text=str(node),font = "Times 20 bold")
print(zones)
with open("../../World2/zones.hpp", "w") as f:
w, h = 360,270
zoneData = [[0 for j in range(h)] for i in range(w)]
for i in range(w):
for j in range(h):
try:
zoneData[i][j] = zoneMapping[i,j]
except: pass#print("failed:",i,j)
print(len(zoneData), len(zoneData[0]))
f.write(f"int zones[width][height] = {str(zoneData).replace('[','{').replace(']','}')};")
#mouseclick event
def printZone(event):
x = round(event.x/(widths/360))
y = round((heights-event.y)/(heights/270))
print("Zone No:",zoneMapping[x,y])
pass
canvas.bind("<Button 1>",printZone)
root.mainloop()
| [
"[email protected]"
] | |
257d50fce51c9ec884492fd4f1d6d0b538eceef6 | 99078b5d47a6b9bb476bdf27cd466a4fb1e78538 | /src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_template_builder.py | af4017fec40e0af9590fe3545f07422bc14fe51f | [
"MIT"
] | permissive | enterstudio/azure-cli | 949c54464fbce7ff596665c6bcc3d4aeefe54b53 | b0504c3b634e17f1afc944a9572864a40da6bc18 | refs/heads/master | 2023-07-14T11:09:45.925689 | 2017-02-03T18:26:09 | 2017-02-03T18:26:09 | 81,024,949 | 0 | 0 | NOASSERTION | 2023-07-02T22:24:59 | 2017-02-05T21:53:49 | Python | UTF-8 | Python | false | false | 17,448 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-arguments
from collections import OrderedDict
import json
from enum import Enum
from azure.cli.core._util import CLIError
class ArmTemplateBuilder(object):
def __init__(self):
template = OrderedDict()
template['$schema'] = \
'https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#'
template['contentVersion'] = '1.0.0.0'
template['parameters'] = {}
template['variables'] = {}
template['resources'] = []
template['outputs'] = {}
self.template = template
def add_resource(self, resource):
self.template['resources'].append(resource)
def add_variable(self, key, value):
self.template['variables'][key] = value
def add_parameter(self, key, value):
self.template['parameters'][key] = value
def add_id_output(self, key, provider, property_type, property_name):
new_output = {
key: {
'type': 'string',
'value': "[resourceId('{}/{}', '{}')]".format(
provider, property_type, property_name)
}
}
self.template['outputs'].update(new_output)
def add_output(self, key, property_name, provider=None, property_type=None,
output_type='string', path=None):
if provider and property_type:
value = "[reference(resourceId('{provider}/{type}', '{property}'),providers('{provider}', '{type}').apiVersions[0])".format( # pylint: disable=line-too-long
provider=provider, type=property_type, property=property_name)
else:
value = "[reference('{}')".format(property_name)
value = '{}.{}]'.format(value, path) if path else '{}]'.format(value)
new_output = {
key: {
'type': output_type,
'value': value
}
}
self.template['outputs'].update(new_output)
def build(self):
return json.loads(json.dumps(self.template))
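# Hedged usage sketch (not part of the CLI source): compose a minimal template
# holding a single storage account and serialize it for an ARM deployment.
def _example_master_template():
    master_template = ArmTemplateBuilder()
    master_template.add_resource(
        build_storage_account_resource('examplestorage01', 'westus', {}, 'Standard_LRS'))
    return json.dumps(master_template.build(), indent=2)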
# pylint: disable=too-few-public-methods
class StorageProfile(Enum):
SAPirImage = 1
SACustomImage = 2
def build_deployment_resource(name, template, dependencies=None):
dependencies = dependencies or []
deployment = {
'name': name,
'type': 'Microsoft.Resources/deployments',
'apiVersion': '2015-01-01',
'dependsOn': dependencies,
'properties': {
'mode': 'Incremental',
'template': template,
}
}
return deployment
def build_output_deployment_resource(key, property_name, property_provider, property_type,
parent_name=None, output_type='object', path=None):
from azure.cli.command_modules.vm._vm_utils import random_string
output_tb = ArmTemplateBuilder()
output_tb.add_output(key, property_name, property_provider, property_type,
output_type=output_type, path=path)
output_template = output_tb.build()
deployment_name = '{}_{}'.format(property_name, random_string(16))
deployment = {
'name': deployment_name,
'type': 'Microsoft.Resources/deployments',
'apiVersion': '2015-01-01',
'properties': {
'mode': 'Incremental',
'template': output_template,
}
}
deployment['dependsOn'] = [] if not parent_name \
else ['Microsoft.Resources/deployments/{}'.format(parent_name)]
return deployment
def build_storage_account_resource(name, location, tags, sku):
storage_account = {
'type': 'Microsoft.Storage/storageAccounts',
'name': name,
'apiVersion': '2015-06-15',
'location': location,
'tags': tags,
'dependsOn': [],
'properties': {'accountType': sku}
}
return storage_account
def build_public_ip_resource(name, location, tags, address_allocation, dns_name=None):
public_ip_properties = {'publicIPAllocationMethod': address_allocation}
if dns_name:
public_ip_properties['dnsSettings'] = {'domainNameLabel': dns_name}
public_ip = {
'apiVersion': '2015-06-15',
'type': 'Microsoft.Network/publicIPAddresses',
'name': name,
'location': location,
'tags': tags,
'dependsOn': [],
'properties': public_ip_properties
}
return public_ip
def build_nic_resource(name, location, tags, vm_name, subnet_id, private_ip_address=None,
nsg_id=None, public_ip_id=None):
private_ip_allocation = 'Static' if private_ip_address else 'Dynamic'
ip_config_properties = {
'privateIPAllocationMethod': private_ip_allocation,
'subnet': {'id': subnet_id}
}
if private_ip_address:
ip_config_properties['privateIPAddress'] = private_ip_address
if public_ip_id:
ip_config_properties['publicIPAddress'] = {'id': public_ip_id}
nic_properties = {
'ipConfigurations': [
{
'name': 'ipconfig{}'.format(vm_name),
'properties': ip_config_properties
}
]
}
if nsg_id:
nic_properties['networkSecurityGroup'] = {'id': nsg_id}
nic = {
'apiVersion': '2015-06-15',
'type': 'Microsoft.Network/networkInterfaces',
'name': name,
'location': location,
'tags': tags,
'dependsOn': [],
'properties': nic_properties
}
return nic
def build_nsg_resource(name, location, tags, nsg_rule_type):
rule_name = 'rdp' if nsg_rule_type == 'rdp' else 'default-allow-ssh'
rule_dest_port = '3389' if nsg_rule_type == 'rdp' else '22'
nsg_properties = {
'securityRules': [
{
'name': rule_name,
'properties': {
'protocol': 'Tcp',
'sourcePortRange': '*',
'destinationPortRange': rule_dest_port,
'sourceAddressPrefix': '*',
'destinationAddressPrefix': '*',
'access': 'Allow',
'priority': 1000,
'direction': 'Inbound'
}
}
]
}
nsg = {
'type': 'Microsoft.Network/networkSecurityGroups',
'name': name,
'apiVersion': '2015-06-15',
'location': location,
'tags': tags,
'dependsOn': [],
'properties': nsg_properties
}
return nsg
def build_vnet_resource(name, location, tags, vnet_prefix=None, subnet=None,
subnet_prefix=None, dns_servers=None):
vnet = {
'name': name,
'type': 'Microsoft.Network/virtualNetworks',
'location': location,
'apiVersion': '2015-06-15',
'dependsOn': [],
'tags': tags,
'properties': {
'addressSpace': {'addressPrefixes': [vnet_prefix]},
}
}
if dns_servers:
vnet['properties']['dhcpOptions'] = {
'dnsServers': dns_servers
}
if subnet:
vnet['properties']['subnets'] = [{
'name': subnet,
'properties': {
'addressPrefix': subnet_prefix
}
}]
return vnet
def build_vm_resource(
name, location, tags, size, storage_profile, nics, admin_username,
availability_set_id=None, admin_password=None, ssh_key_value=None, ssh_key_path=None,
image_reference=None, os_disk_name=None, custom_image_os_type=None, storage_caching=None,
os_publisher=None, os_offer=None, os_sku=None, os_version=None,
os_vhd_uri=None):
def _build_os_profile():
os_profile = {
'computerName': name,
'adminUsername': admin_username,
}
if admin_password:
os_profile['adminPassword'] = admin_password
if ssh_key_value and ssh_key_path:
os_profile['linuxConfiguration'] = {
'disablePasswordAuthentication': True,
'ssh': {
'publicKeys': [
{
'keyData': ssh_key_value,
'path': ssh_key_path
}
]
}
}
return os_profile
def _build_storage_profile():
storage_profiles = {
'SACustomImage': {
'osDisk': {
'createOption': 'fromImage',
'name': os_disk_name,
'caching': storage_caching,
'osType': custom_image_os_type,
'image': {'uri': image_reference},
'vhd': {'uri': os_vhd_uri}
}
},
'SAPirImage': {
'osDisk': {
'createOption': 'fromImage',
'name': os_disk_name,
'caching': storage_caching,
'vhd': {'uri': os_vhd_uri}
},
'imageReference': {
'publisher': os_publisher,
'offer': os_offer,
'sku': os_sku,
'version': os_version
}
}
}
return storage_profiles[storage_profile.name]
vm_properties = {
'hardwareProfile': {'vmSize': size},
'networkProfile': {'networkInterfaces': nics}
}
vm_properties['storageProfile'] = _build_storage_profile()
if availability_set_id:
vm_properties['availabilitySet'] = {'id': availability_set_id}
vm_properties['osProfile'] = _build_os_profile()
vm = {
'apiVersion': '2015-06-15',
'type': 'Microsoft.Compute/virtualMachines',
'name': name,
'location': location,
'tags': tags,
'dependsOn': [],
'properties': vm_properties,
}
return vm
def build_load_balancer_inbound_nat_rules_resource(lb_name, location, backend_port, instance_count,
frontend_ip_name):
lb_id = "resourceId('Microsoft.Network/loadBalancers', '{}')".format(lb_name)
nat_rule_properties = {
'frontendIPConfiguration': {
'id': "[concat({}, '/frontendIPConfigurations/', '{}')]".format(lb_id, frontend_ip_name)
},
'protocol': 'tcp',
'frontendPort': "[copyIndex(50000)]",
'backendPort': backend_port,
'enableFloatingIP': False
}
nat_rules = {
'apiVersion': '2015-06-15',
'type': 'Microsoft.Network/loadBalancers/inboundNatRules',
'name': "[concat('{}', '/', 'NAT-RULE', copyIndex())]".format(lb_name),
'location': location,
'copy': {
'name': 'lbNatLoop',
'count': instance_count
},
'dependsOn': [
'Microsoft.Network/loadBalancers/{}'.format(lb_name)
],
'properties': nat_rule_properties
}
return nat_rules
def build_load_balancer_resource(name, location, tags, backend_pool_name, nat_pool_name,
backend_port, frontend_ip_name, public_ip_id, subnet_id,
private_ip_address='', private_ip_allocation='dynamic'):
lb_id = "resourceId('Microsoft.Network/loadBalancers', '{}')".format(name)
frontend_ip_config = {
'name': frontend_ip_name
}
if public_ip_id:
frontend_ip_config.update({
'properties': {
'publicIPAddress': {
'id': public_ip_id
}
}
})
else:
frontend_ip_config.update({
'properties': {
'privateIPAllocationMethod': private_ip_allocation,
'privateIPAddress': private_ip_address,
'subnet': {
'id': subnet_id
}
}
})
lb_properties = {
'backendAddressPools': [
{
'name': backend_pool_name
}
],
'inboundNatPools': [
{
'name': nat_pool_name,
'properties': {
'frontendIPConfiguration': {
'id': "[concat({}, '/frontendIPConfigurations/', '{}')]".format(
lb_id, frontend_ip_name)
},
'protocol': 'tcp',
'frontendPortRangeStart': '50000',
'frontendPortRangeEnd': '50119',
'backendPort': backend_port
}
}
],
'frontendIPConfigurations': [frontend_ip_config]
}
lb = {
"type": "Microsoft.Network/loadBalancers",
"name": name,
"location": location,
"tags": tags,
"apiVersion": "2015-06-15",
"dependsOn": [],
"properties": lb_properties
}
return lb
def build_vmss_storage_account_pool_resource(loop_name, location, tags, storage_sku):
storage_resource = {
'type': 'Microsoft.Storage/storageAccounts',
'name': "[variables('storageAccountNames')[copyIndex()]]",
'location': location,
'tags': tags,
'apiVersion': '2015-06-15',
'copy': {
'name': loop_name,
'count': 5
},
'properties': {
'accountType': storage_sku
}
}
return storage_resource
# pylint: disable=too-many-locals
def build_vmss_resource(name, naming_prefix, location, tags, overprovision, upgrade_policy_mode,
vm_sku, instance_count, ip_config_name, nic_name, subnet_id,
admin_username, authentication_type,
storage_profile, os_disk_name, storage_caching, os_type,
image=None, admin_password=None, ssh_key_value=None, ssh_key_path=None,
os_publisher=None, os_offer=None, os_sku=None, os_version=None,
backend_address_pool_id=None, inbound_nat_pool_id=None):
# Build IP configuration
ip_configuration = {
'name': ip_config_name,
'properties': {
'subnet': {'id': subnet_id}
}
}
if backend_address_pool_id:
ip_configuration['properties']['loadBalancerBackendAddressPools'] = [
{'id': backend_address_pool_id}
]
if inbound_nat_pool_id:
ip_configuration['properties']['loadBalancerInboundNatPools'] = [
{'id': inbound_nat_pool_id}
]
# Build storage profile
storage_properties = {
'osDisk': {
'name': os_disk_name,
'caching': storage_caching,
'createOption': 'FromImage',
}
}
if storage_profile == StorageProfile.SACustomImage:
storage_properties['osDisk'].update({
'osType': os_type,
'image': {
'uri': image
}
})
elif storage_profile == StorageProfile.SAPirImage:
storage_properties['osDisk']['vhdContainers'] = "[variables('vhdContainers')]"
else:
raise CLIError('Unsupported storage profile.')
if storage_profile in [StorageProfile.SAPirImage]:
storage_properties['imageReference'] = {
'publisher': os_publisher,
'offer': os_offer,
'sku': os_sku,
'version': os_version
}
# Build OS Profile
os_profile = {
'computerNamePrefix': naming_prefix,
'adminUsername': admin_username
}
if authentication_type == 'password':
os_profile['adminPassword'] = admin_password
else:
os_profile['linuxConfiguration'] = {
'disablePasswordAuthentication': True,
'ssh': {
'publicKeys': [
{
'path': ssh_key_path,
'keyData': ssh_key_value
}
]
}
}
# Build VMSS
vmss_properties = {
'overprovision': overprovision,
'upgradePolicy': {
'mode': upgrade_policy_mode
},
'virtualMachineProfile': {
'storageProfile': storage_properties,
'osProfile': os_profile,
'networkProfile': {
'networkInterfaceConfigurations': [{
'name': nic_name,
'properties': {
'primary': 'true',
'ipConfigurations': [ip_configuration]
}
}]
}
}
}
vmss = {
'type': 'Microsoft.Compute/virtualMachineScaleSets',
'name': name,
'location': location,
'tags': tags,
'apiVersion': '2016-03-30',
'dependsOn': [],
'sku': {
'name': vm_sku,
'tier': 'Standard',
'capacity': instance_count
},
'properties': vmss_properties
}
return vmss
| [
"[email protected]"
] | |
cb7e7df7d3e7efa91123a54b0420d67565acae87 | 22d84f804271a629a395cec785b7eb4b47f72f36 | /eelbrain/data/tests/test_data.py | 298853b71bf4ac8d11837e8d1fed7bf42b3e0240 | [] | no_license | imclab/Eelbrain | bb787294218a2ba00f90f447af0e629abadeac88 | e52eb3a5bd8bf8fc9aece2fb4413e0286e080c46 | refs/heads/master | 2021-01-21T23:34:15.673295 | 2014-05-02T19:17:04 | 2014-05-02T19:17:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,466 | py | '''
Created on Dec 2, 2012
@author: christian
'''
import os
import cPickle as pickle
import shutil
import tempfile
import mne
from nose.tools import (assert_equal, assert_is_instance, assert_true, eq_,
ok_, assert_raises)
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from eelbrain.data import datasets, Var, Factor, Dataset, Celltable, load
from eelbrain.data.data_obj import (align, align1, asvar, combine, isdatalist,
isndvar, isvar, isuv, SourceSpace, UTS)
from eelbrain.data.stats import rms
def assert_dataset_equal(ds1, ds2, msg="Datasets unequal", decimal=None):
"""
Raise an assertion if two Datasets are not equal up to desired precision.
Parameters
----------
ds1, ds2 : Dataset
Datasets to compare.
msg : str
Prefix of the error message to be printed in case of failure.
decimal : None | int
Desired precision (default is exact match).
"""
assert_equal(ds1.keys(), ds2.keys(), "%s: different keys" % msg)
for k in ds1.keys():
assert_dataobj_equal(ds1[k], ds2[k], msg=msg, decimal=decimal)
assert_equal(ds1.info.keys(), ds2.info.keys(), "%s: keys in info" % msg)
def assert_dataobj_equal(d1, d2, msg="Data-objects unequal", decimal=None):
"""
Raise an assertion if two data-objects are not equal up to desired
precision.
Parameters
----------
ds1, ds2 : data-objects
Data-objects to compare.
msg : str
Prefix of the error message to be printed in case of failure.
decimal : None | int
Desired precision (default is exact match).
"""
msg = "%s:" % msg
assert_equal(d1.name, d2.name, "%s unequal names (%r vs %r"
")" % (msg, d1.name, d2.name))
msg += ' %r have' % d1.name
assert_equal(len(d1), len(d2), "%s unequal length" % msg)
if isvar(d1) and decimal:
assert_array_almost_equal(d1.x, d2.x, decimal)
elif isuv(d1):
assert_true(np.all(d1 == d2), "%s unequal values: %r vs "
"%r" % (msg, d1, d2))
elif isndvar(d1):
assert_true(np.all(d1.x == d2.x), "%s unequal values" % msg)
elif isdatalist(d1):
for i in xrange(len(d1)):
assert_equal(d1[i], d2[i], "%s unequal values" % msg)
def assert_source_space_equal(src1, src2, msg="SourceSpace Dimension objects "
"unequal"):
"""
Raise an assertion if two SourceSpace objects are not equal up to desired
precision.
Parameters
----------
src1, src2 : SourceSpace objects
SourceSpace objects to compare.
msg : str
Prefix of the error message to be printed in case of failure.
"""
msg = "%s:" % msg
assert_array_equal(src1.vertno[0], src2.vertno[0], "%s unequal lh vertno "
"(%r vs %r)" % (msg, src1.vertno[0], src2.vertno[0]))
assert_array_equal(src1.vertno[1], src2.vertno[1], "%s unequal rh vertno "
"(%r vs %r)" % (msg, src1.vertno[1], src2.vertno[1]))
assert_equal(src1.subject, src2.subject, "%s unequal subject (%r vs %r"
")" % (msg, src1.subject, src2.subject))
assert_equal(src1.src, src2.src, "%s unequal names (%r vs %r"
")" % (msg, src1.src, src2.src))
assert_equal(src1.subjects_dir, src2.subjects_dir, "%s unequal names (%r "
"vs %r)" % (msg, src1.subjects_dir, src2.subjects_dir))
def test_print():
"Run the string representation methods"
ds = datasets.get_rand()
print ds
print repr(ds)
A = ds['A']
print A
print repr(A)
Y = ds['Y']
print Y
print repr(Y)
Ynd = ds['uts']
print Ynd
print repr(Ynd)
def test_aggregate():
"Test aggregation methods"
ds = datasets.get_rand()
# don't handle inconsistencies silently
assert_raises(ValueError, ds.aggregate, 'A%B')
dsa = ds.aggregate('A%B', drop_bad=True)
assert_array_equal(dsa['n'], [15, 15, 15, 15])
idx1 = ds.eval("logical_and(A=='a0', B=='b0')")
assert_equal(dsa['Y', 0], ds['Y', idx1].mean())
# unequal cell counts
ds = ds[:-3]
dsa = ds.aggregate('A%B', drop_bad=True)
assert_array_equal(dsa['n'], [15, 15, 15, 12])
idx1 = ds.eval("logical_and(A=='a0', B=='b0')")
assert_equal(dsa['Y', 0], ds['Y', idx1].mean())
dsa = ds.aggregate('A%B', drop_bad=True, equal_count=True)
assert_array_equal(dsa['n'], [12, 12, 12, 12])
idx1_12 = np.logical_and(idx1, idx1.cumsum() <= 12)
assert_equal(dsa['Y', 0], ds['Y', idx1_12].mean())
def test_align():
"Testing align() and align1() functions"
ds = datasets.get_rand()
ds.index()
idx = np.arange(0, ds.n_cases, 4)
ds_sub = ds.sub(np.arange(0, ds.n_cases, 2))
dsa = align1(ds_sub, idx)
assert_array_equal(dsa['index'].x, idx, "align1() failure")
dsa1, dsa2 = align(dsa, ds_sub)
assert_array_equal(dsa1['index'].x, dsa2['index'].x, "align() failed")
def test_celltable():
"Test the Celltable class."
ds = datasets.get_rand()
ds['cat'] = Factor('abcd', rep=15)
ct = Celltable('Y', 'A', ds=ds)
eq_(ct.n_cases, 60)
eq_(ct.n_cells, 2)
ct = Celltable('Y', 'A', match='rm', ds=ds)
eq_(ct.n_cases, 30)
eq_(ct.n_cells, 2)
ct = Celltable('Y', 'cat', cat=('c', 'b'), ds=ds)
eq_(ct.n_cases, 30)
eq_(ct.X[0], 'c')
eq_(ct.X[-1], 'b')
ct = Celltable('Y', 'A', match='rm', ds=ds)
eq_(ct.n_cases, 30)
assert np.all(ct.groups['a0'] == ct.groups['a1'])
ct = Celltable('Y', 'cat', match='rm', cat=('c', 'b'), ds=ds)
eq_(ct.n_cases, 30)
eq_(ct.X[0], 'c')
eq_(ct.X[-1], 'b')
# coercion of numerical X
X = ds.eval("A == 'a0'")
ct = Celltable('Y', X, cat=(None, None), ds=ds)
assert_equal(('False', 'True'), ct.cat)
assert_array_equal(ct.data['True'], ds['Y', X])
ct = Celltable('Y', X, cat=(True, False), ds=ds)
assert_equal(('True', 'False'), ct.cat)
assert_array_equal(ct.data['True'], ds['Y', X])
# test coercion of Y
ct = Celltable(ds['Y'].x, 'A', ds=ds)
assert_is_instance(ct.Y, np.ndarray)
ct = Celltable(ds['Y'].x, 'A', ds=ds, coercion=asvar)
assert_is_instance(ct.Y, Var)
# test sub
ds_sub = ds.sub("A == 'a0'")
ct_sub = Celltable('Y', 'B', ds=ds_sub)
ct = Celltable('Y', 'B', sub="A == 'a0'", ds=ds)
assert_dataobj_equal(ct_sub.Y, ct.Y)
# test sub with rm
ct_sub = Celltable('Y', 'B', match='rm', ds=ds_sub)
ct = Celltable('Y', 'B', match='rm', sub="A == 'a0'", ds=ds)
assert_dataobj_equal(ct_sub.Y, ct.Y)
# test rm sorting
ds = Dataset()
ds['rm'] = Factor('abc', rep=4)
ds['Y'] = Var(np.arange(3.).repeat(4))
ds['X'] = Factor('ab', rep=2, tile=3)
idx = np.arange(12)
np.random.shuffle(idx)
ds = ds[idx]
ct = Celltable('Y', 'X', 'rm', ds=ds)
assert_array_equal(ct.match, Factor('abc', tile=2))
assert_array_equal(ct.Y, np.tile(np.arange(3.), 2))
assert_array_equal(ct.X, Factor('ab', rep=3))
def test_combine():
"Test combine()"
ds1 = datasets.get_rand()
ds2 = datasets.get_rand()
ds = combine((ds1, ds2))
assert_array_equal(ds2['Y'].x, ds['Y'].x[ds1.n_cases:], "Basic combine")
del ds1['Y']
del ds2['YCat']
ds = combine((ds1, ds2))
assert_array_equal(ds2['Y'].x, ds['Y'].x[ds1.n_cases:], "Combine with "
"missing Var")
assert_true(np.all(ds1['YCat'] == ds['YCat'][:ds1.n_cases]), "Combine "
"with missing Factor")
    # combine NDVar with unequal dimensions
ds = datasets.get_rand(utsnd=True)
y = ds['utsnd']
y1 = y.sub(sensor=['0', '1', '2', '3'])
y2 = y.sub(sensor=['1', '2', '3', '4'])
ds1 = Dataset(y1)
ds2 = Dataset(y2)
dsc = combine((ds1, ds2))
y = dsc['utsnd']
assert_equal(y.sensor.names, ['1', '2', '3'], "Sensor dimension "
"intersection failed.")
dims = ('case', 'sensor', 'time')
ref = np.concatenate((y1.get_data(dims)[:, 1:], y2.get_data(dims)[:, :3]))
assert_array_equal(y.get_data(dims), ref, "combine utsnd")
def test_dataset_combining():
"Test Dataset combination methods"
ds = datasets.get_uv()
del ds['fltvar'], ds['intvar'], ds['A']
ds2 = datasets.get_uv()
del ds2['fltvar'], ds2['intvar']
ds.update(ds2)
assert_array_equal(ds['A'], ds2['A'])
ds2 = datasets.get_uv()
del ds2['fltvar'], ds2['intvar']
ds2['B'][5] = 'something_else'
del ds['A']
assert_raises(ValueError, ds.update, ds2)
def test_dataset_indexing():
"""Test Dataset indexing"""
ds = datasets.get_uv()
ds['C', :] = 'c'
ok_(np.all(ds.eval("C == 'c'")))
def test_dataset_sorting():
"Test Dataset sorting methods"
test_array = np.arange(10)
ds = Dataset()
ds['v'] = Var(test_array)
ds['f'] = Factor(test_array)
# shuffle the Dataset
rand_idx = test_array.copy()
np.random.shuffle(rand_idx)
ds_shuffled = ds[rand_idx]
# ascending, Var, copy
dsa = ds_shuffled.sorted('v')
assert_dataset_equal(dsa, ds, "Copy sorted by Var, ascending")
# descending, Factor, in-place
ds_shuffled.sort('f', descending=True)
assert_dataset_equal(ds_shuffled, ds[::-1], "In-place sorted by Factor, "
"descending")
def test_dim_uts():
"Test UTS Dimension"
uts = UTS(-0.1, 0.005, 301)
# make sure indexing rounds correctly for floats
for i, s in enumerate(np.arange(0, 1.4, 0.05)):
idx = uts.dimindex((-0.1 + s, s))
assert_equal(idx.start, 10 * i)
assert_equal(idx.stop, 20 + 10 * i)
def test_ndvar():
"Test the NDVar class"
ds = datasets.get_rand(utsnd=True)
x = ds['utsnd']
# slicing
assert_raises(KeyError, x.sub, sensor='5')
assert_equal(x.sub(sensor='4').ndim, 2)
assert_equal(x.sub(sensor=['4']).ndim, 3)
assert_equal(x.sub(case=1, sensor='4').ndim, 1)
# baseline correction
x_bl = x - x.summary(time=(None, 0))
# assert that the baseline is 0
bl = x_bl.summary('case', 'sensor', time=(None, 0))
ok_(abs(bl) < 1e-10, "Baseline correction")
# NDVar as index
sens_mean = x.mean(('case', 'time'))
idx = sens_mean > 0
pos = sens_mean[idx]
assert_array_equal(pos.x > 0, True)
def test_ndvar_summary_methods():
"Test NDVar methods for summarizing data over axes"
ds = datasets.get_rand(utsnd=True)
x = ds['utsnd']
dim = 'sensor'
axis = x.get_axis(dim)
dims = ('case', 'sensor')
axes = tuple(x.get_axis(d) for d in dims)
# numpy functions
assert_equal(x.any(), x.x.any())
assert_array_equal(x.any(dim), x.x.any(axis))
assert_array_equal(x.any(dims), x.x.any(axes))
assert_equal(x.max(), x.x.max())
assert_array_equal(x.max(dim), x.x.max(axis))
assert_array_equal(x.max(dims), x.x.max(axes))
assert_equal(x.mean(), x.x.mean())
assert_array_equal(x.mean(dim), x.x.mean(axis))
assert_array_equal(x.mean(dims), x.x.mean(axes))
assert_equal(x.min(), x.x.min())
assert_array_equal(x.min(dim), x.x.min(axis))
assert_array_equal(x.min(dims), x.x.min(axes))
assert_equal(x.std(), x.x.std())
assert_array_equal(x.std(dim), x.x.std(axis))
assert_array_equal(x.std(dims), x.x.std(axes))
# non-numpy
assert_equal(x.rms(), rms(x.x))
assert_array_equal(x.rms(dim), rms(x.x, axis))
assert_array_equal(x.rms(dims), rms(x.x, axes))
def test_io_pickle():
"Test io by pickling"
ds = datasets.get_rand()
ds.info['info'] = "Some very useful information about the Dataset"
tempdir = tempfile.mkdtemp()
try:
dest = os.path.join(tempdir, 'test.pickled')
with open(dest, 'wb') as fid:
pickle.dump(ds, fid, protocol=pickle.HIGHEST_PROTOCOL)
with open(dest, 'rb') as fid:
ds2 = pickle.load(fid)
finally:
shutil.rmtree(tempdir)
assert_dataset_equal(ds, ds2)
def test_io_txt():
"Test Dataset io as text"
ds = datasets.get_uv()
# Var that has integer values as float
ds['intflt'] = ds.eval('intvar * 1.')
ds['intflt'].name = 'intflt'
# io test
tempdir = tempfile.mkdtemp()
try:
dest = os.path.join(tempdir, 'test.txt')
ds.save_txt(dest)
ds2 = load.tsv(dest)
finally:
shutil.rmtree(tempdir)
assert_dataset_equal(ds, ds2, decimal=6)
def test_source_space():
"Test SourceSpace Dimension"
subject = 'fsaverage'
data_path = mne.datasets.sample.data_path()
mri_sdir = os.path.join(data_path, 'subjects')
mri_dir = os.path.join(mri_sdir, subject)
src_path = os.path.join(mri_dir, 'bem', subject + '-ico-5-src.fif')
label_dir = os.path.join(mri_dir, 'label')
label_ba1 = mne.read_label(os.path.join(label_dir, 'lh.BA1.label'))
label_v1 = mne.read_label(os.path.join(label_dir, 'lh.V1.label'))
label_mt = mne.read_label(os.path.join(label_dir, 'lh.MT.label'))
label_ba1_v1 = label_ba1 + label_v1
label_v1_mt = label_v1 + label_mt
src = mne.read_source_spaces(src_path)
source = SourceSpace((src[0]['vertno'], src[1]['vertno']), subject,
'ico-5', mri_sdir)
index = source.dimindex(label_v1)
source_v1 = source[index]
index = source.dimindex(label_ba1_v1)
source_ba1_v1 = source[index]
index = source.dimindex(label_v1_mt)
source_v1_mt = source[index]
index = source_ba1_v1.dimindex(source_v1_mt)
source_v1_intersection = source_ba1_v1[index]
assert_source_space_equal(source_v1, source_v1_intersection)
# index from label
index = source.index_for_label(label_v1)
assert_array_equal(index.source[index.x].vertno[0],
np.intersect1d(source.lh_vertno, label_v1.vertices, 1))
# parcellation and cluster localization
if mne.__version__ < '0.8':
return
parc = mne.read_annot(subject, parc='aparc', subjects_dir=mri_sdir)
indexes = [source.index_for_label(label) for label in parc
if len(label) > 10]
x = np.vstack([index.x for index in indexes])
ds = source._cluster_properties(x)
for i in xrange(ds.n_cases):
assert_equal(ds[i, 'location'], parc[i].name)
def test_var():
"Test Var objects"
base = Factor('aabbcde')
Y = Var.from_dict(base, {'a': 5, 'e': 8}, default=0)
assert_array_equal(Y.x, [5, 5, 0, 0, 0, 0, 8])
| [
"[email protected]"
] | |
188553b42af704e15e28d627a6b689ddccdb9a8b | b391498124fdcaef989bf3ebafffb0df43e3e07f | /pygccxml-0.8.2/unittests/decl_string_tester.py | 2e539a882efae0042f2ee800be1a0a8f58ad8294 | [
"BSL-1.0"
] | permissive | glehmann/WrapITK-unstable | 9a0dd9d387ecd59c9439465dcc32cca552e14576 | 402fc668f1f3c3dd57d0751a61efa3b1625d238b | refs/heads/master | 2021-01-10T22:02:04.715926 | 2008-05-25T16:53:07 | 2008-05-25T16:53:07 | 3,272,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | # Copyright 2004 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import unittest
import autoconfig
import parser_test_case
import pygccxml
from pygccxml.utils import *
from pygccxml.parser import *
from pygccxml.declarations import *
class tester_t( parser_test_case.parser_test_case_t ):
COMPILATION_MODE = COMPILATION_MODE.ALL_AT_ONCE
def __init__(self, *args ):
parser_test_case.parser_test_case_t.__init__( self, *args )
self.header = os.path.join( autoconfig.data_directory, 'declarations_calldef.hpp' )
self.template = """
//test generated declaration string using gcc(xml) compiler
#include "declarations_calldef.hpp"
void test_generated_decl_string( %s );
"""
def test_member_function(self):
declarations = parse( [self.header], self.config )
member_inline_call = find_declaration( declarations, type=member_function_t, name='member_inline_call' )
self.failUnless( member_inline_call, "unable to find 'member_inline_call' function" )
decls = parse_string( self.template % member_inline_call.decl_string, self.config )
self.failUnless( decls, "Created decl_string for member function containes mistake" )
def test_free_function(self):
declarations = parse( [self.header], self.config )
return_default_args = find_declaration( declarations, type=free_function_t, name='return_default_args' )
self.failUnless( return_default_args, "unable to find 'return_default_args' function" )
decls = parse_string( self.template % return_default_args.decl_string, self.config )
self.failUnless( decls, "Created decl_string for global function containes mistake" )
def create_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite(tester_t))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run( create_suite() )
if __name__ == "__main__":
run_suite()
| [
"[email protected]"
] | |
761792f87be2af9725838fd2e45005f8cbb7e3b7 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/python/keras/api/_v2/keras/mixed_precision/experimental/__init__.py | 557964bda8534e5d71a39df895b2a0954a2758d7 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a99d880d11e16bc170b4e38a2ed534d30a03cdcd4c9dc919e44db72fb592db6d
size 562
| [
"github@cuba12345"
] | github@cuba12345 |
2b6a3dd2306c6267bd3b4723905ad8e96699bc58 | 6a8d047b4502507c67120a0a32640c6a3e60d8a5 | /apps/profiles/migrations/0015_auto_20171128_2324.py | 318ad8f57eac31c224e44335024c2a8c50376817 | [] | no_license | dwebdevcore/BoardDirector_dashboard | 320f110d7581c065920b7607ef06a457851c4bb4 | 7cd2b2abe1c660531a805d84930c8a6183b863b6 | refs/heads/master | 2020-05-26T05:32:37.501642 | 2019-05-22T22:33:25 | 2019-05-22T22:33:25 | 188,122,429 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-28 20:24
from __future__ import unicode_literals
import common.utils
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0014_auto_20170915_0653'),
]
operations = [
migrations.AlterField(
model_name='membership',
name='custom_role_name',
field=models.CharField(blank=True, max_length=50, verbose_name='custom role name'),
),
]
| [
"[email protected]"
] | |
eb7743840173ebacd9715a3594e8cbe849d2e20c | eb7047d5a8c00d4370a55c2806a2f051287b452d | /tests/pytests/problems/TestSingleObserver.py | b8d8b92a1d2272e25463babe394d08632e7fed5a | [
"MIT"
] | permissive | mousumiroy-unm/pylith | 8361a1c0fbcde99657fd3c4e88678a8b5fc8398b | 9a7b6b4ee8e1b89bc441bcedc5ed28a3318e2468 | refs/heads/main | 2023-05-27T18:40:57.145323 | 2021-06-09T19:32:19 | 2021-06-09T19:32:19 | 373,931,160 | 0 | 0 | MIT | 2021-06-04T18:40:09 | 2021-06-04T18:40:09 | null | UTF-8 | Python | false | false | 1,384 | py | #!/usr/bin/env nemesis
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
# @file tests/pytests/problems/TestSingleObserver.py
#
# @brief Unit testing of Python SingleObserver object.
import unittest
from pylith.testing.UnitTestApp import TestAbstractComponent
from pylith.problems.SingleObserver import (SingleSolnObserver, SinglePhysicsObserver)
class TestSingleSolnObserver(TestAbstractComponent):
"""Unit testing of SingleSolnObserver object.
"""
_class = SingleSolnObserver
class TestSinglePhysicsObserver(TestAbstractComponent):
"""Unit testing of SinglePhysicsObserver object.
"""
_class = SinglePhysicsObserver
if __name__ == "__main__":
suite = unittest.TestSuite()
classes = [
TestSingleSolnObserver,
TestSinglePhysicsObserver,
]
for cls in classes:
suite.addTest(unittest.makeSuite(cls))
unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
| [
"[email protected]"
] | |
794a613a9d0f5d34d09ddbb7cad78d49e670c820 | 4e45d134e09af47025deae667805f0eb79d4f516 | /neuroConstruct/pythonScripts/RunTestsL5PC.py | f275b53a4451da444342a7f07e133d5e957a5ed2 | [
"MIT"
] | permissive | OpenSourceBrain/L5bPyrCellHayEtAl2011 | 46c5721e21eed58cd23e96ced0cdafae58315473 | 75da6c136254b50b96dd9156b27244c083313156 | refs/heads/master | 2023-08-16T10:36:36.618573 | 2023-03-30T12:31:13 | 2023-03-30T12:31:13 | 8,250,486 | 5 | 1 | NOASSERTION | 2023-09-06T12:42:46 | 2013-02-17T12:48:49 | AGS Script | UTF-8 | Python | false | false | 2,678 | py | #
#
# File to test current configuration of project
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Wellcome Trust
#
#
import sys
import os
try:
from java.io import File
except ImportError:
print "Note: this file should be run using nC.bat -python XXX.py' or 'nC.sh -python XXX.py'"
print "See http://www.neuroconstruct.org/docs/python.html for more details"
quit()
sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils")
import ncutils as nc # Many useful functions such as SimManager.runMultipleSims found here
projFile = File("../L5bPyrCellHayEtAl2011.ncx")
############## Main settings ##################
simConfigs = []
simConfigs.append("Default Simulation Configuration")
simDt = 0.001
simulators = ["NEURON"]
varTimestepNeuron = True
varTimestepTolerance = 0.00001
plotSims = True
plotVoltageOnly = True
runInBackground = True
analyseSims = True
verbose = True
#############################################
def testAll(argv=None):
if argv is None:
argv = sys.argv
print "Loading project from "+ projFile.getCanonicalPath()
simManager = nc.SimulationManager(projFile,
verbose = verbose)
simManager.runMultipleSims(simConfigs = simConfigs,
simDt = simDt,
simulators = simulators,
runInBackground = runInBackground,
varTimestepNeuron = varTimestepNeuron,
varTimestepTolerance = varTimestepTolerance)
simManager.reloadSims(plotVoltageOnly = plotVoltageOnly,
analyseSims = analyseSims)
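    # Reference spike times (ms) for cell group 'CellGroup_1_0'; checkSims
    # below flags the run if simulated spikes deviate from these by more than
    # spikeTimeAccuracy (0.6 ms).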
times = [711.84126, 720.02236, 730.20796, 745.43043, 827.08991, 929.49095, 1027.7015, \
1122.267, 1214.1016, 1303.3449, 1390.8119, 1476.6185, 1561.1513, 1644.7155, \
1727.3263, 1809.1266, 1890.1703, 1971.0897, 2050.8026, 2130.5578, 2210.0852, \
2289.2517, 2368.1543, 2446.7049, 2524.948, 2603.1542, 2681.297]
spikeTimesToCheck = {'CellGroup_1_0': times}
spikeTimeAccuracy = 0.6
report = simManager.checkSims(spikeTimesToCheck = spikeTimesToCheck,
spikeTimeAccuracy = spikeTimeAccuracy)
print report
return report
if __name__ == "__main__":
testAll()
| [
"[email protected]"
] | |
e4e2d58658dd6c7e4adf95dd2a78fe739427c58f | 63f917864d85f0f9e810cbb4e6163f48611a8b3d | /home_content/admin.py | dcbef84ea0014ed258e07963b93519ce4277c9fd | [] | no_license | davidraywilson/suit_materialized | 37aa521d52f8dd746b55b121262501147dffb95c | 035405defedd5ee8257b42aac82749794080af4f | refs/heads/master | 2021-01-18T14:05:01.797452 | 2015-06-03T02:03:55 | 2015-06-03T02:03:55 | 32,526,877 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,052 | py | from django.contrib import admin
from home_content.models import HomeSection, Billboard, MiniBillboard
class SectionAdmin(admin.ModelAdmin):
model = HomeSection
list_display = ('name', 'order', 'is_published')
list_editable = ('order', 'is_published')
fieldsets = (
(None, {
'fields': ('template', 'name', 'order', 'is_published')
}),
(None, {
'fields': ('image_background',)
}),
)
class Media:
js = [
'/static/admin_js/home_section.js',
'/static/admin_js/tinymce/tinymce.min.js',
'/static/admin_js/tinymce_init.js'
]
class BillboardAdmin(admin.ModelAdmin):
model = Billboard
list_display = ('name', 'order', 'publish_date', 'is_published')
list_filter = ('publish_date', 'is_published')
list_editable = ('order', 'is_published',)
fieldsets = (
(None, {
'classes': (),
'fields': ('name', 'order', 'image', 'header', 'sub_header', 'publish_date', 'expire_date', 'is_published')
}),
)
class Media:
js = [
'/static/admin_js/tinymce/tinymce.min.js',
'/static/admin_js/tinymce_init.js'
]
class MiniBillboardAdmin(admin.ModelAdmin):
model = MiniBillboard
list_display = ('name', 'order', 'publish_date', 'is_published')
list_filter = ('publish_date', 'is_published')
list_editable = ('order', 'is_published',)
fieldsets = (
(None, {
'classes': (),
'fields': ('name', 'order', 'publish_date', 'expire_date', 'is_published')
}),
(None, {
'classes': (),
'fields': ('size', 'image', 'video', 'link')
}),
)
class Media:
js = [
'/static/admin_js/tinymce/tinymce.min.js',
'/static/admin_js/tinymce_init.js'
]
admin.site.register(HomeSection, SectionAdmin)
admin.site.register(Billboard, BillboardAdmin)
admin.site.register(MiniBillboard, MiniBillboardAdmin) | [
"[email protected]"
] | |
a9631a6411f099359301c0f1709dc4854de83442 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02792/s333976944.py | 822b45df844116947cf79fed9d47791edd103ea9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | def solve():
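    # Count, for each (first digit a, last digit b) pair, how many i in
    # [1, N] have that signature; every ordered pair (i, j) with
    # first(i) == last(j) and last(i) == first(j) is then counted via
    # cnt[a][b] * cnt[b][a] in the double loop below.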
N = int(input())
cnt = [[0]*10 for i in range(10)]
for i in range(1, N+1):
target_str = str(i)
cnt[int(target_str[0])][int(target_str[-1])] += 1
ans = 0
for i in range(1, 10):
for j in range(1, 10):
ans += cnt[i][j] * cnt[j][i]
print(ans)
if __name__ == "__main__":
solve() | [
"[email protected]"
] | |
9471d90433996588d7b327c4ec36c851b54ea325 | 1699300e1225f0994fbfd5e13a7eb4436a5df14d | /05_Mini_Scans_Optimised/04_SbS_Tomo_Lattice_H_07/Make_SLURM_submission_script.py | abcabc237978907bae9272a61fb53666e9224f34 | [
"MIT"
] | permissive | HaroonRafique/PyORBIT_MD4224 | 26307a60ed79f3e170fbd655eb8cbe8cc9a0dfa9 | 6f68a80b2f8bf1cbeb9e2fc840925efe8a8b5672 | refs/heads/master | 2023-04-25T13:27:49.756836 | 2020-08-25T10:26:07 | 2020-08-25T10:26:07 | 215,249,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,315 | py | #!/usr/bin/env python
# Python script to create a SLURM submission script for PyORBIT
# 21 March 2019 Haroon Rafique CERN BE-ABP-HSI
import os
#-----------------------------------------------------------------------
# SETTINGS
#-----------------------------------------------------------------------
script_name = "SLURM_submission_script.sh"
# Switches
hyperthreading = False # Enable hyperthreading
exclusive = True # Exclusive (see SLURM documentation)
autotime = True # 48 h for batch-short, 120 h for inf-short, 504 h (3 weeks) for long queues
autotask = True # Automatically set nodes to maximum tasks
clean_all = True # Clean simulation folder before running (False when resuming pickle checkpoint)
# Must be chosen
# ~ queue options: 'inf-long', 'inf-short', 'batch-long', 'batch-short'
queue = 'inf-short'
n_nodes = 2
jobname = '05_04_07'
path_to_simulation = os.path.dirname(os.path.realpath(__file__)) # This directory
# Optional - have to use with correct switches
manual_time = '504:00:00' # manually set using format 'hours:minutes:seconds'
manual_tasks = 40 # manually change ntasks
# Defaults - can be changed
output_file_name = 'slurm.%N.%j.out'
error_file_name = 'slurm.%N.%j.err'
root_dir = '/hpcscratch/user/harafiqu'
simulation_file = 'pyOrbit.py'
#-----------------------------------------------------------------------
# AUTOMATICALLY FORMAT SCRIPT
#-----------------------------------------------------------------------
n_tasks = 0
if autotask:
if hyperthreading:
if 'batch' in queue: n_tasks = 32
elif 'inf' in queue: n_tasks = 40
else:
print 'queue not recognised'
exit(0)
else:
if 'batch' in queue: n_tasks = 16
elif 'inf' in queue: n_tasks = 20
else:
print 'queue not recognised'
exit(0)
else: n_tasks = manual_tasks
time = '48:00:00'
if autotime:
if queue == 'batch-short': time = '48:00:00'
elif queue == 'inf-short': time = '120:00:00'
    elif queue == 'inf-long' or queue == 'batch-long': time = '504:00:00'
else:
print 'queue not recognised'
exit(0)
else: time = manual_time
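# Illustrative result for the settings above (queue='inf-short', n_nodes=2,
# hyperthreading=False, autotask and autotime enabled): n_tasks = 20 per node
# and a wall time of '120:00:00'.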
#-----------------------------------------------------------------------
# WRITE FILE
#-----------------------------------------------------------------------
if os.path.exists(script_name):
print 'SLURM submission script ' + script_name + ' already exists. Deleting'
os.remove(script_name)
print "Creating ", script_name
f= open(script_name,"w")
f.write('#!/bin/bash')
f.write('\n#SBATCH --job-name=' + str(jobname))
f.write('\n#SBATCH --output=' + str(output_file_name))
f.write('\n#SBATCH --error=' + str(error_file_name))
f.write('\n#SBATCH --nodes=' + str(n_nodes))
f.write('\n#SBATCH --ntasks-per-node=' + str(n_tasks))
f.write('\n#SBATCH --partition=' + str(queue))
f.write('\n#SBATCH --time=' + str(time))
f.write('\n#SBATCH --mem-per-cpu=3200M')
if (exclusive): f.write('\n#SBATCH --exclusive')
if not hyperthreading: f.write('\n#SBATCH --hint=nomultithread')
f.write('\n')
f.write('\nBATCH_ROOT_DIR=' + str(root_dir))
f.write('\nRUN_DIR=' + str(path_to_simulation))
f.write('\nOrigIwd=$(pwd)')
f.write('\n')
f.write('\n# Make an output folder in the root directory to hold SLURM info file')
f.write('\ncd ${BATCH_ROOT_DIR}')
f.write('\noutput_dir="output"')
f.write('\nmkdir -p $output_dir')
f.write('\n')
f.write('\n# Fill the SLURM info file')
f.write('\nsimulation_info_file="${BATCH_ROOT_DIR}/${output_dir}/simulation_info_${SLURM_JOB_ID}.${SLURM_NODEID}.${SLURM_PROCID}.txt"')
f.write('\necho "PyOrbit path: `readlink -f ${ORBIT_ROOT}`" >> ${simulation_info_file}')
f.write('\necho "Run path: `readlink -f ${RUN_DIR}`" >> ${simulation_info_file}')
f.write('\necho "Submit host: `readlink -f ${SLURM_SUBMIT_HOST}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Job name: `readlink -f ${SLURM_JOB_NAME}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Job ID: `readlink -f ${SLURM_JOB_ID}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Nodes allocated: `readlink -f ${SLURM_JOB_NUM_NODES}`" >> ${simulation_info_file}')
f.write('\necho "SLURM CPUS per Node: `readlink -f ${SLURM_CPUS_ON_NODE}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Node ID: `readlink -f ${SLURM_NODEID}`" >> ${simulation_info_file}')
f.write('\necho "SLURM total cores for job: `readlink -f ${SLURM_NTASKS}`" >> ${simulation_info_file}')
f.write('\necho "SLURM process ID: `readlink -f ${SLURM_PROCID}`" >> ${simulation_info_file}')
f.write('\necho "****************************************" >> ${simulation_info_file}')
f.write('\n')
f.write('\n# Enter job directory, clean it, and setup environment -> SLURM info file')
f.write('\ncd ${RUN_DIR}')
if clean_all:f.write('\n./clean_all.sh')
f.write('\n. setup_environment.sh >> ${simulation_info_file}')
f.write('\n')
f.write('\n# Load correct MPI')
f.write('\nmodule load mpi/mvapich2/2.3')
f.write('\n')
f.write('\ntstart=$(date +%s)')
f.write('\n')
f.write('\n# Run the job')
if hyperthreading:f.write('\nsrun ${ORBIT_ROOT}/bin/pyORBIT ${RUN_DIR}/' + str(simulation_file))
else:f.write('\nsrun --hint=nomultithread ${ORBIT_ROOT}/bin/pyORBIT ${RUN_DIR}/' + str(simulation_file))
f.write('\n')
f.write('\ntend=$(date +%s)')
f.write('\ndt=$(($tend - $tstart))')
f.write('\necho "total simulation time (s): " $dt >> ${simulation_info_file}')
f.close()
print 'SLURM submission script creation finished'
| [
"[email protected]"
] | |
bfe9031d3c5da7284c29c3344d18127362dadb2e | 9c01350ff70d17504990d1f0077df50d6a47b0c6 | /molgym/agents/dqn_variable_actions.py | 63b4dc25c134101aa3a3b9d34ea48ae47b390cce | [] | no_license | WardLT/molecular-dqn | 0a5d1018bbc577ebb26ff58ade0ad6e0c2e1014a | 14dc46c1539088c591dde0c60947a9484353e9a9 | refs/heads/master | 2021-02-13T09:20:34.811407 | 2020-03-03T16:21:22 | 2020-03-03T16:21:22 | 244,683,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,462 | py | import random
import logging
import numpy as np
from collections import deque
from keras.models import Model
from keras.engine.network import Network
from keras.layers import Dense, Input, Lambda, Subtract, Concatenate
from keras import backend as K
from gym_exalearn.envs.exalearn_electrolyte_design import Molecule
logger = logging.getLogger(__name__)
class GraphModel(Model):
""" This is a simple modification of the Keras `Model` class to avoid
checking each input for a consistent batch_size dimension. Should work as
of keras-team/keras#11548.
"""
def _standardize_user_data(self, *args, **kwargs):
kwargs['check_array_lengths'] = False
return super(GraphModel, self)._standardize_user_data(*args, **kwargs)
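# GraphModel is used below as a drop-in replacement for keras.Model when the
# inputs of one batch have inconsistent leading dimensions (here: a variable
# number of candidate next actions per transition), which stock Keras rejects.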
def _q_target_value(inputs, gamma=0.99):
"""Function to compute the target value for Q learning"""
reward, v_tp1, done = inputs
return reward + gamma * (1.0 - done) * v_tp1
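# Worked example: with the default gamma=0.99, reward=1.0, v_tp1=2.0 and
# done=0.0 the target is 1.0 + 0.99 * 2.0 = 2.98; with done=1.0 the bootstrap
# term is masked out and the target is just the reward, 1.0.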
class DQNFinalState:
"""Implementation of Deep Q Learning that uses the final state after applying an action as input
Q is typically defined as a function of (state, action), written as Q(s, a).
Here, we define a new state, s', as the result of applying action a to state s
and use s' as the input to Q.
Follows the implementation described by `Zhou et al. <http://www.nature.com/articles/s41598-019-47148-x>`_.
"""
def __init__(self, env: Molecule, epsilon=1.0):
"""
Args:
epsilon (float): Exploration factor
"""
self.env = env
self.memory = deque(maxlen=2000)
# Hyper-parameters
self.gamma = 0.995 # discount rate
self.epsilon = epsilon # exploration rate
self.epsilon_min = 0.05
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.batch_size = 32
# Create the model
self._build_model()
def _huber_loss(self, target, prediction):
# sqrt(1+error^2)-1
error = prediction - target
        return K.mean(K.sqrt(1 + K.square(error)) - 1, axis=-1)
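    # Note: this is the smooth "pseudo-Huber" form sqrt(1 + e^2) - 1, which
    # behaves like e^2 / 2 for small errors and like |e| for large ones. It is
    # defined here but unused; compile() below uses 'mean_squared_error'.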
def _build_model(self):
# Get the shape of the environment
_, fingerprint_size = self.env.action_space.shape
predict_actions_input = Input(batch_shape=(None, fingerprint_size), name='single_action')
train_action_input = Input(batch_shape=(self.batch_size, fingerprint_size),
name='batch_action')
reward_input = Input(batch_shape=(self.batch_size, 1), name='rewards')
done_input = Input(batch_shape=(self.batch_size, 1), name='done')
new_actions = [Input(batch_shape=(None, fingerprint_size), name=f'next_actions_{i}')
for i in range(self.batch_size)]
# Squeeze the train action and reward input
squeeze = Lambda(K.squeeze, arguments={'axis': 1}, name='squeezer')
reward = squeeze(reward_input)
done = squeeze(done_input)
# Define the Q network. Note that we use a `Network` rather than model because
# this model is not trained
# - Takes a list of actions as input
# - Produces the value of each action as output
# TODO (wardlt): Allow users to specify a different architecture
def make_q_network(input_shape, name=None):
inputs = Input(shape=input_shape)
h1 = Dense(24, activation='relu')(inputs)
h2 = Dense(48, activation='relu')(h1)
h3 = Dense(24, activation='relu')(h2)
output = Dense(1, activation='linear')(h3)
return Network(inputs=inputs, outputs=output, name=name)
q_t = make_q_network((None, fingerprint_size), name='q_t')
q = q_t(predict_actions_input)
self.action_network = Model(inputs=predict_actions_input, outputs=q)
# Make the training network
# Part 1: Computing estimated value of the next state
# Set as the maximum Q for any action from that next state
# Note: This Q network is not updated by the optimizer. Instead, it is
# periodically updated with the weights from `q_t`, which is being updated
q_tp1 = make_q_network((None, fingerprint_size), name='q_tp1')
q_tp1.trainable = False
max_layer = Lambda(K.max, arguments={'axis': 0}, name='max_layer')
q_values = [max_layer(q_tp1(action)) for action in new_actions]
v_tp1 = Concatenate(name='v_tp1')(q_values)
# Part 2: Define the target function, the measured reward of a state
# plus the estimated value of the next state (or zero if this state is terminal)
target = Lambda(_q_target_value, name='target', arguments={'gamma': self.gamma})\
([reward, v_tp1, done])
        # Part 3: Define the error signal
q_t_train = q_t(train_action_input)
q_t_train = Lambda(K.reshape, arguments={'shape': (self.batch_size,)},
name='squeeze_q')(q_t_train)
error = Subtract(name='error')([q_t_train, target])
error = Lambda(K.reshape, arguments={'shape': (self.batch_size, 1)},
name='wrap_error')(error)
self.train_network = GraphModel(
inputs=[train_action_input, done_input, reward_input] + new_actions,
outputs=error)
# Add the optimizer
self.train_network.compile(optimizer='adam', loss='mean_squared_error')
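        # Design note: the train network outputs the TD error itself and is
        # fit against an all-zero target (see train() below), so minimizing
        # MSE on the output minimizes the squared TD error.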
def remember(self, state, action, reward, next_state, next_actions, done):
self.memory.append((action, reward, next_actions, done))
def action(self):
"""Choose the next action"""
# TODO (wardlt): State is defined in `env`. Do we need it as an input?
actions = self.env.action_space.get_possible_actions()
if np.random.rand() <= self.epsilon:
action_ix = random.randrange(self.env.action_space.n)
else:
# Invoke the action network, which gives the action with the highest reward
action_scores = self.action_network.predict(actions)
action_ix = np.argmax(action_scores)
return actions[action_ix]
def update_target_q_network(self):
"""Updates the Q function used to define the target to use the current Q network"""
q_weights = self.action_network.get_layer('q_t').get_weights()
self.train_network.get_layer('q_tp1').set_weights(q_weights)
def train(self):
"""Train model on a batch of data from the memory
Returns:
            loss (float): Current loss
"""
# Check if we have enough data
if len(self.memory) < self.batch_size:
return
# Get a minibatch
actions, rewards, next_actions, done = zip(*random.sample(self.memory, self.batch_size))
# Convert inputs to numpy arrays
actions = np.array(actions, dtype=K.floatx())
rewards = np.array(rewards, dtype=K.floatx())
next_actions = [np.array(a, dtype=K.floatx()) for a in next_actions]
done = np.array(done, dtype=K.floatx())
# Give bogus moves to those that are done and lack next moves
# Needed to give the proper input shape to the model
for i, (na, d) in enumerate(zip(next_actions, done)):
if na.shape == (0,):
if not d:
raise RuntimeError('Found a move that is not terminal, yet has no next actions')
next_actions[i] = np.zeros((1, self.action_network.input_shape[1]))
# Define the target output for the network, zero
target = np.zeros((self.batch_size, 1))
# Train on this minibatch
return self.train_network.train_on_batch([actions, done, rewards] + list(next_actions), target)
def epsilon_adj(self):
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def load(self, path):
"""Load the weights of the model
Args:
path (str): Path to a file holding the weights
"""
# Load in the training network
self.train_network.load_weights(path)
# Use it to set the weights of the "action network"
q_weights = self.train_network.get_layer('q_t').get_weights()
self.action_network.get_layer('q_t').set_weights(q_weights)
def save(self, path):
"""Save the model state
Args:
path (str): Path to save weights
"""
self.train_network.save_weights(path)
| [
"[email protected]"
] | |
60df2de014de6a9da38b852a02395c7365e9f2c1 | 41fd80f9ccc72a17c2db16b7019312a87d3181e8 | /zhang_local/pdep/network509_1.py | 459fb9592378991924c9f90b3e693788fa829b6c | [] | no_license | aberdeendinius/n-heptane | 1510e6704d87283043357aec36317fdb4a2a0c34 | 1806622607f74495477ef3fd772908d94cff04d9 | refs/heads/master | 2020-05-26T02:06:49.084015 | 2019-07-01T15:12:44 | 2019-07-01T15:12:44 | 188,069,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97,706 | py | species(
label = '[CH2][C](OO)OC[CH]CC(1239)',
structure = SMILES('[CH2][C](OO)OC[CH]CC'),
E0 = (169.452,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([360,370,350,3025,407.5,1350,352.5,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.347194,0.102148,-0.000106709,6.28302e-08,-1.55762e-11,20531.4,40.202], Tmin=(100,'K'), Tmax=(955.841,'K')), NASAPolynomial(coeffs=[12.3883,0.048852,-2.30731e-05,4.49679e-09,-3.19182e-13,18096.8,-20.6644], Tmin=(955.841,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(169.452,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJCO) + radical(CJCOOH) + radical(Cs_P)"""),
)
species(
label = 'C=CCC(36)',
structure = SMILES('C=CCC'),
E0 = (-16.5218,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,385.42],'cm^-1')),
HinderedRotor(inertia=(0.10941,'amu*angstrom^2'), symmetry=1, barrier=(11.0707,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.106381,'amu*angstrom^2'), symmetry=1, barrier=(11.1092,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (56.1063,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2968.28,'J/mol'), sigma=(5.176,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.64174,0.0203702,3.0581e-05,-4.85559e-08,1.85232e-11,-1929.81,14.9537], Tmin=(100,'K'), Tmax=(991.794,'K')), NASAPolynomial(coeffs=[7.80835,0.0229241,-8.65887e-06,1.6005e-09,-1.13877e-13,-4105.1,-15.7294], Tmin=(991.794,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-16.5218,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH)"""),
)
species(
label = '[CH2]C(=O)OO(1167)',
structure = SMILES('[CH2]C(=O)OO'),
E0 = (-234.165,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000,631.199,631.199,631.199,631.2],'cm^-1')),
HinderedRotor(inertia=(0.154163,'amu*angstrom^2'), symmetry=1, barrier=(43.5852,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.154163,'amu*angstrom^2'), symmetry=1, barrier=(43.5853,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.154163,'amu*angstrom^2'), symmetry=1, barrier=(43.5853,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (75.0434,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3635.24,'J/mol'), sigma=(5.76225,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=567.82 K, Pc=43.11 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.3608,0.0242042,1.63595e-05,-4.45473e-08,2.01814e-11,-28093.7,18.81], Tmin=(100,'K'), Tmax=(954.621,'K')), NASAPolynomial(coeffs=[13.6646,0.00626349,-1.68383e-06,3.41178e-10,-2.97857e-14,-31592.6,-42.2214], Tmin=(954.621,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-234.165,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(170.447,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-O2d)) + group(O2s-OsH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsOs) + radical(CJCO)"""),
)
species(
label = '[CH2][C](OO)OCC=CC(1671)',
structure = SMILES('[CH2][C](OO)OCC=CC'),
E0 = (90.6433,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([360,370,350,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,3615,1310,387.5,850,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (130.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.150845,0.0991533,-0.000112695,7.57153e-08,-2.16817e-11,11044.4,37.424], Tmin=(100,'K'), Tmax=(831.259,'K')), NASAPolynomial(coeffs=[10.0404,0.0501132,-2.42022e-05,4.74398e-09,-3.37055e-13,9350.14,-9.85909], Tmin=(831.259,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(90.6433,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(428.195,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-(Cds-Cds)OsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(CJCOOH) + radical(Cs_P)"""),
)
species(
label = 'H(8)',
structure = SMILES('[H]'),
E0 = (211.805,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25474.2,-0.444973], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25474.2,-0.444973], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.805,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = '[CH2][C](OO)OC=CCC(1672)',
structure = SMILES('[CH2][C](OO)OC=CCC'),
E0 = (46.3048,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([360,370,350,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,3615,1310,387.5,850,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (130.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.25805,0.101951,-8.73553e-05,2.95995e-08,-1.36212e-12,5770.45,39.573], Tmin=(100,'K'), Tmax=(1027.03,'K')), NASAPolynomial(coeffs=[24.8082,0.0244665,-9.29449e-06,1.71698e-09,-1.22091e-13,-851.416,-93.0487], Tmin=(1027.03,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(46.3048,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(428.195,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + radical(CJCOOH) + radical(Cs_P)"""),
)
species(
label = '[CH2][CH]CC(39)',
structure = SMILES('[CH2][CH]CC'),
E0 = (255.389,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,1553.58],'cm^-1')),
HinderedRotor(inertia=(0.00260974,'amu*angstrom^2'), symmetry=1, barrier=(4.49727,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.19501,'amu*angstrom^2'), symmetry=1, barrier=(4.48366,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.194146,'amu*angstrom^2'), symmetry=1, barrier=(4.4638,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (56.1063,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.59221,0.0286125,-6.18652e-06,-3.09336e-09,1.2171e-12,30768.7,19.1946], Tmin=(100,'K'), Tmax=(1492.38,'K')), NASAPolynomial(coeffs=[6.22741,0.0257398,-1.0205e-05,1.78668e-09,-1.17174e-13,28918.5,-2.36199], Tmin=(1492.38,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(255.389,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJC) + radical(RCCJ)"""),
)
species(
label = '[CH3](11)',
structure = SMILES('[CH3]'),
E0 = (135.382,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([570.572,1408.13,1408.49,4000,4000,4000],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (15.0345,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.91547,0.00184154,3.48742e-06,-3.32748e-09,8.49957e-13,16285.6,0.351741], Tmin=(100,'K'), Tmax=(1337.63,'K')), NASAPolynomial(coeffs=[3.54146,0.00476787,-1.82148e-06,3.28877e-10,-2.22546e-14,16224,1.66035], Tmin=(1337.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(135.382,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), comment="""Thermo library: primaryThermoLibrary + radical(CH3)"""),
)
species(
label = '[CH2][C](OO)OCC=C(1507)',
structure = SMILES('[CH2][C](OO)OCC=C'),
E0 = (126.669,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([360,370,350,2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3000,3100,440,815,1455,1000,3615,1310,387.5,850,1000,3010,987.5,1337.5,450,1655,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (116.115,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.472793,0.0839049,-9.35087e-05,5.93011e-08,-1.58426e-11,15356.4,33.3755], Tmin=(100,'K'), Tmax=(890.982,'K')), NASAPolynomial(coeffs=[10.2857,0.0398503,-1.93413e-05,3.8062e-09,-2.7131e-13,13607.7,-12.8336], Tmin=(890.982,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(126.669,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-(Cds-Cds)OsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cs_P) + radical(CJCOOH)"""),
)
species(
label = 'OH(D)(132)',
structure = SMILES('[OH]'),
E0 = (28.3945,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3668.68],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (17.0073,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.51457,2.92814e-05,-5.32177e-07,1.01951e-09,-3.85951e-13,3414.25,2.10435], Tmin=(100,'K'), Tmax=(1145.75,'K')), NASAPolynomial(coeffs=[3.07194,0.000604011,-1.39759e-08,-2.13452e-11,2.4807e-15,3579.39,4.57799], Tmin=(1145.75,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(28.3945,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""OH(D)""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = '[CH2]C(=O)OC[CH]CC(1673)',
structure = SMILES('[CH2]C(=O)OC[CH]CC'),
E0 = (-99.5569,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,914.286,1028.57,1142.86,1257.14,1371.43,1485.71,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.61875,0.0693938,-4.49435e-05,1.45199e-08,-1.92082e-12,-11848.6,34.2943], Tmin=(100,'K'), Tmax=(1704.52,'K')), NASAPolynomial(coeffs=[15.3541,0.034815,-1.45142e-05,2.6187e-09,-1.75314e-13,-16872,-44.6537], Tmin=(1704.52,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-99.5569,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-O2d)) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsOs) + radical(CCJCO) + radical(CJCO)"""),
)
species(
label = '[CH2][C](OO)OCC[CH]C(1674)',
structure = SMILES('[CH2][C](OO)OCC[CH]C'),
E0 = (163.996,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.1982,0.0979843,-7.95519e-05,-2.24849e-08,6.13505e-11,19846.6,38.6523], Tmin=(100,'K'), Tmax=(507.177,'K')), NASAPolynomial(coeffs=[8.85323,0.0548389,-2.62261e-05,5.05737e-09,-3.53558e-13,18645.7,-0.4118], Tmin=(507.177,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(163.996,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CJCOOH) + radical(RCCJC) + radical(Cs_P)"""),
)
species(
label = '[CH2][C](OO)O[CH]CCC(1634)',
structure = SMILES('[CH2][C](OO)O[CH]CCC'),
E0 = (150.005,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.957369,0.115731,-0.000139252,9.22466e-08,-2.49971e-11,18214.2,39.3408], Tmin=(100,'K'), Tmax=(891.57,'K')), NASAPolynomial(coeffs=[14.6523,0.0456976,-2.14257e-05,4.14195e-09,-2.91999e-13,15430.8,-34.1752], Tmin=(891.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(150.005,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCsJOCs) + radical(Cs_P) + radical(CJCOOH)"""),
)
species(
label = '[CH2]C(OO)O[CH][CH]CC(1675)',
structure = SMILES('[CH2]C(OO)O[CH][CH]CC'),
E0 = (144.661,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.797115,0.109838,-0.000120754,7.17252e-08,-1.74007e-11,17567.9,40.5682], Tmin=(100,'K'), Tmax=(991.02,'K')), NASAPolynomial(coeffs=[15.7282,0.043138,-1.97997e-05,3.81296e-09,-2.69048e-13,14292.5,-39.0081], Tmin=(991.02,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(144.661,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJCO) + radical(CJCOOH) + radical(CCsJOCs)"""),
)
species(
label = '[CH2]C(O[O])OC[CH]CC(1676)',
structure = SMILES('[CH2]C(O[O])OC[CH]CC'),
E0 = (116.21,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.200243,0.0987664,-0.000104297,6.42959e-08,-1.68368e-11,14122.6,39.7453], Tmin=(100,'K'), Tmax=(905.755,'K')), NASAPolynomial(coeffs=[10.9232,0.0496445,-2.29498e-05,4.4236e-09,-3.11826e-13,12107.5,-12.8184], Tmin=(905.755,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(116.21,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(453.139,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CJCOOH) + radical(ROOJ) + radical(CCJCO)"""),
)
species(
label = '[CH2]CCCO[C]([CH2])OO(1677)',
structure = SMILES('[CH2]CCCO[C]([CH2])OO'),
E0 = (174.796,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,360,370,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.588603,0.109032,-0.00012884,8.83329e-08,-2.54377e-11,21181.2,40.362], Tmin=(100,'K'), Tmax=(832.114,'K')), NASAPolynomial(coeffs=[11.4435,0.0511924,-2.45747e-05,4.79678e-09,-3.39755e-13,19178.8,-15.4746], Tmin=(832.114,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(174.796,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(CJCOOH) + radical(RCCJ)"""),
)
species(
label = 'CC[CH][CH]O[C](C)OO(1678)',
structure = SMILES('CC[CH][CH]O[C](C)OO'),
E0 = (135.945,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.650858,0.107313,-0.000116056,6.86848e-08,-1.67472e-11,16513.7,38.7377], Tmin=(100,'K'), Tmax=(981.929,'K')), NASAPolynomial(coeffs=[14.655,0.0449635,-2.08126e-05,4.02181e-09,-2.84247e-13,13507.8,-34.8251], Tmin=(981.929,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(135.945,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJCO) + radical(Cs_P) + radical(CCsJOCs)"""),
)
species(
label = 'CC[CH]CO[C](C)O[O](1679)',
structure = SMILES('CC[CH]CO[C](C)O[O]'),
E0 = (107.494,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0778679,0.0965209,-0.000100567,6.25098e-08,-1.6724e-11,13069.5,38.0006], Tmin=(100,'K'), Tmax=(882.45,'K')), NASAPolynomial(coeffs=[9.86589,0.0514481,-2.39525e-05,4.63049e-09,-3.2689e-13,11314.5,-8.72894], Tmin=(882.45,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(107.494,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(453.139,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(ROOJ) + radical(Cs_P) + radical(CCJCO)"""),
)
species(
label = '[CH2]C(OO)OC[CH][CH]C(1680)',
structure = SMILES('[CH2]C(OO)OC[CH][CH]C'),
E0 = (158.651,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.366,0.103796,-0.000123626,9.01436e-08,-2.80478e-11,19231.5,42.3139], Tmin=(100,'K'), Tmax=(769.348,'K')), NASAPolynomial(coeffs=[9.34397,0.053317,-2.52163e-05,4.87703e-09,-3.43131e-13,17737.3,-1.98599], Tmin=(769.348,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(158.651,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CJCOOH) + radical(CCJCO) + radical(RCCJC)"""),
)
species(
label = 'C[CH][CH]CO[C](C)OO(1681)',
structure = SMILES('C[CH][CH]CO[C](C)OO'),
E0 = (149.935,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.287892,0.102158,-0.000122461,9.24253e-08,-3.00624e-11,18180.2,40.7228], Tmin=(100,'K'), Tmax=(735.382,'K')), NASAPolynomial(coeffs=[8.39509,0.0549213,-2.60976e-05,5.0541e-09,-3.55649e-13,16903.3,1.50243], Tmin=(735.382,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(149.935,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(RCCJC) + radical(CCJCO)"""),
)
species(
label = '[CH2]C[CH]COC([CH2])OO(1682)',
structure = SMILES('[CH2]C[CH]COC([CH2])OO'),
E0 = (169.452,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.347194,0.102148,-0.000106709,6.28302e-08,-1.55762e-11,20531.4,41.3006], Tmin=(100,'K'), Tmax=(955.841,'K')), NASAPolynomial(coeffs=[12.3883,0.048852,-2.30731e-05,4.49679e-09,-3.19182e-13,18096.8,-19.5658], Tmin=(955.841,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(169.452,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJCO) + radical(RCCJ) + radical(CJCOOH)"""),
)
species(
label = '[CH2][C](O[O])OCCCC(1683)',
structure = SMILES('[CH2][C](O[O])OCCCC'),
E0 = (121.554,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.487138,0.106299,-0.000129234,9.42491e-08,-2.89812e-11,14774.2,38.9626], Tmin=(100,'K'), Tmax=(781.705,'K')), NASAPolynomial(coeffs=[10.2083,0.0515651,-2.41969e-05,4.66124e-09,-3.27085e-13,13102.3,-10.0016], Tmin=(781.705,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(121.554,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(453.139,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(CJCOOH) + radical(ROOJ)"""),
)
species(
label = '[CH2]C[CH]CO[C](C)OO(1684)',
structure = SMILES('[CH2]C[CH]CO[C](C)OO'),
E0 = (160.735,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.212276,0.0997493,-0.000102424,6.0291e-08,-1.51253e-11,19477.7,39.5113], Tmin=(100,'K'), Tmax=(939.466,'K')), NASAPolynomial(coeffs=[11.3105,0.0506896,-2.40949e-05,4.7081e-09,-3.34607e-13,17312.6,-15.3602], Tmin=(939.466,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(160.735,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJCO) + radical(Cs_P) + radical(RCCJ)"""),
)
species(
label = '[CH2][C]([O])OO(1352)',
structure = SMILES('[CH2][C]([O])OO'),
E0 = (259.617,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000,360,370,350,180],'cm^-1')),
HinderedRotor(inertia=(0.365969,'amu*angstrom^2'), symmetry=1, barrier=(8.41434,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0124941,'amu*angstrom^2'), symmetry=1, barrier=(36.0133,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0124334,'amu*angstrom^2'), symmetry=1, barrier=(36.0196,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (75.0434,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.07061,0.0487342,-8.34891e-05,7.95986e-08,-2.95498e-11,31288.1,22.2062], Tmin=(100,'K'), Tmax=(816.093,'K')), NASAPolynomial(coeffs=[4.97245,0.0220937,-1.16997e-05,2.30933e-09,-1.61703e-13,31227.9,11.3297], Tmin=(816.093,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(259.617,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(170.447,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsHHH) + radical(Cs_P) + radical(CCOJ) + radical(CJCOOH)"""),
)
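# Illustrative sketch, not part of the Arkane input: each HinderedRotor mode
# above lists a reduced moment of inertia in amu*angstrom^2; the corresponding
# rotational constant in cm^-1 follows from B = h/(8*pi^2*c*I). The helper
# name `rotor_constant` is hypothetical.
import math

def rotor_constant(inertia_amu_A2):
    h = 6.62607015e-34                       # Planck constant, J*s
    c = 2.99792458e10                        # speed of light, cm/s
    I = inertia_amu_A2 * 1.66053906660e-47   # amu*angstrom^2 -> kg*m^2
    return h / (8.0 * math.pi**2 * c * I)    # rotational constant, cm^-1

# e.g. rotor_constant(0.365969) gives roughly 46 cm^-1 for the first rotor above.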
species(
label = '[CH2][C]([O])OC[CH]CC(1685)',
structure = SMILES('[CH2][C]([O])OC[CH]CC'),
E0 = (321.278,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,360,370,350,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1000,1200,1400,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (114.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.00257,0.0775287,-5.00429e-05,-3.57639e-08,5.79805e-11,38737.1,34.2371], Tmin=(100,'K'), Tmax=(495.842,'K')), NASAPolynomial(coeffs=[6.64246,0.0505143,-2.42348e-05,4.71459e-09,-3.32572e-13,37950.6,8.69301], Tmin=(495.842,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(321.278,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJCO) + radical(CJCO) + radical(Cs_P) + radical(CCOJ)"""),
)
species(
label = '[CH2][C](O[O])OC[CH]CC(1686)',
structure = SMILES('[CH2][C](O[O])OC[CH]CC'),
E0 = (321.456,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([492.5,1135,1000,360,370,350,3025,407.5,1350,352.5,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (130.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.263818,0.100711,-0.000119105,8.24343e-08,-2.39245e-11,38809.6,40.6597], Tmin=(100,'K'), Tmax=(827.181,'K')), NASAPolynomial(coeffs=[10.7973,0.0472239,-2.21135e-05,4.26588e-09,-3.00025e-13,36979.7,-10.6054], Tmin=(827.181,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(321.456,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(428.195,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(CCJCO) + radical(ROOJ) + radical(CJCOOH)"""),
)
species(
label = '[CH2][CH]CO[C]([CH2])OO(1520)',
structure = SMILES('[CH2][CH]CO[C]([CH2])OO'),
E0 = (398.478,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1310,387.5,850,1000,360,370,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2850,1437.5,1250,1305,750,350,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (116.115,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.225673,0.0894063,-0.000107888,7.29658e-08,-2.04345e-11,48056.2,37.6863], Tmin=(100,'K'), Tmax=(858.977,'K')), NASAPolynomial(coeffs=[11.2115,0.0382508,-1.85609e-05,3.64033e-09,-2.58587e-13,46168.8,-13.6444], Tmin=(858.977,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(398.478,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJ) + radical(CCJCO) + radical(CJCOOH) + radical(Cs_P)"""),
)
species(
label = '[CH2][C](OO)OC[CH][CH]C(1687)',
structure = SMILES('[CH2][C](OO)OC[CH][CH]C'),
E0 = (363.898,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([360,370,350,3000,3050,390,425,1340,1360,335,370,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (130.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.48486,0.106591,-0.000142435,1.15143e-07,-3.89207e-11,43920.8,43.4146], Tmin=(100,'K'), Tmax=(749.267,'K')), NASAPolynomial(coeffs=[9.50524,0.050356,-2.40454e-05,4.63614e-09,-3.24166e-13,42505.2,-1.35454], Tmin=(749.267,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(363.898,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJC) + radical(CCJCO) + radical(Cs_P) + radical(CJCOOH)"""),
)
species(
label = '[CH2][C](OO)O[CH][CH]CC(1688)',
structure = SMILES('[CH2][C](OO)O[CH][CH]CC'),
E0 = (349.908,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([360,370,350,3000,3050,390,425,1340,1360,335,370,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (130.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.789421,0.110874,-0.000132069,8.48728e-08,-2.21462e-11,42251.9,41.2316], Tmin=(100,'K'), Tmax=(927.191,'K')), NASAPolynomial(coeffs=[15.3838,0.0411,-1.91885e-05,3.7092e-09,-2.61771e-13,39252.8,-35.5722], Tmin=(927.191,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.908,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCsJOCs) + radical(CCJCO) + radical(CJCOOH) + radical(Cs_P)"""),
)
species(
label = '[CH2]C[CH]CO[C]([CH2])OO(1689)',
structure = SMILES('[CH2]C[CH]CO[C]([CH2])OO'),
E0 = (374.698,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1310,387.5,850,1000,360,370,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (130.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.3852,0.103732,-0.00011998,7.85825e-08,-2.14708e-11,45217.4,42.1273], Tmin=(100,'K'), Tmax=(876.721,'K')), NASAPolynomial(coeffs=[12.1152,0.0466978,-2.23973e-05,4.37821e-09,-3.10705e-13,43025.6,-16.5348], Tmin=(876.721,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(374.698,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJ) + radical(Cs_P) + radical(CJCOOH) + radical(CCJCO)"""),
)
species(
label = '[CH2]C(OO)OC=CCC(1690)',
structure = SMILES('[CH2]C(OO)OC=CCC'),
E0 = (-158.942,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.28578,0.10111,-7.65547e-05,1.69405e-08,3.22438e-12,-18912.6,38.2912], Tmin=(100,'K'), Tmax=(1012.62,'K')), NASAPolynomial(coeffs=[24.9522,0.0268639,-1.01212e-05,1.87318e-09,-1.33816e-13,-25733.7,-96.0631], Tmin=(1012.62,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-158.942,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(453.139,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + radical(CJCOOH)"""),
)
species(
label = '[CH2]C(OO)OCC=CC(1691)',
structure = SMILES('[CH2]C(OO)OCC=CC'),
E0 = (-114.603,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0741598,0.0970821,-9.75747e-05,5.73393e-08,-1.45578e-11,-13643.2,36.4608], Tmin=(100,'K'), Tmax=(922.863,'K')), NASAPolynomial(coeffs=[10.2332,0.0524061,-2.49588e-05,4.88176e-09,-3.4713e-13,-15545.6,-12.4387], Tmin=(922.863,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-114.603,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(453.139,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-(Cds-Cds)OsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(CJCOOH)"""),
)
species(
label = 'CH2(S)(14)',
structure = SMILES('[CH2]'),
E0 = (419.091,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1369.93,2896.01,2896.03],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.10264,-0.00144068,5.45069e-06,-3.58002e-09,7.56192e-13,50400.6,-0.411765], Tmin=(100,'K'), Tmax=(1442.36,'K')), NASAPolynomial(coeffs=[2.62648,0.00394763,-1.49924e-06,2.54539e-10,-1.62956e-14,50691.8,6.78378], Tmin=(1442.36,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(419.091,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: primaryThermoLibrary"""),
)
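# Illustrative sketch, not part of the Arkane input: every species in this
# file uses the SingleExponentialDown collisional energy-transfer model, whose
# average energy transferred per deactivating collision scales as
# <dE_down>(T) = alpha0*(T/T0)**n. The helper name `delta_e_down` is hypothetical.
def delta_e_down(alpha0_kJ_mol, T, T0=300.0, n=0.85):
    return alpha0_kJ_mol * (T / T0) ** n  # kJ/mol

# e.g. delta_e_down(3.5886, 1000.0) is roughly 10 kJ/mol at 1000 K.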
species(
label = '[CH2][C](OO)OC[CH]C(1225)',
structure = SMILES('[CH2][C](OO)OC[CH]C'),
E0 = (193.232,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([360,370,350,3025,407.5,1350,352.5,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (117.123,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3944.37,'J/mol'), sigma=(6.88112,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=616.10 K, Pc=27.47 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.269421,0.0877374,-9.42428e-05,5.66206e-08,-1.42417e-11,23370,35.7416], Tmin=(100,'K'), Tmax=(945.523,'K')), NASAPolynomial(coeffs=[11.4378,0.0404897,-1.92878e-05,3.77136e-09,-2.68121e-13,21258,-17.514], Tmin=(945.523,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(193.232,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(378.308,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJCO) + radical(Cs_P) + radical(CJCOOH)"""),
)
species(
label = '[CH2][C](OO)OCC([CH2])C(1692)',
structure = SMILES('[CH2][C](OO)OCC([CH2])C'),
E0 = (165.59,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.662523,0.108338,-0.000124203,8.09108e-08,-2.18195e-11,20078.8,40.3361], Tmin=(100,'K'), Tmax=(892.078,'K')), NASAPolynomial(coeffs=[12.9758,0.0471853,-2.13752e-05,4.06572e-09,-2.84027e-13,17645.5,-23.9032], Tmin=(892.078,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(165.59,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsCsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(CJCOOH) + radical(Isobutyl)"""),
)
species(
label = '[CH2]C1(OO)OCC1CC(1243)',
structure = SMILES('[CH2]C1(OO)OCC1CC'),
E0 = (-116.044,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.745367,0.078543,1.06633e-05,-9.48853e-08,5.05451e-11,-13761.7,32.5097], Tmin=(100,'K'), Tmax=(898.395,'K')), NASAPolynomial(coeffs=[29.557,0.0139756,1.00742e-06,-5.56937e-10,4.07387e-14,-22045.5,-126.236], Tmin=(898.395,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-116.044,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsCsH) + group(Cs-CsCsOsOs) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Oxetane) + radical(CJCOOH)"""),
)
species(
label = '[CH2]C([O])(O)OC[CH]CC(1693)',
structure = SMILES('[CH2]C([O])(O)OC[CH]CC'),
E0 = (-64.3461,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.46258,0.107133,-0.000147351,1.25291e-07,-4.3672e-11,-7587,40.5004], Tmin=(100,'K'), Tmax=(810.507,'K')), NASAPolynomial(coeffs=[7.60695,0.0541861,-2.50756e-05,4.73956e-09,-3.26356e-13,-8464.05,5.92398], Tmin=(810.507,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-64.3461,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(453.139,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsH) + group(O2s-CsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsOs) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(CCJCO) + radical(CJCO)"""),
)
species(
label = 'C=C(OO)OC[CH]CC(1245)',
structure = SMILES('C=C(OO)OC[CH]CC'),
E0 = (-65.8073,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.458993,0.0997742,-9.70447e-05,5.08763e-08,-1.0974e-11,-7755.61,35.5115], Tmin=(100,'K'), Tmax=(1102.6,'K')), NASAPolynomial(coeffs=[15.3874,0.0422861,-1.88359e-05,3.58826e-09,-2.51916e-13,-11250,-42.486], Tmin=(1102.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-65.8073,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(453.139,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(O2s-O2s(Cds-Cd)) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + radical(CCJCO)"""),
)
species(
label = 'CC[CH]C[O](457)',
structure = SMILES('CC[CH]C[O]'),
E0 = (121.476,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2750,2800,2850,1350,1500,750,1050,1375,1000,332.604,2260.78,2262.04],'cm^-1')),
HinderedRotor(inertia=(0.131194,'amu*angstrom^2'), symmetry=1, barrier=(10.4348,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00287841,'amu*angstrom^2'), symmetry=1, barrier=(10.4364,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.392513,'amu*angstrom^2'), symmetry=1, barrier=(31.348,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (72.1057,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42933,0.0366362,-1.55369e-05,2.23845e-09,-2.94399e-15,14664.7,21.3149], Tmin=(100,'K'), Tmax=(2038.94,'K')), NASAPolynomial(coeffs=[13.569,0.0210029,-8.61212e-06,1.4706e-09,-9.22625e-14,8829.08,-43.5346], Tmin=(2038.94,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(121.476,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(CCJCO)"""),
)
species(
label = '[CH2][C]OO(1383)',
structure = SMILES('[CH2][C]OO'),
E0 = (489.885,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000,256.496],'cm^-1')),
HinderedRotor(inertia=(0.262911,'amu*angstrom^2'), symmetry=1, barrier=(12.0977,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.484979,'amu*angstrom^2'), symmetry=1, barrier=(23.1063,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.62339,'amu*angstrom^2'), symmetry=1, barrier=(28.7098,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (59.044,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.22612,0.0405311,-5.37167e-05,3.54818e-08,-9.20642e-12,58982.2,15.6805], Tmin=(100,'K'), Tmax=(944.91,'K')), NASAPolynomial(coeffs=[9.51151,0.00969003,-4.75723e-06,9.38608e-10,-6.69851e-14,57605.4,-19.0543], Tmin=(944.91,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(489.885,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(145.503,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CH2_triplet) + radical(CJCOOH)"""),
)
species(
label = '[O]O(16)',
structure = SMILES('[O]O'),
E0 = (-8.19602,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1036.72,2034.11,2034.11],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (33.0067,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(892.977,'J/mol'), sigma=(3.458,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.04595,-0.00173474,1.0377e-05,-1.02207e-08,3.3493e-12,-986.755,4.63579], Tmin=(100,'K'), Tmax=(932.129,'K')), NASAPolynomial(coeffs=[3.21022,0.00367946,-1.27704e-06,2.18051e-10,-1.46343e-14,-910.359,8.18305], Tmin=(932.129,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.19602,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsH) + group(O2s-OsH) + radical(HOOJ)"""),
)
species(
label = '[CH2][C]OC[CH]CC(896)',
structure = SMILES('[CH2][C]OC[CH]CC'),
E0 = (551.546,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1000,1200,1400,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (98.143,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.47464,0.0804128,-7.90731e-05,4.30897e-08,-9.78417e-12,66460.3,30.0008], Tmin=(100,'K'), Tmax=(1044.01,'K')), NASAPolynomial(coeffs=[11.8354,0.0368858,-1.65355e-05,3.15594e-09,-2.21674e-13,64088.1,-25.2978], Tmin=(1044.01,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(551.546,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(382.466,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CJCO) + radical(CH2_triplet) + radical(CCJCO)"""),
)
species(
label = 'C[CH2](6)',
structure = SMILES('C[CH2]'),
E0 = (108.526,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,474.132,1048.55,2319.88,2320.55,2321.73],'cm^-1')),
HinderedRotor(inertia=(0.000749852,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (29.0611,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2097.75,'J/mol'), sigma=(4.302,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.5, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82183,-0.00343357,5.09256e-05,-6.2021e-08,2.37073e-11,13066,7.61644], Tmin=(100,'K'), Tmax=(900.314,'K')), NASAPolynomial(coeffs=[5.15622,0.00943121,-1.81945e-06,2.21194e-10,-1.4348e-14,12064.1,-2.91103], Tmin=(900.314,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(108.526,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(153.818,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJ)"""),
)
species(
label = '[CH]CO[C]([CH2])OO(1385)',
structure = SMILES('[CH]CO[C]([CH2])OO'),
E0 = (465.325,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,360,370,350,3000,3100,440,815,1455,1000,2750,2850,1437.5,1250,1305,750,350,200,800,1000,1200,1400,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.481228,0.0835298,-0.000117059,8.95236e-08,-2.77531e-11,56086.8,31.2368], Tmin=(100,'K'), Tmax=(785.8,'K')), NASAPolynomial(coeffs=[10.9817,0.0300763,-1.5018e-05,2.94897e-09,-2.0839e-13,54436.6,-16.8904], Tmin=(785.8,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(465.325,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(282.692,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(CJCOOH) + radical(CCJ2_triplet)"""),
)
species(
label = '[CH]CC(45)',
structure = SMILES('[CH]CC'),
E0 = (327.691,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,319.066,319.142,1779.44],'cm^-1')),
HinderedRotor(inertia=(0.00165544,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00165526,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (42.0797,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.00517,0.0155111,1.85851e-05,-3.16862e-08,1.23732e-11,39453.6,11.9344], Tmin=(100,'K'), Tmax=(982.292,'K')), NASAPolynomial(coeffs=[6.73204,0.0159276,-5.86166e-06,1.06538e-09,-7.51285e-14,37969.2,-9.8082], Tmin=(982.292,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(327.691,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJ2_triplet)"""),
)
species(
label = '[CH2]O[C]([CH2])OO(1384)',
structure = SMILES('[CH2]O[C]([CH2])OO'),
E0 = (242.529,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,360,370,350,287.045,288.106],'cm^-1')),
HinderedRotor(inertia=(0.148269,'amu*angstrom^2'), symmetry=1, barrier=(8.7013,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.564521,'amu*angstrom^2'), symmetry=1, barrier=(32.7696,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.000101649,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.149117,'amu*angstrom^2'), symmetry=1, barrier=(8.68842,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0278713,'amu*angstrom^2'), symmetry=1, barrier=(32.77,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (89.07,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.28011,0.0645131,-8.51934e-05,6.1044e-08,-1.77929e-11,29263.2,26.3264], Tmin=(100,'K'), Tmax=(832.472,'K')), NASAPolynomial(coeffs=[9.74505,0.0238394,-1.19053e-05,2.35308e-09,-1.67469e-13,27853.9,-12.96], Tmin=(832.472,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(242.529,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(236.962,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsHHH) + group(Cs-OsHHH) + radical(Cs_P) + radical(CsJOCH3) + radical(CJCOOH)"""),
)
species(
label = '[CH2][C](OO)OC[C]CC(1694)',
structure = SMILES('[CH2][C](OO)OC[C]CC'),
E0 = (417.765,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,360,370,350,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,200,800,1000,1200,1400,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (130.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.726276,0.111956,-0.000140173,9.8933e-08,-2.88358e-11,50408.6,38.9726], Tmin=(100,'K'), Tmax=(828.151,'K')), NASAPolynomial(coeffs=[12.6901,0.0471551,-2.28047e-05,4.45296e-09,-3.15053e-13,48186.3,-23.2243], Tmin=(828.151,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(417.765,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CJCOOH) + radical(Cs_P) + radical(CCJ2_triplet)"""),
)
species(
label = '[CH][C](OO)OC[CH]CC(1695)',
structure = SMILES('[CH][C](OO)OC[CH]CC'),
E0 = (403.704,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1310,387.5,850,1000,360,370,350,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,200,800,933.333,1066.67,1200,1333.33,1466.67,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (130.142,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.355131,0.102704,-0.000113726,7.06675e-08,-1.83534e-11,48705.4,39.2411], Tmin=(100,'K'), Tmax=(918.424,'K')), NASAPolynomial(coeffs=[12.4835,0.0467901,-2.24098e-05,4.38535e-09,-3.11717e-13,46347,-21.6058], Tmin=(918.424,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(403.704,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJCO) + radical(CCJ2_triplet) + radical(Cs_P)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.64289,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.53101,-0.000123661,-5.02999e-07,2.43531e-09,-1.40881e-12,-1046.98,2.96747], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.95258,0.0013969,-4.92632e-07,7.8601e-11,-4.60755e-15,-923.949,5.87189], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-8.64289,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'He',
structure = SMILES('[He]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (4.0026,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(84.8076,'J/mol'), sigma=(2.576,'angstroms'), dipoleMoment=(0,'De'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""NOx2018"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,0.928724], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,0.928724], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""He""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'Ar',
structure = SMILES('[Ar]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
    molecularWeight = (39.948,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ar""", comment="""Thermo library: primaryThermoLibrary"""),
)
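# Illustrative sketch, not part of the Arkane input: the TransportData entries
# for the bath gases above (N2, Ne, He, Ar) supply Lennard-Jones parameters;
# a collision rate constant can be estimated with the Neufeld et al. (1972)
# fit for the reduced collision integral Omega(2,2)*. All helper names here
# are hypothetical.
import math

def lj_collision_rate(T, sigma_angstrom, epsilon_J_mol, mu_amu):
    kB, R, NA = 1.380649e-23, 8.314462618, 6.02214076e23
    Tstar = T * R / epsilon_J_mol                     # reduced temperature T/(eps/kB)
    omega = (1.16145 * Tstar**-0.14874
             + 0.52487 * math.exp(-0.77320 * Tstar)
             + 2.16178 * math.exp(-2.43787 * Tstar))
    sigma = sigma_angstrom * 1e-10                    # angstrom -> m
    mu = mu_amu * 1.66053906660e-27                   # reduced mass, amu -> kg
    v_rel = math.sqrt(8.0 * kB * T / (math.pi * mu))  # mean relative speed, m/s
    return NA * math.pi * sigma**2 * omega * v_rel    # m^3/(mol*s)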
transitionState(
label = 'TS1',
E0 = (169.452,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (308.18,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (264.879,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (169.452,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (285.941,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (169.452,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (328.444,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (278.236,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (334.233,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (304.623,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (321.654,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (300.577,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (253.132,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (212.702,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (212.128,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (244.544,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (230.245,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (232.63,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (515.006,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (349.777,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (540.356,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (533.86,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (575.703,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (561.712,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (586.503,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (232.852,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (177.82,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (612.323,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (415.052,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS30',
E0 = (177.736,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS31',
E0 = (264.138,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS32',
E0 = (169.452,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS33',
E0 = (616.527,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS34',
E0 = (548.517,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS35',
E0 = (608.167,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS36',
E0 = (604.536,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS37',
E0 = (629.569,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS38',
E0 = (615.509,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['C=CCC(36)', '[CH2]C(=O)OO(1167)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
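# Illustrative sketch, not part of the Arkane input: each reaction in this
# network carries a modified Arrhenius expression,
# k(T) = A*(T/T0)**n*exp(-Ea/(R*T)), while the transitionState E0 values above
# set the barrier heights on the potential energy surface (e.g. TS1 at
# 169.452 kJ/mol pairs with the Ea = 0 assigned to reaction1).
# The helper name `arrhenius_k` is hypothetical.
import math

def arrhenius_k(A, n, Ea_kJ_mol, T, T0=1.0):
    R = 8.314462618e-3  # gas constant, kJ/(mol*K)
    return A * (T / T0) ** n * math.exp(-Ea_kJ_mol / (R * T))

# e.g. arrhenius_k(5e12, 0, 0.0, 1000.0) returns 5e12 s^-1 for reaction1.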
reaction(
label = 'reaction2',
reactants = ['[CH2][C](OO)OCC=CC(1671)', 'H(8)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(1.46e+08,'cm^3/(mol*s)'), n=1.64, Ea=(5.73208,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2555 used for Cds-CsH_Cds-CsH;HJ
Exact match found for rate rule [Cds-CsH_Cds-CsH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction3',
reactants = ['[CH2][C](OO)OC=CCC(1672)', 'H(8)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(2.182e+10,'cm^3/(mol*s)'), n=0.859, Ea=(6.76971,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""From training reaction 2821 used for Cds-OsH_Cds-CsH;HJ
Exact match found for rate rule [Cds-OsH_Cds-CsH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['[CH2]C(=O)OO(1167)', '[CH2][CH]CC(39)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(1.81675,'m^3/(mol*s)'), n=2.00263, Ea=(148.227,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R_R;CJ] + [Od_R;YJ] for rate rule [Od_R;CJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond
Ea raised from 146.6 to 148.2 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction5',
reactants = ['[CH3](11)', '[CH2][C](OO)OCC=C(1507)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(532000,'cm^3/(mol*s)'), n=1.85, Ea=(23.8906,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""From training reaction 2779 used for Cds-HH_Cds-Cs\O2s/H;CsJ-HHH
Exact match found for rate rule [Cds-HH_Cds-Cs\O2s/H;CsJ-HHH]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction6',
reactants = ['OH(D)(132)', '[CH2]C(=O)OC[CH]CC(1673)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(1.12189e+07,'m^3/(mol*s)'), n=-0.377333, Ea=(240.614,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R_R;OJ_pri] for rate rule [Od_R;OJ_pri]
Euclidian distance = 1.0
family: R_Addition_MultipleBond
Ea raised from 238.0 to 240.6 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction7',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2][C](OO)OCC[CH]C(1674)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(6.76e+09,'s^-1'), n=0.88, Ea=(158.992,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 357 used for R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeC
Exact match found for rate rule [R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2][C](OO)O[CH]CCC(1634)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(5.4e-20,'s^-1'), n=9.13, Ea=(108.784,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""From training reaction 341 used for R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeO
Exact match found for rate rule [R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeO]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction9',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2]C(OO)O[CH][CH]CC(1675)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(5.8344e+08,'s^-1'), n=1.32036, Ea=(164.782,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_O;Y_rad_out;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2]C(O[O])OC[CH]CC(1676)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(40813.3,'s^-1'), n=2.17068, Ea=(135.172,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3H_SS;Y_rad_out;O_H_out] + [R3H_SS_O;Y_rad_out;XH_out] for rate rule [R3H_SS_O;Y_rad_out;O_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['[CH2]CCCO[C]([CH2])OO(1677)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(1.18e+10,'s^-1'), n=0.82, Ea=(146.858,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 186 used for R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/NonDeC
Exact match found for rate rule [R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['CC[CH][CH]O[C](C)OO(1678)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(869.832,'s^-1'), n=2.67444, Ea=(131.125,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4Hall;C_rad_out_2H;XH_out] for rate rule [R4HJ_1;C_rad_out_2H;XH_out]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['CC[CH]CO[C](C)O[O](1679)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(8.6e-09,'s^-1'), n=5.55, Ea=(83.68,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using template [R4Hall;C_rad_out_2H;O_H_out] for rate rule [R4HJ_1;C_rad_out_2H;O_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction14',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2]C(OO)OC[CH][CH]C(1680)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(379583,'s^-1'), n=1.54051, Ea=(43.2505,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5Hall;Y_rad_out;Cs_H_out_H/NonDeC] for rate rule [R5HJ_3;Y_rad_out;Cs_H_out_H/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction15',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['C[CH][CH]CO[C](C)OO(1681)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(25800,'s^-1'), n=1.67, Ea=(42.6768,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R6Hall;C_rad_out_2H;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction16',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2]C[CH]COC([CH2])OO(1682)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(3.31883e+06,'s^-1'), n=1.02765, Ea=(75.0925,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6Hall;Y_rad_out;Cs_H_out_2H] for rate rule [R6HJ_3;Y_rad_out;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction17',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2][C](O[O])OCCCC(1683)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(46.1,'s^-1'), n=3.21, Ea=(60.7935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6Hall;C_rad_out_H/NonDeC;XH_out] for rate rule [R6HJ_3;C_rad_out_H/NonDeC;O_H_out]
Euclidian distance = 1.41421356237
family: intra_H_migration"""),
)
reaction(
label = 'reaction18',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2]C[CH]CO[C](C)OO(1684)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(64.2,'s^-1'), n=2.1, Ea=(63.1784,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R7Hall;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction19',
reactants = ['[CH2][C]([O])OO(1352)', '[CH2][CH]CC(39)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(1.9789e+07,'m^3/(mol*s)'), n=-0.126319, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -15.6 to 0 kJ/mol."""),
)
reaction(
label = 'reaction20',
reactants = ['OH(D)(132)', '[CH2][C]([O])OC[CH]CC(1685)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(3.05166e+07,'m^3/(mol*s)'), n=0.045, Ea=(0.1046,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [O_pri_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction21',
reactants = ['H(8)', '[CH2][C](O[O])OC[CH]CC(1686)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(5.00518e+06,'m^3/(mol*s)'), n=0.282325, Ea=(7.09479,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Y_rad;O_rad/NonDe] + [H_rad;O_sec_rad] for rate rule [H_rad;O_rad/NonDe]
Euclidian distance = 1.0
family: R_Recombination"""),
)
reaction(
label = 'reaction22',
reactants = ['[CH3](11)', '[CH2][CH]CO[C]([CH2])OO(1520)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(1.66881e+08,'m^3/(mol*s)'), n=-0.401267, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;C_methyl]
Euclidian distance = 0
family: R_Recombination
Ea raised from -6.7 to 0 kJ/mol."""),
)
reaction(
label = 'reaction23',
reactants = ['H(8)', '[CH2][C](OO)OC[CH][CH]C(1687)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(4.34078e+06,'m^3/(mol*s)'), n=0.278577, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;H_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -1.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction24',
reactants = ['H(8)', '[CH2][C](OO)O[CH][CH]CC(1688)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(4.34078e+06,'m^3/(mol*s)'), n=0.278577, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;H_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -1.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction25',
reactants = ['[CH2]C[CH]CO[C]([CH2])OO(1689)', 'H(8)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(3.48677e-12,'cm^3/(molecule*s)'), n=0.6, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 18 used for C_rad/H2/Cs;H_rad
Exact match found for rate rule [C_rad/H2/Cs;H_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -3.3 to 0 kJ/mol."""),
)
reaction(
label = 'reaction26',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2]C(OO)OC=CCC(1690)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 1 used for R3radExo;Y_rad_NDe;XH_Rrad_NDe
Exact match found for rate rule [R3radExo;Y_rad_NDe;XH_Rrad_NDe]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction27',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2]C(OO)OCC=CC(1691)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(6.42e+09,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction28',
reactants = ['CH2(S)(14)', '[CH2][C](OO)OC[CH]C(1225)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(215646,'m^3/(mol*s)'), n=0.444, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [carbene;R_H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: 1,2_Insertion_carbene
Ea raised from -5.1 to 0 kJ/mol."""),
)
reaction(
label = 'reaction29',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2][C](OO)OCC([CH2])C(1692)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(3.53e+06,'s^-1'), n=1.73, Ea=(245.601,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HH)CJ;CsJ;CH3] for rate rule [cCs(-HH)CJ;CsJ-CsH;CH3]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction30',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2]C1(OO)OCC1CC(1243)'],
transitionState = 'TS30',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;Y_rad_out;Cpri_rad_out_single] for rate rule [R4_SSS;Y_rad_out;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction31',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['[CH2]C([O])(O)OC[CH]CC(1693)'],
transitionState = 'TS31',
kinetics = Arrhenius(A=(4.72906e+10,'s^-1'), n=0, Ea=(94.6862,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnOOH;Y_rad_out] for rate rule [ROOH;Y_rad_out]
Euclidian distance = 1.0
family: intra_OH_migration"""),
)
reaction(
label = 'reaction32',
reactants = ['[CH2][C](OO)OC[CH]CC(1239)'],
products = ['C=C(OO)OC[CH]CC(1245)'],
transitionState = 'TS32',
kinetics = Arrhenius(A=(3.98e+07,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [Y_12_20] for rate rule [Y_12_20a]
Euclidian distance = 1.0
family: 1,2-Birad_to_alkene"""),
)
reaction(
label = 'reaction33',
reactants = ['CC[CH]C[O](457)', '[CH2][C]OO(1383)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS33',
kinetics = Arrhenius(A=(43.5839,'m^3/(mol*s)'), n=1.88017, Ea=(5.1666,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [O_rad/NonDe;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction34',
reactants = ['[O]O(16)', '[CH2][C]OC[CH]CC(896)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS34',
kinetics = Arrhenius(A=(43.5839,'m^3/(mol*s)'), n=1.88017, Ea=(5.1666,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [O_rad/NonDe;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction35',
reactants = ['C[CH2](6)', '[CH]CO[C]([CH2])OO(1385)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS35',
kinetics = Arrhenius(A=(1.14854e+06,'m^3/(mol*s)'), n=0.575199, Ea=(34.3157,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H2/Cs;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction36',
reactants = ['[CH]CC(45)', '[CH2]O[C]([CH2])OO(1384)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS36',
kinetics = Arrhenius(A=(1.14854e+06,'m^3/(mol*s)'), n=0.575199, Ea=(34.3157,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H2/O;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction37',
reactants = ['[CH2][C](OO)OC[C]CC(1694)', 'H(8)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS37',
kinetics = Arrhenius(A=(1e+07,'m^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [H_rad;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction38',
reactants = ['[CH][C](OO)OC[CH]CC(1695)', 'H(8)'],
products = ['[CH2][C](OO)OC[CH]CC(1239)'],
transitionState = 'TS38',
kinetics = Arrhenius(A=(1e+07,'m^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [H_rad;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
network(
label = '509',
isomers = [
'[CH2][C](OO)OC[CH]CC(1239)',
],
reactants = [
('C=CCC(36)', '[CH2]C(=O)OO(1167)'),
],
bathGas = {
'N2': 0.25,
'Ne': 0.25,
'He': 0.25,
'Ar': 0.25,
},
)
pressureDependence(
label = '509',
Tmin = (1200,'K'),
Tmax = (1500,'K'),
Tcount = 10,
Tlist = ([1201.48,1213.22,1236.21,1269.31,1310.55,1356.92,1404.16,1447.02,1479.84,1497.7],'K'),
Pmin = (1,'atm'),
Pmax = (10,'atm'),
Pcount = 10,
Plist = ([1.02771,1.14872,1.41959,1.89986,2.67608,3.83649,5.40396,7.23219,8.93758,9.98989],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
| [
"[email protected]"
] | |
0296081c7b5ffd91876af93bf00f5ccdda287dd6 | 292cec77b5003a2f80360d0aee77556d12d990f7 | /src/bentoml_cli/worker/grpc_api_server.py | 87a8e336eb31fd6ceeb1eba0e4e6b26cc1e24fd8 | [
"Apache-2.0"
] | permissive | yubozhao/BentoML | 194a6ec804cc1c6dbe7930c49948b6707cbc3c5f | d4bb5cbb90f9a8ad162a417103433b9c33b39c84 | refs/heads/master | 2022-12-17T00:18:55.555897 | 2022-12-06T00:11:39 | 2022-12-06T00:11:39 | 178,978,385 | 3 | 0 | Apache-2.0 | 2020-12-01T18:17:15 | 2019-04-02T01:53:53 | Python | UTF-8 | Python | false | false | 4,379 | py | from __future__ import annotations
import json
import typing as t
import click
@click.command()
@click.argument("bento_identifier", type=click.STRING, required=False, default=".")
@click.option("--host", type=click.STRING, required=False, default=None)
@click.option("--port", type=click.INT, required=False, default=None)
@click.option(
"--runner-map",
type=click.STRING,
envvar="BENTOML_RUNNER_MAP",
help="JSON string of runners map, default sets to envars `BENTOML_RUNNER_MAP`",
)
@click.option(
"--working-dir",
type=click.Path(exists=True),
help="Working directory for the API server",
)
@click.option(
"--prometheus-dir",
type=click.Path(exists=True),
help="Required by prometheus to pass the metrics in multi-process mode",
)
@click.option(
"--worker-id",
required=False,
type=click.INT,
default=None,
help="If set, start the server as a bare worker with the given worker ID. Otherwise start a standalone server with a supervisor process.",
)
@click.option(
"--enable-reflection",
type=click.BOOL,
is_flag=True,
help="Enable reflection.",
)
@click.option(
"--enable-channelz",
type=click.BOOL,
is_flag=True,
help="Enable channelz.",
default=False,
)
@click.option(
"--max-concurrent-streams",
type=click.INT,
help="Maximum number of concurrent incoming streams to allow on a HTTP2 connection.",
default=None,
)
@click.option(
"--ssl-certfile",
type=str,
default=None,
help="SSL certificate file",
)
@click.option(
"--ssl-keyfile",
type=str,
default=None,
help="SSL key file",
)
@click.option(
"--ssl-ca-certs",
type=str,
default=None,
help="CA certificates file",
)
def main(
bento_identifier: str,
host: str,
port: int,
prometheus_dir: str | None,
runner_map: str | None,
working_dir: str | None,
worker_id: int | None,
enable_reflection: bool,
enable_channelz: bool,
max_concurrent_streams: int | None,
ssl_certfile: str | None,
ssl_keyfile: str | None,
ssl_ca_certs: str | None,
):
"""
Start the BentoML gRPC API server.
\b
This is an internal API; users should not invoke it directly. Instead, use `bentoml serve-grpc <path> [--options]`.
"""
import bentoml
from bentoml._internal.log import configure_server_logging
from bentoml._internal.context import component_context
from bentoml._internal.configuration.containers import BentoMLContainer
component_context.component_type = "grpc_api_server"
component_context.component_index = worker_id
configure_server_logging()
if worker_id is None:
# worker ID is not set; this server is running in standalone mode
# and should not be concerned with the status of its runners
BentoMLContainer.config.runner_probe.enabled.set(False)
BentoMLContainer.development_mode.set(False)
if prometheus_dir is not None:
BentoMLContainer.prometheus_multiproc_dir.set(prometheus_dir)
if runner_map is not None:
BentoMLContainer.remote_runner_mapping.set(json.loads(runner_map))
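# runner_map arrives as a JSON object mapping runner names to addresses,
# e.g. '{"my_runner": "tcp://127.0.0.1:8001"}' (hypothetical values)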
svc = bentoml.load(bento_identifier, working_dir=working_dir, standalone_load=True)
if not port:
port = BentoMLContainer.grpc.port.get()
if not host:
host = BentoMLContainer.grpc.host.get()
# setup context
component_context.component_name = svc.name
if svc.tag is None:
component_context.bento_name = svc.name
component_context.bento_version = "not available"
else:
component_context.bento_name = svc.tag.name
component_context.bento_version = svc.tag.version or "not available"
from bentoml._internal.server import grpc
grpc_options: dict[str, t.Any] = {
"bind_address": f"{host}:{port}",
"enable_reflection": enable_reflection,
"enable_channelz": enable_channelz,
}
if max_concurrent_streams:
grpc_options["max_concurrent_streams"] = int(max_concurrent_streams)
if ssl_certfile:
grpc_options["ssl_certfile"] = ssl_certfile
if ssl_keyfile:
grpc_options["ssl_keyfile"] = ssl_keyfile
if ssl_ca_certs:
grpc_options["ssl_ca_certs"] = ssl_ca_certs
grpc.Server(svc.grpc_servicer, **grpc_options).run()
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
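# Example invocation (sketch only — the bento tag and option values are hypothetical;
# in practice this worker is spawned by `bentoml serve-grpc`, not run by hand):
#   python -m bentoml_cli.worker.grpc_api_server iris_classifier:latest \
#       --host 0.0.0.0 --port 3000 --worker-id 0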
| [
"[email protected]"
] | |
77a1e59545434c658624ddc110f40d384f6e9774 | b31e7898aa5131125f243eaff973049b17e08512 | /.venv/bin/coverage | 85f14e7690c8b0e6cbea6f2ec81bf93f8cf98538 | [] | no_license | ramsred/MyProjects | f2978eeda3d73421daf0da9f2d012caef6c3ccda | a7f90ef1ecfbc7517be61e71286bd14405985de5 | refs/heads/master | 2023-07-09T03:19:17.683705 | 2023-07-02T19:30:19 | 2023-07-02T19:30:19 | 71,980,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | #!/workspaces/MyProjects/.venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from coverage.cmdline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
7cb73ceefb5f84a42e46e124b96ab48221531a18 | 368ef102345a6575d97727b314addcd65f150e19 | /Modules/Crawler/Test/test_urlencode.py | 1551c2f5ebf5acfefcf1449c5331ebfdb14c4aa6 | [] | no_license | newstar123/Pythonlab_DEV | 844b43b6a8aef977515e367bd367e658e37cfa19 | cdb2f0c2369291d6bccd597c33354830e5606eab | refs/heads/master | 2020-04-07T18:08:06.631487 | 2018-11-21T19:47:54 | 2018-11-21T19:47:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | # -*- coding: utf-8 -*-
#---------------------------------------
# Program:  test_urlencode.py
# Version:  0.1
# Author:   ctang
# Date:     2016-02-19
# Language: Python 2.7.10
# Purpose:  Test the urlencode method
#---------------------------------------
import urllib
import urllib2
data = {}
data['word'] = 'python'
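# urlencode serializes the dict into a query string: {'word': 'python'} -> 'word=python'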
url_value = urllib.urlencode(data)
url = "http://www.baidu.com/s?"
full_url = url + url_value
print full_url
page = urllib2.urlopen(full_url).read()
result = page.decode('utf-8')
print result
| [
"[email protected]"
] | |
fd829064e4909b62f86477e660a7cd3ab16a8829 | 20c9f3a089286a442cc15f8a31bb34e110e68d8b | /tests/python/setitem.py | 3c7a408d59d93ef0b1725084817a4cd7ce5274e2 | [
"MIT"
] | permissive | denim2x/py2nim | 00ca515daef897d380dbf4915583a470ffe4c94e | 56fc2699d31241c60bed726f59efea4bf46be238 | refs/heads/master | 2021-09-28T06:37:42.786868 | 2018-11-15T08:12:30 | 2018-11-15T08:12:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | class A:
def __setitem__(self, e, f):
print(e + f)
a = A()
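# The subscript assignment below dispatches to A.__setitem__(a, 2, 8), so it prints 10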
a[2] = 8
| [
"[email protected]"
] | |
37fd9413602461c02c3a941cb3de80c99a03bdcd | 5c238e795dd280f3ccd3d772d75ce8664c9e2887 | /ch09/improved_spark_mllib_model.py | 35b6016e18ca191a147eb20e1123bf01d321acef | [
"MIT"
] | permissive | wikibook/agile-data-science | d149878fff6da6cd8e71c579e0817af8ff4ff05b | 7769fc2d6c810e9f1a64e45d3684e9260d99d983 | refs/heads/master | 2020-03-11T06:07:21.118819 | 2018-04-17T00:25:27 | 2018-04-17T00:25:27 | 129,822,191 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,615 | py | #!/usr/bin/env python
import sys, os, re
import json
import datetime, iso8601
from tabulate import tabulate
# Pass the date and base path to main() from airflow
def main(base_path):
APP_NAME = "train_spark_mllib_model.py"
# If there is no SparkSession, create the environment
try:
sc and spark
except NameError as e:
import findspark
findspark.init()
import pyspark
import pyspark.sql
sc = pyspark.SparkContext()
spark = pyspark.sql.SparkSession(sc).builder.appName(APP_NAME).getOrCreate()
#
# {
# "ArrDelay":5.0,"CRSArrTime":"2015-12-31T03:20:00.000-08:00","CRSDepTime":"2015-12-31T03:05:00.000-08:00",
# "Carrier":"WN","DayOfMonth":31,"DayOfWeek":4,"DayOfYear":365,"DepDelay":14.0,"Dest":"SAN","Distance":368.0,
# "FlightDate":"2015-12-30T16:00:00.000-08:00","FlightNum":"6109","Origin":"TUS"
# }
#
from pyspark.sql.types import StringType, IntegerType, FloatType, DoubleType, DateType, TimestampType
from pyspark.sql.types import StructType, StructField
from pyspark.sql.functions import udf
schema = StructType([
StructField("ArrDelay", DoubleType(), True), # "ArrDelay":5.0
StructField("CRSArrTime", TimestampType(), True), # "CRSArrTime":"2015-12-31T03:20:00.000-08:00"
StructField("CRSDepTime", TimestampType(), True), # "CRSDepTime":"2015-12-31T03:05:00.000-08:00"
StructField("Carrier", StringType(), True), # "Carrier":"WN"
StructField("DayOfMonth", IntegerType(), True), # "DayOfMonth":31
StructField("DayOfWeek", IntegerType(), True), # "DayOfWeek":4
StructField("DayOfYear", IntegerType(), True), # "DayOfYear":365
StructField("DepDelay", DoubleType(), True), # "DepDelay":14.0
StructField("Dest", StringType(), True), # "Dest":"SAN"
StructField("Distance", DoubleType(), True), # "Distance":368.0
StructField("FlightDate", DateType(), True), # "FlightDate":"2015-12-30T16:00:00.000-08:00"
StructField("FlightNum", StringType(), True), # "FlightNum":"6109"
StructField("Origin", StringType(), True), # "Origin":"TUS"
])
input_path = "{}/data/simple_flight_delay_features.json".format(
base_path
)
features = spark.read.json(input_path, schema=schema)
features.first()
#
# Add a Route variable to replace FlightNum
#
from pyspark.sql.functions import lit, concat
features_with_route = features.withColumn(
'Route',
concat(
features.Origin,
lit('-'),
features.Dest
)
)
features_with_route.show(6)
#
# Add the hour of day of scheduled arrival/departure
#
from pyspark.sql.functions import hour
features_with_hour = features_with_route.withColumn(
"CRSDepHourOfDay",
hour(features.CRSDepTime)
)
features_with_hour = features_with_hour.withColumn(
"CRSArrHourOfDay",
hour(features.CRSArrTime)
)
features_with_hour.select("CRSDepTime", "CRSDepHourOfDay", "CRSArrTime", "CRSArrHourOfDay").show()
#
# Use pyspark.ml.feature.Bucketizer to bucketize ArrDelay into early, on time, slightly late, very late (0, 1, 2, 3)
#
from pyspark.ml.feature import Bucketizer
# Set up the bucketizer
splits = [-float("inf"), -15.0, 0, 30.0, float("inf")]
arrival_bucketizer = Bucketizer(
splits=splits,
inputCol="ArrDelay",
outputCol="ArrDelayBucket"
)
# Save the model
arrival_bucketizer_path = "{}/models/arrival_bucketizer_2.0.bin".format(base_path)
arrival_bucketizer.write().overwrite().save(arrival_bucketizer_path)
# Apply the model
ml_bucketized_features = arrival_bucketizer.transform(features_with_hour)
ml_bucketized_features.select("ArrDelay", "ArrDelayBucket").show()
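# e.g. the sample record with ArrDelay = 5.0 falls in the [0, 30) split, so ArrDelayBucket = 2.0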
#
# Import the feature tooling from pyspark.ml.feature
#
from pyspark.ml.feature import StringIndexer, VectorAssembler
# Turn categorical fields into indexes
for column in ["Carrier", "Origin", "Dest", "Route"]:
string_indexer = StringIndexer(
inputCol=column,
outputCol=column + "_index"
)
string_indexer_model = string_indexer.fit(ml_bucketized_features)
ml_bucketized_features = string_indexer_model.transform(ml_bucketized_features)
# Save the pipeline model
string_indexer_output_path = "{}/models/string_indexer_model_3.0.{}.bin".format(
base_path,
column
)
string_indexer_model.write().overwrite().save(string_indexer_output_path)
# Combine continuous numeric fields with the indexes of the nominal fields into one feature vector
numeric_columns = [
"DepDelay", "Distance",
"DayOfMonth", "DayOfWeek",
"DayOfYear", "CRSDepHourOfDay",
"CRSArrHourOfDay"]
index_columns = ["Carrier_index", "Origin_index",
"Dest_index", "Route_index"]
vector_assembler = VectorAssembler(
inputCols=numeric_columns + index_columns,
outputCol="Features_vec"
)
final_vectorized_features = vector_assembler.transform(ml_bucketized_features)
# Save the numeric vector assembler
vector_assembler_path = "{}/models/numeric_vector_assembler_3.0.bin".format(base_path)
vector_assembler.write().overwrite().save(vector_assembler_path)
# Drop the index columns
for column in index_columns:
final_vectorized_features = final_vectorized_features.drop(column)
# Inspect the finalized features
final_vectorized_features.show()
#
# Cross-validate, train and evaluate the classifier: iterate over split_count test/train splits, scoring 4 metrics each time
#
from collections import defaultdict
scores = defaultdict(list)
feature_importances = defaultdict(list)
metric_names = ["accuracy", "weightedPrecision", "weightedRecall", "f1"]
split_count = 3
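# Each iteration draws a fresh random 80/20 train/test split via randomSplit below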
for i in range(1, split_count + 1):
print("\nRun {} out of {} of test/train splits in cross validation...".format(
i,
split_count,
)
)
# Test/train split
training_data, test_data = final_vectorized_features.randomSplit([0.8, 0.2])
# Instantiate and fit a random forest classifier on all the data
from pyspark.ml.classification import RandomForestClassifier
rfc = RandomForestClassifier(
featuresCol="Features_vec",
labelCol="ArrDelayBucket",
predictionCol="Prediction",
maxBins=4657,
)
model = rfc.fit(training_data)
# Save the new model over the old one
model_output_path = "{}/models/spark_random_forest_classifier.flight_delays.baseline.bin".format(
base_path
)
model.write().overwrite().save(model_output_path)
# Evaluate the model against the test data
predictions = model.transform(test_data)
# Evaluate this test/train split's results for each metric
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
for metric_name in metric_names:
evaluator = MulticlassClassificationEvaluator(
labelCol="ArrDelayBucket",
predictionCol="Prediction",
metricName=metric_name
)
score = evaluator.evaluate(predictions)
scores[metric_name].append(score)
print("{} = {}".format(metric_name, score))
#
# Collect feature importances
#
feature_names = vector_assembler.getInputCols()
feature_importance_list = model.featureImportances
for feature_name, feature_importance in zip(feature_names, feature_importance_list):
feature_importances[feature_name].append(feature_importance)
#
# Evaluate the average and standard deviation for each metric and print a table
#
import numpy as np
score_averages = defaultdict(float)
# Compute the table data
average_stds = [] # ha
for metric_name in metric_names:
metric_scores = scores[metric_name]
average_accuracy = sum(metric_scores) / len(metric_scores)
score_averages[metric_name] = average_accuracy
std_accuracy = np.std(metric_scores)
average_stds.append((metric_name, average_accuracy, std_accuracy))
# Print the table
print("\nExperiment Log")
print("--------------")
print(tabulate(average_stds, headers=["Metric", "Average", "STD"]))
#
# Persist the scores to a score log that exists between runs
#
import pickle
# Load the score log or initialize an empty one
try:
score_log_filename = "{}/models/score_log.pickle".format(base_path)
score_log = pickle.load(open(score_log_filename, "rb"))
if not isinstance(score_log, list):
score_log = []
except IOError:
score_log = []
# Compute this run's score log entry
score_log_entry = {metric_name: score_averages[metric_name] for metric_name in metric_names}
# Compute and display the change in score for each metric
try:
last_log = score_log[-1]
except (IndexError, TypeError, AttributeError):
last_log = score_log_entry
experiment_report = []
for metric_name in metric_names:
run_delta = score_log_entry[metric_name] - last_log[metric_name]
experiment_report.append((metric_name, run_delta))
print("\nExperiment Report")
print("-----------------")
print(tabulate(experiment_report, headers=["Metric", "Score"]))
# Append this run's average scores to the log
score_log.append(score_log_entry)
# Persist the log for next run
pickle.dump(score_log, open(score_log_filename, "wb"))
#
# Analyze and report feature importance changes
#
# Compute averages for each feature
feature_importance_entry = defaultdict(float)
for feature_name, value_list in feature_importances.items():
average_importance = sum(value_list) / len(value_list)
feature_importance_entry[feature_name] = average_importance
# Sort the feature importances in descending order and print
import operator
sorted_feature_importances = sorted(
feature_importance_entry.items(),
key=operator.itemgetter(1),
reverse=True
)
print("\nFeature Importances")
print("-------------------")
print(tabulate(sorted_feature_importances, headers=['Name', 'Importance']))
#
# Compare this run's feature importances with the previous run's
#
# Load the feature importance log or initialize an empty one
try:
feature_log_filename = "{}/models/feature_log.pickle".format(base_path)
feature_log = pickle.load(open(feature_log_filename, "rb"))
if not isinstance(feature_log, list):
feature_log = []
except IOError:
feature_log = []
# Compute and display the change in importance for each feature
try:
last_feature_log = feature_log[-1]
except (IndexError, TypeError, AttributeError):
last_feature_log = defaultdict(float)
for feature_name, importance in feature_importance_entry.items():
last_feature_log[feature_name] = importance
# Compute the deltas
feature_deltas = {}
for feature_name in feature_importances.keys():
run_delta = feature_importance_entry[feature_name] - last_feature_log[feature_name]
feature_deltas[feature_name] = run_delta
# Sort the feature deltas, biggest change first
import operator
sorted_feature_deltas = sorted(
feature_deltas.items(),
key=operator.itemgetter(1),
reverse=True
)
# Display the sorted feature deltas
print("\nFeature Importance Delta Report")
print("-------------------------------")
print(tabulate(sorted_feature_deltas, headers=["Feature", "Delta"]))
# Append this run's average importances to the log
feature_log.append(feature_importance_entry)
# Persist the log for the next run
pickle.dump(feature_log, open(feature_log_filename, "wb"))
if __name__ == "__main__":
main(sys.argv[1])
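# Example invocation (assuming data/ and models/ live under the base path):
#   python improved_spark_mllib_model.py .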
| [
"[email protected]"
] | |
8cbb4e9c73c074d346ac82b353623f3c95b574d0 | 37c6021f06d5d5aca2693b12449aab483d123669 | /backend/task_profile/migrations/0001_initial.py | 37879e374141ed157d4e079eb19ba798a8d6f010 | [] | no_license | crowdbotics-apps/den-sc-ity-21727 | 8165971a3611a143b2ba2a5bf865375a10cb7744 | 13790db00a09483f4a23df177c544b6d07acd2c3 | refs/heads/master | 2022-12-31T04:32:18.963986 | 2020-10-19T21:46:40 | 2020-10-19T21:46:40 | 305,520,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,239 | py | # Generated by Django 2.2.16 on 2020-10-19 21:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='TaskerProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mobile_number', models.CharField(max_length=20)),
('photo', models.URLField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('last_login', models.DateTimeField(blank=True, null=True)),
('description', models.TextField(blank=True, null=True)),
('city', models.CharField(blank=True, max_length=50, null=True)),
('vehicle', models.CharField(blank=True, max_length=50, null=True)),
('closing_message', models.TextField(blank=True, null=True)),
('work_area_radius', models.FloatField(blank=True, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerprofile_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=20)),
('message', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('user', models.ManyToManyField(related_name='notification_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='InviteCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=20)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invitecode_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='CustomerProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mobile_number', models.CharField(max_length=20)),
('photo', models.URLField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('last_login', models.DateTimeField(blank=True, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerprofile_user', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
410d4bb5e5e781c634ccb36147315481554ca815 | 542d6c7a1303916a60bf0d7d24c8499a02f961c1 | /lib/python3.7/site-packages/azure/mgmt/recoveryservicesbackup/models/mab_file_folder_protected_item.py | f04f303870c1f5e21e635c8a9ec6e22c29c9d67c | [] | no_license | jim-minter/azhack | e9918a916d2b71f3adcc4f1ea208312ad9c59210 | 0e8631fd067014a9f3000101a886e7a9a94acc95 | refs/heads/master | 2020-05-07T19:50:43.980332 | 2019-04-11T15:37:43 | 2019-04-11T22:20:29 | 180,830,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,151 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .protected_item import ProtectedItem
class MabFileFolderProtectedItem(ProtectedItem):
"""MAB workload-specific backup item.
:param backup_management_type: Type of backup management for the backed
up item. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB', 'DPM',
'AzureBackupServer', 'AzureSql'
:type backup_management_type: str or :class:`BackupManagementType
<azure.mgmt.recoveryservicesbackup.models.BackupManagementType>`
:param workload_type: Type of workload this item represents. Possible
values include: 'Invalid', 'VM', 'FileFolder', 'AzureSqlDb', 'SQLDB',
'Exchange', 'Sharepoint', 'VMwareVM', 'SystemState', 'Client',
'GenericDataSource'
:type workload_type: str or :class:`DataSourceType
<azure.mgmt.recoveryservicesbackup.models.DataSourceType>`
:param container_name: Unique name of container
:type container_name: str
:param source_resource_id: ARM ID of the resource to be backed up.
:type source_resource_id: str
:param policy_id: ID of the backup policy with which this item is backed
up.
:type policy_id: str
:param last_recovery_point: Timestamp when the last (latest) backup copy
was created for this backup item.
:type last_recovery_point: datetime
:param protected_item_type: Polymorphic Discriminator
:type protected_item_type: str
:param friendly_name: Friendly name of this backup item.
:type friendly_name: str
:param computer_name: Name of the computer associated with this backup
item.
:type computer_name: str
:param last_backup_status: Status of last backup operation.
:type last_backup_status: str
:param protection_state: Protected, ProtectionStopped, IRPending or
ProtectionError
:type protection_state: str
:param is_scheduled_for_deferred_delete: Specifies if the item is
scheduled for deferred deletion.
:type is_scheduled_for_deferred_delete: bool
:param deferred_delete_sync_time_in_utc: Sync time for deferred deletion.
:type deferred_delete_sync_time_in_utc: long
:param extended_info: Additional information with this backup item.
:type extended_info: :class:`MabFileFolderProtectedItemExtendedInfo
<azure.mgmt.recoveryservicesbackup.models.MabFileFolderProtectedItemExtendedInfo>`
"""
_validation = {
'protected_item_type': {'required': True},
}
_attribute_map = {
'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
'workload_type': {'key': 'workloadType', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
'policy_id': {'key': 'policyId', 'type': 'str'},
'last_recovery_point': {'key': 'lastRecoveryPoint', 'type': 'iso-8601'},
'protected_item_type': {'key': 'protectedItemType', 'type': 'str'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'computer_name': {'key': 'computerName', 'type': 'str'},
'last_backup_status': {'key': 'lastBackupStatus', 'type': 'str'},
'protection_state': {'key': 'protectionState', 'type': 'str'},
'is_scheduled_for_deferred_delete': {'key': 'isScheduledForDeferredDelete', 'type': 'bool'},
'deferred_delete_sync_time_in_utc': {'key': 'deferredDeleteSyncTimeInUTC', 'type': 'long'},
'extended_info': {'key': 'extendedInfo', 'type': 'MabFileFolderProtectedItemExtendedInfo'},
}
def __init__(self, backup_management_type=None, workload_type=None, container_name=None, source_resource_id=None, policy_id=None, last_recovery_point=None, friendly_name=None, computer_name=None, last_backup_status=None, protection_state=None, is_scheduled_for_deferred_delete=None, deferred_delete_sync_time_in_utc=None, extended_info=None):
super(MabFileFolderProtectedItem, self).__init__(backup_management_type=backup_management_type, workload_type=workload_type, container_name=container_name, source_resource_id=source_resource_id, policy_id=policy_id, last_recovery_point=last_recovery_point)
self.friendly_name = friendly_name
self.computer_name = computer_name
self.last_backup_status = last_backup_status
self.protection_state = protection_state
self.is_scheduled_for_deferred_delete = is_scheduled_for_deferred_delete
self.deferred_delete_sync_time_in_utc = deferred_delete_sync_time_in_utc
self.extended_info = extended_info
self.protected_item_type = 'MabFileFolderProtectedItem'
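# Minimal usage sketch (all field values below are hypothetical):
#   item = MabFileFolderProtectedItem(
#       friendly_name='documents',
#       computer_name='host-01',
#       last_backup_status='Healthy',
#   )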
| [
"[email protected]"
] |