| column | dtype | range |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
27930989c4f710c63ba8fdbe07e1fe62f4767919 | b6ee71c540af500c2938e78f928bfd2d88585eea | /threestrandcode/apps/api/tests/assignments.py | b02c4b86cdd02ef374a26b38a4bd90e174f9b095 | [] | no_license | 3-strand-code/3sc-api | 114e1c633cbb026a200fc74a065db39072b63763 | 1e4c051758aab85f0f6b6efa6c3aa713f05eb75e | refs/heads/master | 2016-09-01T16:11:18.668787 | 2016-02-15T01:29:04 | 2016-02-15T01:29:04 | 48,534,175 | 0 | 0 | null | 2016-02-23T20:52:47 | 2015-12-24T08:41:32 | Python | UTF-8 | Python | false | false | 1,284 | py | import random
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from faker import Factory
from loremipsum import generate_paragraph
from model_mommy import mommy

from applicants.models import Applicant
from homework.models import Course, Recipe
from homework.recipes import MakeGHPage


class TestAssignments(TestCase):

    def setUp(self):
        self.admin = User.objects.create_superuser(username="admin", password="test", email="[email protected]")
        self.user = User.objects.create_user(username="test", password="test", email="[email protected]")
        self.application = Applicant.objects.create(email="[email protected]", user=self.user)
        self.course = mommy.make(Course)
        self.make_gh_page_recipe = Recipe.objects.create(
            creator=self.admin,
            instructions='',
            module=MakeGHPage.get_name(),
            course=self.course,
        )

    def test_create_assignment_hooks_github_repo(self):
        # Create a user and the supporting records, post to the assignment
        # endpoint, and assert that "create_hook" is called on that repo.
        # TODO: still a stub -- self.client.post() needs a target URL.
        self.client.login(username="test", password="test")
        self.client.post()
| ["[email protected]"] | |
4e2de6d4cb15b3eb73b16ff6453fac402218a793 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /dp_regression/experiment_test.py | a2e9967b9ad34b3318655ce57a3d11f4ad93f12c | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 1,307 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for experiment."""

from absl.testing import absltest
import numpy as np

from dp_regression import experiment


class ExperimentTest(absltest.TestCase):

  def test_r_squared(self):
    predictions = np.asarray([[3, -1, 2], [0, 2, 3]])
    labels = np.asarray([1, 2, 3])
    r_squared = experiment.r_squared(predictions, labels)
    # residual sum of squares, one per prediction row:
    # (3 - 1)^2 + (-1 - 2)^2 + (2 - 3)^2 = 14
    # (0 - 1)^2 + (2 - 2)^2 + (3 - 3)^2 = 1
    # total sum of squares:
    # (1 - 2)^2 + (2 - 2)^2 + (3 - 2)^2 = 2
    # R^2:
    # 1 - (14 / 2) = -6
    # 1 - (1 / 2) = 0.5
    np.testing.assert_array_almost_equal(r_squared, np.asarray([-6, 0.5]))


if __name__ == '__main__':
  absltest.main()
| ["[email protected]"] | |
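For reference, an implementation consistent with the worked values in this test could look like the following sketch (mine, not the actual google-research code; it assumes predictions has shape (num_models, n) and labels shape (n,)):

import numpy as np

def r_squared(predictions, labels):
    # One R^2 score per row of predictions: 1 - RSS/TSS.
    rss = np.sum((np.asarray(predictions) - labels) ** 2, axis=1)
    tss = np.sum((labels - np.mean(labels)) ** 2)
    return 1.0 - rss / tss

With the test's inputs this yields [-6, 0.5], matching the assertion above.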
10e6a6e562dce5f1b086e9743cb47cf6e26114bc | a3812dc03ea9f818be7df41eec424cfb1a3fc5ce | /fluent_blogs/admin/abstractbase.py | af38b85f4b299bb9421402df4f4f2b2557a030d0 | [
"Apache-2.0"
] | permissive | msaelices/django-fluent-blogs | 76991e701c998594854be9e348b37ed927313b72 | c892c5acfd847e6a4b587a34edc55df29563225b | refs/heads/master | 2021-01-16T19:01:59.286552 | 2015-08-27T15:06:25 | 2015-08-27T15:06:25 | 41,442,271 | 0 | 1 | null | 2015-08-26T18:18:47 | 2015-08-26T18:18:47 | null | UTF-8 | Python | false | false | 10,455 | py | import django
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.widgets import AdminTextInputWidget, AdminTextareaWidget
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import NoReverseMatch
from django.utils.timezone import now
from django.utils.translation import ugettext, ugettext_lazy as _
from fluent_blogs import appsettings
from fluent_blogs.admin.forms import AbstractEntryBaseAdminForm, AbstractTranslatableEntryBaseAdminForm
from fluent_blogs.base_models import AbstractEntryBase
from fluent_blogs.models import get_entry_model
from fluent_utils.dry.admin import MultiSiteAdminMixin

from fluent_contents.admin import PlaceholderFieldAdmin
from parler.admin import TranslatableAdmin
from parler.models import TranslationDoesNotExist

EntryModel = get_entry_model()


class AbstractEntryBaseAdmin(MultiSiteAdminMixin, PlaceholderFieldAdmin):
    """
    The base functionality of the admin, which only uses the fields of the
    :class:`~fluent_blogs.base_models.AbstractEntryBase` model.
    Everything else is branched off in the :class:`EntryAdmin` class.
    """

    filter_site = appsettings.FLUENT_BLOGS_FILTER_SITE_ID
    list_display = ('title', 'status_column', 'modification_date', 'actions_column')
    list_filter = ('status',)
    date_hierarchy = 'publication_date'
    search_fields = ('slug', 'title')
    actions = ['make_published']
    form = AbstractEntryBaseAdminForm
    prepopulated_fields = {'slug': ('title',)}
    radio_fields = {
        'status': admin.HORIZONTAL,
    }
    raw_id_fields = ('author',)

    FIELDSET_GENERAL = (None, {
        'fields': ('title', 'slug', 'status',),
    })
    FIELDSET_PUBLICATION = (_('Publication settings'), {
        'fields': ('publication_date', 'publication_end_date', 'author'),
        'classes': ('collapse',),
    })

    class Media:
        css = {
            'all': ('fluent_blogs/admin/admin.css',)
        }

    def get_readonly_fields(self, request, obj=None):
        readonly_fields = list(super(AbstractEntryBaseAdmin, self).get_readonly_fields(request, obj))
        if not request.user.is_superuser:
            readonly_fields.append('author')
        return readonly_fields

    def save_model(self, request, obj, form, change):
        # Automatically store the user in the author field.
        if not obj.author_id:
            obj.author = request.user
        if not obj.publication_date:
            # auto_now_add makes the field uneditable;
            # a default would fill the field before the post is written (too early).
            obj.publication_date = now()
        obj.save()

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Allow formfield_overrides to contain field names too.
        """
        overrides = self.formfield_overrides.get(db_field.name)
        if overrides:
            kwargs.update(overrides)
        field = super(AbstractEntryBaseAdmin, self).formfield_for_dbfield(db_field, **kwargs)
        # Pass user to the form.
        if db_field.name == 'author':
            field.user = kwargs['request'].user
        return field

    def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
        # When the page is accessed via a pagetype, warn that the node can't be previewed yet.
        context['preview_error'] = ''
        if 'fluent_pages' in settings.INSTALLED_APPS:
            from fluent_pages.urlresolvers import PageTypeNotMounted, MultipleReverseMatch
            try:
                self._reverse_blogpage_index(request, obj)
            except PageTypeNotMounted:
                from fluent_blogs.pagetypes.blogpage.models import BlogPage
                context['preview_error'] = ugettext("The blog entry can't be previewed yet, a '{page_type_name}' page needs to be created first.").format(page_type_name=BlogPage._meta.verbose_name)
            except MultipleReverseMatch:
                # When 'entry_archive_index' is ambiguous (because there are multiple blog nodes in the fluent-pages tree),
                # the edit page will automatically pick an option.
                pass
            except NoReverseMatch:
                # Since forgetting the pagetype app is easy, give off a warning to help developers
                # find their way with these apps.
                raise ImproperlyConfigured(
                    "To use django-fluent-blogs, either include('fluent_blogs.urls') in the URLConf, "
                    "or add the 'fluent_blogs.pagetypes.blogpage' app to the INSTALLED_APPS."
                )
        return super(AbstractEntryBaseAdmin, self).render_change_form(request, context, add, change, form_url, obj)

    def _reverse_blogpage_index(self, request, obj=None):
        # Internal method with "protected access" to handle translation differences.
        # This is only called when 'fluent_pages' is in the INSTALLED_APPS.
        from fluent_pages.urlresolvers import mixed_reverse
        return mixed_reverse('entry_archive_index')

    # ---- List code ----

    STATUS_ICONS = (
        (AbstractEntryBase.PUBLISHED, 'icon-yes.gif'),
        (AbstractEntryBase.DRAFT, 'icon-unknown.gif'),
    )

    @classmethod
    def get_status_column(cls, entry):
        # Create a status column; also reused by templatetags/fluent_blogs_admin_tags.py
        status = entry.status
        title = next(rec[1] for rec in AbstractEntryBase.STATUSES if rec[0] == status)
        icon = next(rec[1] for rec in cls.STATUS_ICONS if rec[0] == status)
        if django.VERSION >= (1, 4):
            admin = settings.STATIC_URL + 'admin/img/'
        else:
            admin = settings.ADMIN_MEDIA_PREFIX + 'img/admin/'
        return u'<img src="{admin}{icon}" width="10" height="10" alt="{title}" title="{title}" />'.format(admin=admin, icon=icon, title=title)

    def status_column(self, entry):
        # This method is needed because attributes can't be assigned to a classmethod.
        return self.get_status_column(entry)

    status_column.allow_tags = True
    status_column.short_description = _('Status')

    @classmethod
    def get_actions_column(cls, entry):
        return u' '.join(cls._actions_column_icons(entry))

    @classmethod
    def _actions_column_icons(cls, entry):
        actions = []
        if cls.can_preview_object(entry):
            try:
                url = entry.get_absolute_url()
            except (NoReverseMatch, TranslationDoesNotExist):
                # A blog entry is already added, but the URL can no longer be resolved.
                # This can either mean that urls.py is missing a 'fluent_blogs.urls' (unlikely),
                # or that this is a PageTypeNotMounted exception because the "Blog page" node was removed.
                # In the second case, the edit page should still be reachable, and the "view on site" link will give an alert.
                pass
            else:
                actions.append(
                    u'<a href="{url}" title="{title}" target="_blank"><img src="{static}fluent_blogs/img/admin/world.gif" width="16" height="16" alt="{title}" /></a>'.format(
                        url=url, title=_('View on site'), static=settings.STATIC_URL)
                )
        return actions

    @classmethod
    def can_preview_object(cls, entry):
        """ Override whether the node can be previewed. """
        return hasattr(entry, 'get_absolute_url') and entry.is_published

    def actions_column(self, entry):
        return self.get_actions_column(entry)

    actions_column.allow_tags = True
    actions_column.short_description = _('Actions')

    def make_published(self, request, queryset):
        rows_updated = queryset.update(status=AbstractEntryBase.PUBLISHED)
        if rows_updated == 1:
            message = "1 entry was marked as published."
        else:
            message = "{0} entries were marked as published.".format(rows_updated)
        self.message_user(request, message)

    make_published.short_description = _("Mark selected entries as published")


class AbstractTranslatableEntryBaseAdmin(TranslatableAdmin, AbstractEntryBaseAdmin):
    """
    The base functionality of the admin, which only uses the fields of the
    :class:`~fluent_blogs.base_models.AbstractTranslatedEntryBase` model.
    Everything else is branched off in the :class:`EntryAdmin` class.
    """

    form = AbstractTranslatableEntryBaseAdminForm
    list_display = ('title', 'language_column', 'status_column', 'modification_date', 'actions_column')
    search_fields = ('translations__slug', 'translations__title')
    prepopulated_fields = {}  # Not supported by django-parler 0.9.2; using get_prepopulated_fields() as a workaround.

    def get_prepopulated_fields(self, request, obj=None):
        # Still allow overriding self.prepopulated_fields in other custom classes,
        # but default to the settings which are compatible with django-parler.
        return self.prepopulated_fields or {'slug': ('title',)}

    def _reverse_blogpage_index(self, request, obj=None):
        # Updated mixed_reverse() call, with the language code included.
        from fluent_pages.urlresolvers import mixed_reverse
        language_code = self.get_form_language(request, obj)
        return mixed_reverse('entry_archive_index', language_code=language_code)

    def get_language_short_title(self, language_code):
        """
        Turn the language code to uppercase.
        """
        return language_code.upper()

    def changelist_view(self, request, extra_context=None):
        extra_context = extra_context or {}
        extra_context['FLUENT_BLOGS_IS_TRANSLATABLE'] = True
        return super(AbstractTranslatableEntryBaseAdmin, self).changelist_view(request, extra_context)


class SeoEntryAdminMixin(object):
    """
    Mixin for the SEO fields.
    """

    FIELDSET_SEO = (_('SEO settings'), {
        'fields': ('meta_keywords', 'meta_description', 'meta_title',),
        'classes': ('collapse',),
    })

    # AbstractEntryBaseAdmin allows specifying the widgets by field name,
    # which formfield_overrides doesn't support by default.
    formfield_overrides = {
        'meta_keywords': {
            'widget': AdminTextInputWidget(attrs={'class': 'vLargeTextField'})
        },
        'meta_description': {
            'widget': AdminTextareaWidget(attrs={'rows': 3})
        },
    }
| ["[email protected]"] | |
98a8aff95c45a2477fb0daa2102191e16e591101 | de4c5ecaf541d67e7cbf02837d93cf303d23b5da | /src/app/model/flash_msg.py | 11cca6b9e7356ac435f6b39c82155b57a7ea7b71 | [
"Apache-2.0"
] | permissive | shadowmint/py-test-watcher | d140064cafeb0b2efce8a403a3abd63322f812d0 | 36d33206b104c81e2d6acebdbed2dddee71fe2a7 | refs/heads/master | 2021-01-19T14:07:13.441335 | 2013-07-01T06:07:56 | 2013-07-01T06:07:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | # Copyright 2013 Douglas Linder
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .base import Base
from nark import *
from base import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy import Sequence
from sqlalchemy import *
from sqlalchemy.orm import *
import datetime
import app
# Possible message types
FlashTypes = enum(NOTICE=1, SUCCESS=2, FAILURE=3)
class FlashMsg(Base):
__tablename__ = 'flash_msg'
id = Column(Integer, Sequence('flash_msg_id_seq'), primary_key=True)
created_on = Column(DateTime, nullable=False)
level = Column(Integer, nullable=False)
message = Column(String, nullable=False)
def __init__(self, level, message):
self.level = level
self.message = message
self.created_on = datetime.datetime.utcnow()
def __repr__(self):
return "<Flash('%s (%s, created on: %s)')>" % (self.message, self.level, self.created_on)
@resolve(app.scope)
class Flash(object):
""" Container for the prefs objects """
def __init__(self, db=IDb):
self.db = db
def session(self):
self.db.connect()
return self.db.session
def fail(self, message):
""" Post a new message to tell the user about """
session = self.session()
record = FlashMsg(FlashTypes.FAILURE, message)
session.add(record)
session.commit()
log.error("Flash! %s (FAILURE)" % message)
def success(self, message):
""" Post a new message to tell the user about """
session = self.session()
record = FlashMsg(FlashTypes.SUCCESS, message)
session.add(record)
session.commit()
log.info("Flash! %s (SUCCESS)" % message)
def notice(self, message):
""" Post a new message to tell the user about """
session = self.session()
record = FlashMsg(FlashTypes.NOTICE, message)
session.add(record)
session.commit()
log.info("Flash! %s (NOTICE)" % message)
def get(self):
""" Return the next pending flash message """
session = self.session()
if self.any():
rtn = session.query(FlashMsg).order_by(FlashMsg.created_on).first()
session.delete(rtn)
session.commit()
return rtn
return None
def any(self):
""" Return true if any pending messages """
session = self.session()
return session.query(FlashMsg).count() > 0
# Logging
log = Logging.get()
| [
"[email protected]"
] | |
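A short usage sketch of the queue semantics above (my illustration; it assumes nark's @resolve injection supplies the IDb implementation when Flash() is constructed):

flash = Flash()
flash.notice("watch started")
flash.fail("3 tests failed")
while flash.any():
    msg = flash.get()  # oldest message first; get() also removes it from the queue
    print("%s: %s" % (msg.level, msg.message))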
d7d6b4cd87007c2c1eba4c550c67c0c1c1bddee4 | cb062c48280311134fe22573a41f9c4d6631b795 | /src/xm/core/TransactionInfo.py | 00f218c97fc571e7a710ca83d842ded158650ac2 | [
"MIT"
] | permissive | xm-blockchain/xm-core | da1e6bb4ceb8ab642e5d507796e2cc630ed23e0f | 2282b435a02f061424d656155756d8f50238bcfd | refs/heads/main | 2023-01-15T19:08:31.399219 | 2020-11-19T03:54:19 | 2020-11-19T03:54:19 | 314,127,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | # coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from xm.core import config
from xm.core.misc import ntp
from xm.core.txs.Transaction import Transaction


class TransactionInfo:

    def __init__(self, tx: Transaction, block_number: int, timestamp: int = None):
        self._transaction = tx
        self._block_number = block_number
        self._timestamp = timestamp
        if not self._timestamp:
            self._timestamp = ntp.getTime()

    def __lt__(self, tx_info):
        if self.transaction.fee < tx_info.transaction.fee:
            return True
        return False

    @property
    def transaction(self):
        return self._transaction

    @property
    def block_number(self):
        return self._block_number

    @property
    def timestamp(self):
        return self._timestamp

    def is_stale(self, current_block_number: int):
        if current_block_number > self._block_number + config.user.stale_transaction_threshold:
            return True
        # If the chain recovered from a fork where the chain height was reduced,
        # then update the block_number of the transactions in the pool.
        if current_block_number < self._block_number:
            self.update_block_number(current_block_number)
        return False

    def update_block_number(self, current_block_number: int):
        self._block_number = current_block_number

    def validate(self, new_state_container, update_state_container, block_number) -> bool:
        addresses_set = set()
        self.transaction.set_affected_address(addresses_set)
        state_container = new_state_container(addresses_set,
                                              block_number,
                                              False,
                                              None)
        if not update_state_container(self.transaction, state_container):
            return False
        # The nonce should not be checked during transaction validation,
        # as the appropriate nonce can be set by the miner before placing
        # the txn into a block.
        if not self.transaction.validate_all(state_container, False):
            return False
        return True
| ["[email protected]"] | |
768aeb458d3c1a278bb6c13bfb68415378dea271 | 76e931912629c37beedf7c9b112b53e7de5babd7 | /2-mouth02/day09/insert_many.py | f985b8313dcbcd47dce05b6061d60655455b3c3e | [
"Apache-2.0"
] | permissive | gary-gggggg/gary | c59ac21d8e065f296ff986d11a0e4cbf186a1bc4 | d8ba30ea4bc2b662a2d6a87d247f813e5680d63e | refs/heads/main | 2023-02-23T06:54:34.500683 | 2021-02-01T10:17:02 | 2021-02-01T10:17:02 | 334,905,744 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | import pymysql
db_dic = {
    "host": "localhost",
    "port": 3306,
    "user": "root",
    "password": "123456",
    "database": "gary",
    "charset": "utf8"
}

# Connect to the database
db = pymysql.connect(**db_dic)  # a single connection object

# Create a cursor: the object that executes SQL and holds the results
cur = db.cursor()

# Insert the data
list1 = [
    ("张三", 21, 'male', 65),
    ("李四", 18, 'female', 47),
    ("王五", 16, 'others', 94)
]
try:
    sql = "insert into school (name,age,gender,grade) values (%s,%s,%s,%s);"
    cur.executemany(sql, list1)
    db.commit()
except Exception as e:
    print(e)
    db.rollback()

# Close the cursor and the connection
cur.close()
db.close()
| ["[email protected]"] | |
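To verify the batch insert, a follow-up query could look like this (my sketch, reusing db_dic from above):

check = pymysql.connect(**db_dic)
with check.cursor() as cur2:
    cur2.execute("select name, age, gender, grade from school;")
    for row in cur2.fetchall():
        print(row)
check.close()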
b69d77cf2d656cfdd59cd2d926c4ff2a3d3b483e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_85/45.py | 6c238130b6c65c8e115cbd376dfa2b64f752cdeb | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | def dist_at(a,c,dist):
    return dist[a % c]


def normal_time(n, c, dist):
    time = 0
    for star in xrange(0, n):
        length = dist_at(star, c, dist)
        time += length * 2
    return time


def solve():
    test = int(raw_input())
    for case in xrange(1, test + 1):
        dist = []
        values = map(int, raw_input().split())
        for i in xrange(0, len(values)):
            if i == 0:
                l = values[i]
            elif i == 1:
                t = values[i]
            elif i == 2:
                n = values[i]
            elif i == 3:
                c = values[i]
            else:
                dist.append(values[i])
        current = 0
        dist_from_current = 0.0
        for star in xrange(0, n):
            next = dist_at(star, c, dist)
            time = next * 2
            if time < t:
                t = t - time
                current = star + 1
            elif time == t:
                current = star + 1
                dist_from_current = 0.0
                break  # arrived exactly at the next star with no time left
            else:
                current = star
                dist_from_current = 0.5 * t
                break
        # print current, dist_from_current
        a = current
        b = current + 1
        dist_left_to_b = dist_at(a, c, dist) - dist_from_current
        segments = []
        for star in xrange(b, n):
            segments.append(dist_at(star, c, dist))
        segments.append(dist_left_to_b)
        count = 0
        normal = normal_time(n, c, dist)
        saved = 0
        if l != 0:
            for val in sorted(segments, reverse=True):
                saved += val
                count += 1
                if count == l:
                    break
        if l == 0:
            print "Case #" + str(case) + ": " + str(int(normal))
        else:
            print "Case #" + str(case) + ": " + str(int(normal - saved))


solve()
| ["[email protected]"] | |
cb6cde984e9367c6b77787362767d0684e97c498 | 11bb0cbe6de2a0a4e94fc0ba610f61894d5593a1 | /VBS_Zgamma/AQGC/combine/run/zz/test/th2_to_txt.py | 9f8455d301e3b0c8e56d9350a9029ec067096327 | [] | no_license | AnYpku/PKU-Cluster | 0dc4a88445aeb3ca239b2d7d7f796c6a67f3f69c | f9ffbcb7988053f4618fd015c1bb656d92ff51c6 | refs/heads/master | 2022-11-01T23:46:59.442037 | 2022-10-21T06:37:43 | 2022-10-21T06:37:43 | 188,202,345 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,830 | py | #!/usr/bin/env python
from ROOT import gROOT, THStack, TH1D, TList, TFile, TH1F

print '-----begin to transfer TH2D to txt for Higgs-combine tool----- \n'
f1 = TFile.Open("chAll.root")
th1_ZA_sig = f1.Get("diboson")
th1_ZA = f1.Get("QCD_gg")
th1_non_prompt = f1.Get("QCD_qq")

print '>>>>begin to read bin content to the txt file>>>>'
for i in range(1, 7):
    f = open('./%s_bin_%d.txt' % ('chAll', i), 'w')
    f.write('imax 1 number of channels\n')
    f.write('jmax 2 number of processes-1\n')
    f.write('kmax 1 number of nuisance parameters (sources of systematical uncertainties)\n')
    f.write('------------\n')
    f.write('# we have just one channel, in which we observe 0 events\n')
    f.write('bin chAll%i\n' % (i))
    bin_content = th1_non_prompt.GetBinContent(i) + th1_ZA.GetBinContent(i) + th1_ZA_sig.GetBinContent(i)
    # bin content of each process
    non_prompt_bincontent = th1_non_prompt.GetBinContent(i) if th1_non_prompt.GetBinContent(i) > 0 else 0
    ZA_bincontent = th1_ZA.GetBinContent(i) if th1_ZA.GetBinContent(i) > 0 else 0
    ZA_sig_bincontent = th1_ZA_sig.GetBinContent(i) if th1_ZA_sig.GetBinContent(i) > 0 else 0
    # relative bin errors, capped at 100% and shifted to the 1+delta form used for lnN nuisances
    non_prompt_binerror = th1_non_prompt.GetBinError(i) / non_prompt_bincontent if non_prompt_bincontent > 0 else 0
    non_prompt_binerror = non_prompt_binerror if non_prompt_binerror < 1 else 1
    non_prompt_binerror = non_prompt_binerror + 1
    ZA_binerror = th1_ZA.GetBinError(i) / ZA_bincontent if ZA_bincontent > 0 else 0
    ZA_binerror = ZA_binerror if ZA_binerror < 1 else 1
    ZA_binerror = ZA_binerror + 1
    ZA_sig_binerror = th1_ZA_sig.GetBinError(i) / ZA_sig_bincontent if ZA_sig_bincontent > 0 else 0
    ZA_sig_binerror = ZA_sig_binerror if ZA_sig_binerror < 1 else 1
    ZA_sig_binerror = ZA_sig_binerror + 1
    f.write('observation %.2f\n' % bin_content)
    f.write('------------\n')
    f.write('# now we list the expected events for signal and all backgrounds in that bin\n')
    f.write('# the second process line must have a positive number for backgrounds, and 0 for signal\n')
    f.write('# then we list the independent sources of uncertainties, and give their effect (syst. error)\n')
    f.write('# on each process and bin\n')
    f.write('bin\tchAll%i\tchAll%i\tchAll%i\n' % (i, i, i))
    f.write('process\tsig\tQCDgg\tQCDqq\n')
    f.write('process\t0\t1\t2\n')
    f.write('rate\t%0.2f\t%0.2f\t%0.2f\n' % (ZA_sig_bincontent, ZA_bincontent, non_prompt_bincontent))
    f.write('------------\n')
    f.write('lumi\tlnN\t1.06\t1.06\t-\t#lumi\n')
    # f.write('VBS_stat_%s_%s_bin_%d\tlnN\t%0.2f\t-\t-\n' % ('chAll', '18', i, ZA_sig_binerror))
    # f.write('QCDgg_stat_%s_%s_bin_%d\tlnN\t-\t%0.2f\t-\n' % ('chAll', '18', i, ZA_binerror))
    # f.write('QCDqq_stat_%s_%s_bin_%d\tlnN\t-\t-\t%0.2f\n' % ('chAll', '18', i, non_prompt_binerror))
    print 'bin ', i, ' ', ZA_sig_binerror, ' ', ZA_binerror, ' ', non_prompt_binerror, ' '
    f.close()  # close each datacard before moving to the next bin
| ["[email protected]"] | |
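The repeated three-line pattern above encodes each bin's statistical error in the 1+delta form that combine expects for lnN nuisances; a quick worked example (my illustration):

# A bin with content N = 25.0 and GetBinError sigma = 5.0:
#   5.0 / 25.0 = 0.20 (relative error, capped at 1.0) -> written as 1.20,
# i.e. a 20% log-normal uncertainty; any bin whose relative error exceeds
# 100% is clamped to the maximum value 2.0.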
c755e0f999df38cc3bc6191386c7b4da163fc42e | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5670465267826688_1/Python/YeOldeLancer/d___stra.py | f523443b1ff964dc7109509426fe56b1cce4791e | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,098 | py | # Google Code Jam 2015, Qualification Round
# Problem C. Dijkstra
# Lance C. Simons


def dijkstra_eval(s):
    # Evaluate a (possibly "-"-prefixed) quaternion word, e.g. "ijk" -> -1.
    # The handled prefix is stripped before the loop, so a leading sign is
    # never multiplied in twice.
    if s.startswith("-"):
        d = dijkstra_digit(s[:2])
        s = s[2:]
    else:
        d = dijkstra_digit(s[0])
        s = s[1:]
    for c in s:
        d = d * c
    return d


class dijkstra_digit:
    # Right-multiplication table of the quaternion units; a "-" marks a sign flip.
    mtable = {"1": {"1": "1", "i": "i", "j": "j", "k": "k"},
              "i": {"1": "i", "i": "-1", "j": "k", "k": "-j"},
              "j": {"1": "j", "i": "-k", "j": "-1", "k": "i"},
              "k": {"1": "k", "i": "j", "j": "-i", "k": "-1"}}

    def __init__(self, *args):
        self.positive = True
        if type(args[0]) == type(""):
            self.positive = len(args[0]) == 1
            self.value = args[0][-1]
        elif len(args) == 1:
            self.value = args[0]
        else:
            self.positive, self.value = args

    def __neg__(self):
        return dijkstra_digit(not self.positive, self.value)

    def __mul__(self, other):
        if type(other) == type(""):
            other = dijkstra_eval(other)
        d = dijkstra_digit(self.mtable[self.value][other.value])
        if self.positive != other.positive:
            d = -d
        return d

    def __rmul__(self, other):
        return dijkstra_eval(other) * self

    def __pow__(self, exp):
        # Powers of the quaternion units cycle with period 4.
        exp = exp % 4
        if self.value == "1":
            if exp == 0:
                return dijkstra_digit("1")
            elif exp == 1:
                return dijkstra_digit(self.positive, "1")
            elif exp == 2:
                return dijkstra_digit(True, "1")
            else:
                return dijkstra_digit(self.positive, "1")
        else:
            if exp == 0:
                return dijkstra_digit("1")
            elif exp == 1:
                return dijkstra_digit(self.positive, self.value)
            elif exp == 2:
                return dijkstra_digit("-1")
            else:
                return dijkstra_digit(not self.positive, self.value)

    def __eq__(self, other):
        if type(other) == type(""):
            other = dijkstra_eval(other)
        return self.positive == other.positive and self.value == other.value

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        return ("" if self.positive else "-") + self.value

    def __repr__(self):
        return str(self)


def correctable(count, substr, maxtest=6):
    # Exit early if the full product does not equal i*j*k == -1.
    if (dijkstra_eval(substr) ** count) != "ijk":
        return False
    strlen = len(substr)

    def at(i):
        return substr[i % strlen]

    def search_fwd(goal, *start):
        if not all(start): return None
        start = sum(start)
        i = 0
        fwd = dijkstra_digit(at(i + start))
        while fwd != goal and i < strlen * maxtest:
            i += 1
            fwd = fwd * at(i + start)
        if fwd != goal:
            return None
        return i + 1

    def search_rev(goal, *end):
        if not all(end): return None
        end = sum(end)
        i = 0
        rev = dijkstra_digit(at(end - 1 - i))
        while rev != goal and i < strlen * maxtest:
            i += 1
            rev = at(end - 1 - i) * rev
        if rev != goal:
            return None
        return i + 1

    def valid(*args):
        return all(args)

    def will_fit(*chars):
        chars_used = sum(chars)
        words_used = ((chars_used - 1) / strlen) + 1
        return words_used <= count

    # Forward search
    i_used_fwd = search_fwd("i")
    j_used_fwd = search_fwd("j", i_used_fwd)
    k_used_fwd = search_fwd("k", i_used_fwd, j_used_fwd)
    if valid(i_used_fwd, j_used_fwd, k_used_fwd):
        return will_fit(i_used_fwd, j_used_fwd, k_used_fwd)

    # Reverse search
    k_used_rev = search_rev("k")
    j_used_rev = search_rev("j", k_used_rev)
    i_used_rev = search_rev("i", k_used_rev, j_used_rev)
    if valid(i_used_rev, j_used_rev, k_used_rev):
        return will_fit(i_used_rev, j_used_rev, k_used_rev)

    if valid(i_used_fwd, j_used_fwd, k_used_rev):
        return will_fit(i_used_fwd, j_used_fwd, k_used_rev)
    if valid(i_used_fwd, j_used_rev, k_used_rev):
        return will_fit(i_used_fwd, j_used_rev, k_used_rev)
    return False


def go(infilename, outfilename):
    inf = open(infilename, "r")
    outf = open(outfilename, "w")
    runs = int(inf.next().strip())
    for i in range(runs):
        L, X = map(int, inf.next().strip().split())
        substr = inf.next().strip()
        outf.write("Case #%d: %s\n" % (i + 1, {True: "YES", False: "NO"}[correctable(X, substr)]))


if __name__ == "__main__":
    import sys
    go(sys.argv[1], sys.argv[1].replace(".in", ".out"))
| ["[email protected]"] | |
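A few illustrative checks of the quaternion helpers above (my examples, not part of the original submission):

assert dijkstra_digit("i") * "j" == "k"   # i*j = k
assert dijkstra_eval("ijk") == "-1"       # i*j*k = -1
assert correctable(1, "ijk")              # splits directly into i | j | k
assert correctable(2, "ik")               # "ikik" -> "i" | "ki" (= j) | "k"
assert not correctable(1, "i")            # total product is i, not -1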
dffce93acf81be40e78272c45459c21d33a2780a | 3ff28c714fef7f568e8dfce0a2d4a16d6d10e8ef | /Using Python to Access Web Data - University of Michigan/get_contents_between_tags.py | 86f3d5bb546264bc8aadff7a06c6be873c6ea2f3 | [] | no_license | avishkakavindu/Fun-Times | a0325c045a3d9316fd00d1c9b025a994498762d5 | 6861558c668892ce2a0b1b37ecfac30883f0f3b5 | refs/heads/master | 2022-08-13T15:23:22.939576 | 2020-05-22T10:18:30 | 2020-05-22T10:18:30 | 264,172,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl

# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

url = input('Enter - ')
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")

# Retrieve all of the anchor tags
tags = soup('a')
for tag in tags:
    # Look at the parts of a tag
    print('TAG:', tag)                    # the entire tag
    print('URL:', tag.get('href', None))  # the value of the 'href' attribute
    print('Contents:', tag.contents[0])   # the content between the tags
    print('Attrs:', tag.attrs)            # dict of the tag's attributes and their values
| ["[email protected]"] | |
c8e6c842ce09125be40f318e5d67694aa1bf17f4 | 91e18177b07a842b84863cee8cad118666107b4b | /schedule/migrations/0001_initial.py | 59d213cde1acd6355cf40fbd8e7f4b4590b2abd1 | [] | no_license | HyeonGyuChi/NewBeTon_2019 | 6c55797af34715a803cf4eee245b1c7b77584f2a | 1d93bdaec9dbf1eb82ea689eb01b106e835d373f | refs/heads/master | 2020-05-19T08:21:20.622579 | 2019-05-05T00:43:19 | 2019-05-05T00:43:19 | 184,919,650 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | # Generated by Django 2.2.1 on 2019-05-04 15:06
from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='TimeTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subjuect_name', models.CharField(max_length=30)),
                ('day', models.CharField(choices=[(0, 'Monday'), (1, 'Tuesday'), (2, 'Wednesday'), (3, 'Thursday'), (4, 'Friday'), (5, 'Saturday'), (6, 'Sunday')], max_length=1)),
                ('start_time', models.CharField(choices=[(9, '9시'), (10, '10시'), (11, '11시'), (12, '12시'), (13, '13시'), (14, '14시'), (15, '15시'), (16, '16시'), (17, '17시'), (18, '18시')], max_length=2)),
                ('end_time', models.CharField(choices=[(9, '9시'), (10, '10시'), (11, '11시'), (12, '12시'), (13, '13시'), (14, '14시'), (15, '15시'), (16, '16시'), (17, '17시'), (18, '18시')], max_length=2)),
                # on_delete must be a callable, not the string 'CASCADE'
                ('user_id', models.ForeignKey(on_delete=models.CASCADE, to='schedule.User')),
            ],
        ),
    ]
| ["[email protected]"] | |
ea87a335075397221ad96fc8a450587dcbe0c2c2 | 273eb20546083f0e23a8077a3f6d383ed37ffef6 | /Bricks/Qt4_MultipleMotorsBrick.py | dda64ad67cca45f17bcc363e8cba0386acbdb934 | [] | no_license | douglasbeniz/BlissFramework | fd886b161b9ba6f246424b1352a99820303d48aa | 11486d6c91fc0077e967cb2321743466a7c1aa8b | refs/heads/master | 2021-01-24T15:22:54.876055 | 2017-09-21T19:12:44 | 2017-09-21T19:12:44 | 55,637,790 | 0 | 0 | null | 2016-04-06T20:20:11 | 2016-04-06T20:20:11 | null | UTF-8 | Python | false | false | 4,972 | py | #
# Project: MXCuBE
# https://github.com/mxcube.
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.

from PyQt4 import QtGui
from PyQt4 import QtCore

from Qt4_MotorSpinBoxBrick import Qt4_MotorSpinBoxBrick

from BlissFramework import Qt4_Icons
from BlissFramework.Qt4_BaseComponents import BlissWidget
from BlissFramework.Utils import Qt4_widget_colors

__category__ = 'Motor'


class Qt4_MultipleMotorsBrick(BlissWidget):

    def __init__(self, *args):
        BlissWidget.__init__(self, *args)

        # Hardware objects ----------------------------------------------------

        # Internal values -----------------------------------------------------
        self.motor_hwobj_list = []
        self.motor_widget_list = []
        self.motor_widget_labels = []
        self.predefined_positions_list = []
        self.positions = None

        # Properties ----------------------------------------------------------
        self.addProperty('mnemonic', 'string', '')
        self.addProperty('labels', 'string', '')
        self.addProperty('predefinedPositions', 'string', '')

        # Signals -------------------------------------------------------------

        # Slots ---------------------------------------------------------------

        # Graphic elements ----------------------------------------------------
        self.main_group_box = QtGui.QGroupBox(self)

        # Layout --------------------------------------------------------------
        self.main_groupbox_hlayout = QtGui.QHBoxLayout(self.main_group_box)
        self.main_groupbox_hlayout.setSpacing(2)
        self.main_groupbox_hlayout.setContentsMargins(0, 0, 0, 0)
        self.main_hlayout = QtGui.QHBoxLayout(self)
        self.main_hlayout.addWidget(self.main_group_box)
        self.main_hlayout.setSpacing(2)
        self.main_hlayout.setContentsMargins(2, 2, 2, 2)

        # Size Policy ---------------------------------------------------------

        # Qt signal/slot connections ------------------------------------------

        # Other ---------------------------------------------------------------

    def propertyChanged(self, property_name, old_value, new_value):
        if property_name == 'mnemonic':
            hwobj_names_list = new_value.split()
            for hwobj_name in hwobj_names_list:
                temp_motor_hwobj = self.getHardwareObject(hwobj_name)
                temp_motor_widget = Qt4_MotorSpinBoxBrick(self)
                temp_motor_widget.set_motor(temp_motor_hwobj, hwobj_name)
                temp_motor_widget.move_left_button.hide()
                temp_motor_widget.move_right_button.hide()
                temp_motor_widget.step_button.hide()
                temp_motor_widget.set_line_step(10.0)
                temp_motor_widget.step_changed(None)
                self.main_groupbox_hlayout.addWidget(temp_motor_widget)
                self.motor_hwobj_list.append(temp_motor_hwobj)
                self.motor_widget_list.append(temp_motor_widget)
            if len(self.motor_widget_labels):
                for index, label in enumerate(self.motor_widget_labels):
                    self.motor_widget_list[index].setLabel(label)
        elif property_name == 'icons':
            icons_list = new_value.split()  # currently unused
        elif property_name == 'labels':
            self.motor_widget_labels = new_value.split()
            if len(self.motor_widget_list):
                for index, label in enumerate(self.motor_widget_labels):
                    self.motor_widget_list[index].setLabel(label)
        elif property_name == 'predefinedPositions':
            self.predefined_positions_list = new_value.split()
            for predefined_position in self.predefined_positions_list:
                temp_position_button = QtGui.QPushButton(predefined_position, self.main_group_box)
                self.main_groupbox_hlayout.addWidget(temp_position_button)
                # Bind the current predefined_position via a default argument;
                # a bare closure would see only the last value of the loop variable.
                temp_position_button.clicked.connect(
                    lambda checked=False, pos=predefined_position:
                        self.predefined_position_clicked(pos))
        else:
            BlissWidget.propertyChanged(self, property_name, old_value, new_value)

    def predefined_position_clicked(self, predefined_position):
        for motor in self.motor_hwobj_list:
            motor.move_to_predefined_position(predefined_position.lower())
| ["[email protected]"] | |
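The default-argument trick in the connect() call above is the usual fix for Python's late-binding closures; a minimal illustration (mine):

broken = [lambda: p for p in ("a", "b")]
fixed = [lambda p=p: p for p in ("a", "b")]
assert [f() for f in broken] == ["b", "b"]   # both lambdas see the final p
assert [f() for f in fixed] == ["a", "b"]    # defaults bind at definition time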
d54f685c7714a72608acb26e0491b4540d60859f | 220b79a0c02d43817a5fe4fb0d73e6061507f09d | /mlflow_tools/display/list_model_versions.py | d5bc037b0d944561b8595a72e0d5665b6990dffc | [] | no_license | amesar/mlflow-tools | 7ae5976297545417f5974f418028246e3d74da5f | 2e7282397e9d3a29b4c30aae8ee5d26511d9ab15 | refs/heads/master | 2023-07-25T12:54:01.087785 | 2023-07-25T01:44:54 | 2023-07-25T01:44:54 | 232,914,433 | 34 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | """
List all registered model versions.
"""

import click
from tabulate import tabulate
from mlflow_tools.common.click_options import opt_sort_attr, opt_sort_order, opt_columns, opt_output_csv_file
from mlflow_tools.api import api_factory
from mlflow_tools.display.display_utils import process_df

pandas_api = api_factory.get_pandas_api()


def to_pandas_dataframe(model_name=None, use_by_models=False):
    filter = f"name = '{model_name}'" if model_name else None
    if use_by_models:
        df = pandas_api.search_model_versions_by_models(filter=filter)
    else:
        df = pandas_api.search_model_versions(filter=filter)
    return df


def list(model_name, columns=None, csv_file=None, sort_attr="name", sort_order="asc", use_by_models=False):
    df = to_pandas_dataframe(model_name, use_by_models)
    df = process_df(df, columns, sort_attr, sort_order, csv_file)
    print(tabulate(df, headers="keys", tablefmt="psql", showindex=False))
    print(f"Versions: {df.shape[0]}")


@click.command()
@click.option("--model",
    help="Registered model to filter by.",
    type=str,
    required=False,
    show_default=True
)
@opt_sort_attr
@opt_sort_order
@click.option("--use-by-models",
    help="Use 'by models' variant to search for versions.",
    type=bool,
    required=False
)
@opt_columns
@opt_output_csv_file
def main(model, sort_attr, sort_order, use_by_models, columns, csv_file):
    print("Options:")
    for k, v in locals().items():
        print(f"  {k}: {v}")
    if columns:
        columns = columns.split(",")
    list(model,
        sort_attr=sort_attr,
        sort_order=sort_order,
        use_by_models=use_by_models,
        columns=columns,
        csv_file=csv_file
    )


if __name__ == "__main__":
    main()
| ["[email protected]"] | |
1976cf0f7d94870552d7607100e8d9e5a7980f1f | a1c166a1ac4782f1f0792e0fd21741360373b376 | /frontEnd/widgets/commodity_tree.py | f2d1ae1f2f4111d1292e37489fab3a0caa3674f6 | [] | no_license | xiaomapython/QlaskExplor | 3c7b75866b8276a5c2de3fbfddf779e1a66691d0 | c8b1757d08d06d350f7ca41897bbf4378fde3911 | refs/heads/master | 2020-06-23T08:08:45.169160 | 2019-05-15T02:05:17 | 2019-05-15T02:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | # _*_ coding:utf-8 _*_
# company: RuiDa Futures
# author: zizle

"""Directory tree widget."""

from PyQt5.QtWidgets import QTreeWidget, QTreeWidgetItem
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt, pyqtSignal

from settings import MEDIA_PATH


class Tree(QTreeWidget):
    variety_click_signal = pyqtSignal(str)  # emitted when a commodity variety is clicked

    def __init__(self):
        super(Tree, self).__init__()
        self.__init_style()

    def __init_style(self):
        """
        Initialize the widget's appearance from an inline style sheet.
        :return: None
        """
        style_sheet = """
        QTreeWidget {
            outline: 0px;
            min-width: 270px;
            max-width: 270px;
            color: black;
            background: #F5F5F5;
            font-size: 13px;
            border: none;
        }
        QHeaderView {
            font-size: 14px;
        }
        QTreeWidget::Item {
            width: 100%;
            height: 30px;
            border-bottom: 1px solid #EEEEEE;
        }
        QTreeWidget::Item:selected {
            background: powderblue;
        }
        QTreeWidget::Item:hover {
            background: lightgrey;
        }
        """
        self.setHeaderLabels(["商品"])  # tree header label ("commodity"); a label list is easy to add to or remove from
        self.setStyleSheet(style_sheet)  # apply the style sheet above
        self.setHeaderHidden(True)  # hide the tree header
        # Allow a custom right-click menu: bind the `customContextMenuRequested`
        # event and implement the corresponding slot.
        self.setContextMenuPolicy(Qt.CustomContextMenu)


class TreeItem(QTreeWidgetItem):
    def __init__(self, collected=False):
        super(TreeItem, self).__init__()
        self.is_collected = collected
        self.__init_style()

    def __init_style(self):
        """
        Start in the 'not collected' state unless flagged otherwise.
        """
        self.no_collection_icon = QIcon(MEDIA_PATH + "no_collection.png")
        self.collected_icon = QIcon(MEDIA_PATH + "collected.png")
        if self.is_collected:
            self.collected()

    def no_collection(self):
        self.setIcon(0, QIcon())

    def collected(self):
        self.setIcon(0, self.collected_icon)
| ["[email protected]"] | |
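A minimal usage sketch for the two widgets above (my example; it assumes the project's settings module supplies MEDIA_PATH, as the import at the top of the file does):

from PyQt5.QtWidgets import QApplication

app = QApplication([])
tree = Tree()
item = TreeItem(collected=True)  # starts with the 'collected' icon
item.setText(0, "copper")        # a commodity variety
tree.addTopLevelItem(item)
tree.show()
app.exec_()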
ca195756563c4334cb337249ba85c5e2e578d954 | 04803c70bb97012b7d500a177ac0240fb2ddbe38 | /1heptane/pdep/network3564_1.py | ec5a036766e00639f0b96adc90a739908204914a | [] | no_license | shenghuiqin/chpd | 735e0415f6688d88579fc935459c1b0f53596d1d | 396ba54629036e3f2be0b3fabe09b78c90d56939 | refs/heads/master | 2023-03-01T23:29:02.118150 | 2019-10-05T04:02:23 | 2019-10-05T04:02:23 | 192,084,217 | 0 | 0 | null | 2019-06-18T18:33:13 | 2019-06-15T13:52:28 | HTML | UTF-8 | Python | false | false | 115,411 | py | species(
label = '[CH2]C(C[CH]C=C)CCC(18964)',
structure = SMILES('[CH2]C=CCC([CH2])CCC'),
E0 = (203.219,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2995,3025,975,1000,1300,1375,400,500,1630,1680,1380,1390,370,380,2900,435,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.00456,0.0969264,-5.70261e-05,1.14194e-08,1.13568e-12,24632.7,40.6139], Tmin=(100,'K'), Tmax=(1133.17,'K')), NASAPolynomial(coeffs=[17.3478,0.0503351,-1.94316e-05,3.46811e-09,-2.35473e-13,19305.6,-55.3731], Tmin=(1133.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(203.219,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Isobutyl)"""),
)
species(
label = 'C=CCCC(134)',
structure = SMILES('C=CCCC'),
E0 = (-40.302,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,434.463,434.474],'cm^-1')),
HinderedRotor(inertia=(0.0736945,'amu*angstrom^2'), symmetry=1, barrier=(9.87096,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.073698,'amu*angstrom^2'), symmetry=1, barrier=(9.87114,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0736934,'amu*angstrom^2'), symmetry=1, barrier=(9.87114,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (70.1329,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3052.11,'J/mol'), sigma=(5.53315,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=476.73 K, Pc=40.88 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.9015,0.0362536,1.29132e-05,-3.55767e-08,1.43068e-11,-4763.1,19.8563], Tmin=(100,'K'), Tmax=(1027.61,'K')), NASAPolynomial(coeffs=[9.28067,0.0304042,-1.19376e-05,2.20664e-09,-1.55067e-13,-7487.42,-21.8213], Tmin=(1027.61,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-40.302,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH)"""),
)
species(
label = 'butadiene13(2459)',
structure = SMILES('C=CC=C'),
E0 = (96.4553,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(1.30712,'amu*angstrom^2'), symmetry=1, barrier=(30.0532,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2968.28,'J/mol'), sigma=(5.18,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.80599,0.0102584,6.1726e-05,-9.01643e-08,3.59117e-11,11658.5,12.0621], Tmin=(100,'K'), Tmax=(946.047,'K')), NASAPolynomial(coeffs=[12.4694,0.0100554,-2.41207e-06,4.57077e-10,-3.93161e-14,8010.78,-43.6375], Tmin=(946.047,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(96.4553,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label="""butadiene13""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2][CH]C1CC(CCC)C1(19610)',
structure = SMILES('[CH2][CH]C1CC(CCC)C1'),
E0 = (276.677,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0621463,0.070232,1.70588e-05,-6.11226e-08,2.51145e-11,33439.3,38.0687], Tmin=(100,'K'), Tmax=(1038.8,'K')), NASAPolynomial(coeffs=[15.6132,0.0534377,-2.15976e-05,4.05713e-09,-2.87677e-13,27832,-49.4664], Tmin=(1038.8,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(276.677,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(586.17,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Cyclobutane) + radical(Cs_S) + radical(RCCJ)"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = '[CH2]C=CCC(=C)CCC(19611)',
structure = SMILES('[CH2]C=CCC(=C)CCC'),
E0 = (118.743,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,350,440,435,1725,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3000,3100,440,815,1455,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (123.215,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.757338,0.0901503,-4.26196e-05,-3.14816e-09,6.04774e-12,14464.9,37.906], Tmin=(100,'K'), Tmax=(1097.31,'K')), NASAPolynomial(coeffs=[17.8177,0.0476783,-1.9063e-05,3.50159e-09,-2.42927e-13,8868.88,-60.3568], Tmin=(1097.31,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(118.743,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(557.07,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(CC=C=C)CCC(19612)',
structure = SMILES('[CH2]C(CC=C=C)CCC'),
E0 = (228.324,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([540,610,2055,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3000,3100,440,815,1455,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (123.215,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.851484,0.0977103,-6.97393e-05,2.65428e-08,-4.18308e-12,27642.9,38.8377], Tmin=(100,'K'), Tmax=(1468.28,'K')), NASAPolynomial(coeffs=[17.5855,0.0474826,-1.84263e-05,3.24422e-09,-2.16083e-13,22228.8,-57.1917], Tmin=(1468.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(228.324,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(557.07,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cdd-CdsCds) + radical(Isobutyl)"""),
)
species(
label = 'npropyl(83)',
structure = SMILES('[CH2]CC'),
E0 = (87.0621,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000],'cm^-1')),
HinderedRotor(inertia=(0.0928812,'amu*angstrom^2'), symmetry=1, barrier=(2.13552,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.092914,'amu*angstrom^2'), symmetry=1, barrier=(2.13628,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (43.0877,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2218.31,'J/mol'), sigma=(4.982,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.02815,0.0147023,2.4051e-05,-3.66738e-08,1.38611e-11,10512.1,12.4699], Tmin=(100,'K'), Tmax=(984.464,'K')), NASAPolynomial(coeffs=[6.16543,0.0184495,-6.79029e-06,1.23049e-09,-8.63866e-14,9095.06,-6.67607], Tmin=(984.464,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(87.0621,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(224.491,'J/(mol*K)'), label="""npropyl""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C=CCC=C(18982)',
structure = SMILES('[CH2]C=CCC=C'),
E0 = (204.223,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,3000,3100,440,815,1455,1000,2950,3100,1380,975,1025,1650,180,1108.32],'cm^-1')),
HinderedRotor(inertia=(0.776098,'amu*angstrom^2'), symmetry=1, barrier=(17.844,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.776657,'amu*angstrom^2'), symmetry=1, barrier=(17.8569,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0809512,'amu*angstrom^2'), symmetry=1, barrier=(17.875,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (81.1357,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.56198,0.0414532,1.08835e-05,-4.06133e-08,1.75684e-11,24660.9,23.6619], Tmin=(100,'K'), Tmax=(1001.1,'K')), NASAPolynomial(coeffs=[12.2189,0.0272833,-1.04544e-05,1.94479e-09,-1.38781e-13,21103.5,-34.8737], Tmin=(1001.1,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(204.223,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P)"""),
)
species(
label = '[CH2][CH]C=C(2458)',
structure = SMILES('[CH2]C=C[CH2]'),
E0 = (274.714,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100],'cm^-1')),
HinderedRotor(inertia=(0.210311,'amu*angstrom^2'), symmetry=1, barrier=(25.2351,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.779031,'amu*angstrom^2'), symmetry=1, barrier=(93.4717,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.56318,0.0223429,1.87067e-05,-3.93099e-08,1.63982e-11,33100.5,13.4097], Tmin=(100,'K'), Tmax=(974.264,'K')), NASAPolynomial(coeffs=[9.82995,0.0151966,-5.22272e-06,9.67656e-10,-7.0786e-14,30607.7,-26.985], Tmin=(974.264,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(274.714,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C=CC[C](C)CCC(19613)',
structure = SMILES('[CH2]C=CC[C](C)CCC'),
E0 = (183.559,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.081043,0.0929016,-5.8415e-05,1.91666e-08,-2.70622e-12,22220.2,36.6268], Tmin=(100,'K'), Tmax=(1515.48,'K')), NASAPolynomial(coeffs=[12.5011,0.0596923,-2.55454e-05,4.70722e-09,-3.2096e-13,18406.6,-29.3055], Tmin=(1515.48,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(183.559,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Tertalkyl)"""),
)
species(
label = '[CH2]C(CC=[C]C)CCC(19614)',
structure = SMILES('[CH2]C(CC=[C]C)CCC'),
E0 = (289.561,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.479983,0.0976225,-6.86974e-05,2.6764e-08,-4.48823e-12,34987.8,38.9123], Tmin=(100,'K'), Tmax=(1339.72,'K')), NASAPolynomial(coeffs=[13.0443,0.057243,-2.34867e-05,4.26633e-09,-2.89996e-13,31364.1,-30.2895], Tmin=(1339.72,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(289.561,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Isobutyl)"""),
)
species(
label = '[CH2]C=CCC(C)[CH]CC(19615)',
structure = SMILES('[CH2]C=CCC(C)[CH]CC'),
E0 = (192.679,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.05352,0.0959756,-5.98636e-05,1.84955e-08,-2.30252e-12,23368.7,41.611], Tmin=(100,'K'), Tmax=(1846.87,'K')), NASAPolynomial(coeffs=[23.5888,0.0426037,-1.6515e-05,2.84759e-09,-1.84323e-13,14266.7,-92.3911], Tmin=(1846.87,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(192.679,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cs_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C=C[CH]C(C)CCC(19616)',
structure = SMILES('[CH2]C=C[CH]C(C)CCC'),
E0 = (139.249,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.758115,0.0892362,-3.28594e-05,-1.30305e-08,9.16431e-12,16932.1,36.8413], Tmin=(100,'K'), Tmax=(1089.67,'K')), NASAPolynomial(coeffs=[17.3085,0.0516986,-2.08071e-05,3.8361e-09,-2.66775e-13,11286,-59.7119], Tmin=(1089.67,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(139.249,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(C[C]=CC)CCC(19617)',
structure = SMILES('[CH2]C(C[C]=CC)CCC'),
E0 = (289.561,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3000,3100,440,815,1455,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.479983,0.0976225,-6.86974e-05,2.6764e-08,-4.48823e-12,34987.8,38.9123], Tmin=(100,'K'), Tmax=(1339.72,'K')), NASAPolynomial(coeffs=[13.0443,0.057243,-2.34867e-05,4.26633e-09,-2.89996e-13,31364.1,-30.2895], Tmin=(1339.72,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(289.561,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Isobutyl)"""),
)
species(
label = '[CH2]C=CCC(C)C[CH]C(19618)',
structure = SMILES('[CH2]C=CCC(C)C[CH]C'),
E0 = (192.583,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.73241,0.0941234,-5.81361e-05,1.82037e-08,-2.33988e-12,23340.6,40.6067], Tmin=(100,'K'), Tmax=(1754.69,'K')), NASAPolynomial(coeffs=[19.4012,0.0482265,-1.89007e-05,3.29675e-09,-2.1599e-13,16275,-67.8469], Tmin=(1754.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(192.583,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(RCCJC) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C=[C]CC(C)CCC(19619)',
structure = SMILES('[CH2]C=[C]CC(C)CCC'),
E0 = (235.978,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3000,3100,440,815,1455,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.02908,0.0993039,-6.63246e-05,2.26802e-08,-3.1749e-12,28571.8,39.4082], Tmin=(100,'K'), Tmax=(1639.68,'K')), NASAPolynomial(coeffs=[20.3467,0.0471576,-1.86203e-05,3.28432e-09,-2.17625e-13,21561.9,-74.2877], Tmin=(1639.68,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(235.978,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C([CH]C=CC)CCC(19620)',
structure = SMILES('[CH2]C(C=C[CH]C)CCC'),
E0 = (191.689,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.697891,0.0897289,-3.93976e-05,-4.90587e-09,6.41929e-12,23235.5,38.8261], Tmin=(100,'K'), Tmax=(1083.89,'K')), NASAPolynomial(coeffs=[16.0406,0.0518913,-2.01567e-05,3.63223e-09,-2.48964e-13,18201.1,-49.7612], Tmin=(1083.89,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(191.689,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Isobutyl) + radical(Allyl_S)"""),
)
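# Note (added observation, not RMG output): in several entries here the
# `label` and the `structure` SMILES differ, e.g. '[CH2]C([CH]C=CC)CCC' vs
# SMILES('[CH2]C(C=C[CH]C)CCC') just above. These appear to be alternate
# resonance forms of the same radical: the label preserves the form under
# which the species was first generated, while `structure` holds the
# representative resonance structure used for the thermo estimate.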
species(
label = '[CH2]C=CCC(C)CC[CH2](19621)',
structure = SMILES('[CH2]C=CCC(C)CC[CH2]'),
E0 = (203.383,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2995,3025,975,1000,1300,1375,400,500,1630,1680,1380,1390,370,380,2900,435,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.00588,0.0961003,-5.48565e-05,1.02065e-08,1.07238e-12,24653.1,40.6311], Tmin=(100,'K'), Tmax=(1193.41,'K')), NASAPolynomial(coeffs=[18.1918,0.0500872,-2.00645e-05,3.6427e-09,-2.49018e-13,18765.5,-60.8507], Tmin=(1193.41,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(203.383,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(RCCJ)"""),
)
species(
label = '[CH2][C]=CCC(C)CCC(19622)',
structure = SMILES('[CH2][C]=CCC(C)CCC'),
E0 = (235.978,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3000,3100,440,815,1455,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.02908,0.0993039,-6.63246e-05,2.26802e-08,-3.1749e-12,28571.8,39.4082], Tmin=(100,'K'), Tmax=(1639.68,'K')), NASAPolynomial(coeffs=[20.3467,0.0471576,-1.86203e-05,3.28432e-09,-2.17625e-13,21561.9,-74.2877], Tmin=(1639.68,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(235.978,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2][C](CC=CC)CCC(19623)',
structure = SMILES('[CH2][C](CC=CC)CCC'),
E0 = (237.142,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,360,370,350,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.149143,0.0975641,-7.9216e-05,4.29071e-08,-1.0986e-11,28665.6,38.4153], Tmin=(100,'K'), Tmax=(879.094,'K')), NASAPolynomial(coeffs=[6.17688,0.0687798,-3.0101e-05,5.66042e-09,-3.93666e-13,27553.4,8.71105], Tmin=(879.094,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(237.142,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Isobutyl) + radical(Tertalkyl)"""),
)
species(
label = '[CH2]C([CH]CC)CC=CC(19624)',
structure = SMILES('[CH2]C([CH]CC)CC=CC'),
E0 = (246.262,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1380,1390,370,380,2900,435,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3100,440,815,1455,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.340847,0.0927875,-5.85173e-05,1.93309e-08,-2.70102e-12,29776.1,40.494], Tmin=(100,'K'), Tmax=(1575.47,'K')), NASAPolynomial(coeffs=[14.6465,0.0547355,-2.22881e-05,4.00036e-09,-2.68317e-13,25053.7,-38.624], Tmin=(1575.47,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(246.262,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cs_S) + radical(Isobutyl)"""),
)
species(
label = '[CH2]C(C[CH]C)CC=CC(19625)',
structure = SMILES('[CH2]C(C[CH]C)CC=CC'),
E0 = (246.166,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.14324,0.0920477,-5.94491e-05,2.12823e-08,-3.34857e-12,29754.7,39.9613], Tmin=(100,'K'), Tmax=(1380.74,'K')), NASAPolynomial(coeffs=[10.9307,0.0599667,-2.45975e-05,4.45501e-09,-3.01803e-13,26696.6,-17.037], Tmin=(1380.74,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(246.166,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Isobutyl) + radical(RCCJC)"""),
)
species(
label = '[CH2]CCC([CH2])CC=CC(19626)',
structure = SMILES('[CH2]CCC([CH2])CC=CC'),
E0 = (256.966,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2995,3025,975,1000,1300,1375,400,500,1630,1680,1380,1390,370,380,2900,435,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.713364,0.0972175,-6.5884e-05,2.38488e-08,-3.62063e-12,31080.4,41.0697], Tmin=(100,'K'), Tmax=(1495.27,'K')), NASAPolynomial(coeffs=[16.165,0.0520659,-2.05893e-05,3.65407e-09,-2.44184e-13,26032.9,-47.149], Tmin=(1495.27,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(256.966,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Isobutyl) + radical(RCCJ)"""),
)
species(
label = '[CH2][CH]CCC(137)',
structure = SMILES('[CH2][CH]CCC'),
E0 = (231.608,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,2520.07,2520.09],'cm^-1')),
HinderedRotor(inertia=(0.115877,'amu*angstrom^2'), symmetry=1, barrier=(4.7946,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.115947,'amu*angstrom^2'), symmetry=1, barrier=(4.79475,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.32425,'amu*angstrom^2'), symmetry=1, barrier=(54.7442,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00289323,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (70.1329,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.95927,0.043164,-1.89531e-05,3.2896e-09,-1.25701e-13,27931,23.7177], Tmin=(100,'K'), Tmax=(1936.29,'K')), NASAPolynomial(coeffs=[12.6506,0.0264639,-1.01885e-05,1.70855e-09,-1.07054e-13,22781,-37.5335], Tmin=(1936.29,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(231.608,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJ) + radical(RCCJC)"""),
)
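# --- Illustrative sketch (assumption, not part of the generated input): the
# SingleExponentialDown(alpha0, T0, n) entries above express the average
# energy transferred per deactivating collision as a power law in
# temperature, <dE_down>(T) = alpha0 * (T/T0)**n. The helper name is
# hypothetical; the defaults mirror the parameters used throughout this file.
def average_energy_down(T, alpha0=3.5886, T0=300.0, n=0.85):
    """Mean downward collisional energy transfer, kJ/mol, at T in K."""
    return alpha0 * (T / T0) ** n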
species(
label = '[CH2]C([CH2])CCC(453)',
structure = SMILES('[CH2]C([CH2])CCC'),
E0 = (212.606,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,1380,1390,370,380,2900,435,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,282.908,282.921],'cm^-1')),
HinderedRotor(inertia=(1.50122,'amu*angstrom^2'), symmetry=1, barrier=(85.2679,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00828994,'amu*angstrom^2'), symmetry=1, barrier=(85.267,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.000839525,'amu*angstrom^2'), symmetry=1, barrier=(8.63533,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00210608,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.50198,'amu*angstrom^2'), symmetry=1, barrier=(85.2677,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (84.1595,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.946288,0.0603019,-2.78545e-05,-1.90127e-09,4.43179e-12,25686.5,27.643], Tmin=(100,'K'), Tmax=(992.809,'K')), NASAPolynomial(coeffs=[10.2432,0.036692,-1.31042e-05,2.24241e-09,-1.49184e-13,23158.1,-20.5791], Tmin=(992.809,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(212.606,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Isobutyl) + radical(Isobutyl)"""),
)
species(
label = '[CH]=C[CH2](2461)',
structure = SMILES('[CH]C=C'),
E0 = (376.654,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,229.711,230.18,230.787],'cm^-1')),
HinderedRotor(inertia=(1.33306,'amu*angstrom^2'), symmetry=1, barrier=(50.5153,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (40.0639,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.31912,0.00817959,3.34736e-05,-4.36194e-08,1.58213e-11,45331.5,10.6389], Tmin=(100,'K'), Tmax=(983.754,'K')), NASAPolynomial(coeffs=[5.36755,0.0170743,-6.35108e-06,1.1662e-09,-8.2762e-14,44095,-3.44606], Tmin=(983.754,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(376.654,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(203.705,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH2]C(CCC)CC1[CH]C1(19627)',
structure = SMILES('[CH2]C(CCC)CC1[CH]C1'),
E0 = (318.998,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.424805,0.0823929,-1.96339e-05,-2.47502e-08,1.32711e-11,38538.7,39.8334], Tmin=(100,'K'), Tmax=(1043.92,'K')), NASAPolynomial(coeffs=[15.4396,0.0521879,-2.01765e-05,3.65951e-09,-2.53132e-13,33560,-45.3669], Tmin=(1043.92,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(318.998,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(582.013,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Cyclopropane) + radical(Isobutyl) + radical(cyclopropane)"""),
)
species(
label = '[CH2]C1[CH]CC(CCC)C1(19628)',
structure = SMILES('[CH2]C1[CH]CC(CCC)C1'),
E0 = (194.747,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.304986,0.0595826,4.84881e-05,-9.66403e-08,3.90636e-11,23574.9,36.9338], Tmin=(100,'K'), Tmax=(982.575,'K')), NASAPolynomial(coeffs=[15.2943,0.0511038,-1.87783e-05,3.42077e-09,-2.41972e-13,18093,-48.0238], Tmin=(982.575,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(194.747,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(590.328,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(Cyclopentane) + radical(Cs_S) + radical(Isobutyl)"""),
)
species(
label = 'C=C(CC=CC)CCC(19629)',
structure = SMILES('C=C(CC=CC)CCC'),
E0 = (-32.7566,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.890114,0.0939586,-5.45773e-05,1.29399e-08,-4.14692e-13,-3752.5,38.119], Tmin=(100,'K'), Tmax=(1297.3,'K')), NASAPolynomial(coeffs=[18.0797,0.0497138,-1.98907e-05,3.5792e-09,-2.41968e-13,-9873.14,-62.957], Tmin=(1297.3,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-32.7566,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(582.013,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH)"""),
)
species(
label = 'C=C=CCC(C)CCC(19630)',
structure = SMILES('C=C=CCC(C)CCC'),
E0 = (23.2416,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.12876,0.0986752,-6.47458e-05,2.15334e-08,-2.90927e-12,2991.66,38.08], Tmin=(100,'K'), Tmax=(1707.34,'K')), NASAPolynomial(coeffs=[21.9134,0.0446915,-1.7318e-05,3.01424e-09,-1.97577e-13,-4876.52,-85.4114], Tmin=(1707.34,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(23.2416,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(582.013,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cdd-CdsCds)"""),
)
species(
label = 'CH2(S)(23)',
structure = SMILES('[CH2]'),
E0 = (419.862,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
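# --- Illustrative sketch (assumption, not produced by RMG): the
# TransportData(epsilon, sigma, ...) collision models above carry
# Lennard-Jones well depth and diameter; the corresponding pair potential is
# V(r) = 4*epsilon*((sigma/r)**12 - (sigma/r)**6), with epsilon given here on
# a molar basis (J/mol) and sigma in angstroms.
def lennard_jones(r, epsilon, sigma):
    """Lennard-Jones potential at separation r (same length units as sigma)."""
    return 4.0 * epsilon * ((sigma / r) ** 12 - (sigma / r) ** 6)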
species(
label = '[CH2]C(CC)C[CH]C=C(18957)',
structure = SMILES('[CH2]C=CCC([CH2])CC'),
E0 = (226.999,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2995,3025,975,1000,1300,1375,400,500,1630,1680,1380,1390,370,380,2900,435,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (110.197,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3599.97,'J/mol'), sigma=(6.5858,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=562.31 K, Pc=28.6 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.195739,0.0802968,-3.70645e-05,-4.04231e-09,6.18488e-12,27463,35.4614], Tmin=(100,'K'), Tmax=(1053.63,'K')), NASAPolynomial(coeffs=[15.1902,0.0439632,-1.6769e-05,3.00361e-09,-2.0579e-13,22995.3,-45.3861], Tmin=(1053.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(226.999,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(507.183,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Isobutyl) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C=CC[CH]CCCC(19631)',
structure = SMILES('[CH2]C=CC[CH]CCCC'),
E0 = (198.289,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.16206,0.0911617,-5.44123e-05,1.6354e-08,-2.05532e-12,23997.5,38.6317], Tmin=(100,'K'), Tmax=(1718.16,'K')), NASAPolynomial(coeffs=[15.344,0.0550625,-2.28968e-05,4.12563e-09,-2.76027e-13,18669.1,-44.5688], Tmin=(1718.16,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(198.289,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(RCCJCC)"""),
)
species(
label = 'C=C[CH]CC[CH]CCC(18962)',
structure = SMILES('[CH2]C=CCC[CH]CCC'),
E0 = (198.289,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.16206,0.0911617,-5.44123e-05,1.6354e-08,-2.05532e-12,23997.5,38.6317], Tmin=(100,'K'), Tmax=(1718.16,'K')), NASAPolynomial(coeffs=[15.344,0.0550625,-2.28968e-05,4.12563e-09,-2.76027e-13,18669.1,-44.5688], Tmin=(1718.16,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(198.289,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(RCCJCC) + radical(Allyl_P)"""),
)
species(
label = 'CCCC1CC=CCC1(18966)',
structure = SMILES('CCCC1CC=CCC1'),
E0 = (-101.651,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.24244,0.0580774,5.94795e-05,-1.09147e-07,4.30232e-11,-12068.8,30.4585], Tmin=(100,'K'), Tmax=(997.718,'K')), NASAPolynomial(coeffs=[16.8275,0.0512052,-1.98226e-05,3.73437e-09,-2.69184e-13,-18345.7,-64.3877], Tmin=(997.718,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-101.651,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(594.485,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(Cyclohexene)"""),
)
species(
label = 'CH2(19)',
structure = SMILES('[CH2]'),
E0 = (381.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C=CC[CH]CCC(19211)',
structure = SMILES('[CH2]C=CC[CH]CCC'),
E0 = (222.069,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (110.197,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.357918,0.0776937,-4.44559e-05,1.2525e-08,-1.44906e-12,26840.8,34.533], Tmin=(100,'K'), Tmax=(1881.13,'K')), NASAPolynomial(coeffs=[15.8616,0.0447269,-1.81682e-05,3.20862e-09,-2.10916e-13,21008,-50.0593], Tmin=(1881.13,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(222.069,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(507.183,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(RCCJCC) + radical(Allyl_P)"""),
)
species(
label = '[CH]=CCC([CH2])CCC(3395)',
structure = SMILES('[CH]=CCC([CH2])CCC'),
E0 = (334.841,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3000,3100,440,815,1455,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (110.197,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.44624,0.0874639,-6.16365e-05,2.31592e-08,-3.58704e-12,40440.6,36.9315], Tmin=(100,'K'), Tmax=(1500.8,'K')), NASAPolynomial(coeffs=[16.8558,0.0413502,-1.55478e-05,2.68644e-09,-1.76756e-13,35247.2,-53.5655], Tmin=(1500.8,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(334.841,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(507.183,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_P) + radical(Isobutyl)"""),
)
species(
label = '[CH2]C(C=CC=C)CCC(19632)',
structure = SMILES('[CH2]C(C=CC=C)CCC'),
E0 = (161.02,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3100,440,815,1455,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (123.215,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.799564,0.0909258,-3.98921e-05,-1.20381e-08,1.11709e-11,19551.9,38.2099], Tmin=(100,'K'), Tmax=(1002.13,'K')), NASAPolynomial(coeffs=[18.5766,0.0443638,-1.62663e-05,2.89202e-09,-1.99271e-13,14122.9,-63.0208], Tmin=(1002.13,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(161.02,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(557.07,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(Isobutyl)"""),
)
species(
label = '[CH2]C([CH]CC=C)CCC(19298)',
structure = SMILES('[CH2]C([CH]CC=C)CCC'),
E0 = (261.854,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3000,3100,440,815,1455,1000,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.589856,0.0948129,-6.1189e-05,2.06344e-08,-2.90842e-12,31663.5,41.8359], Tmin=(100,'K'), Tmax=(1591.62,'K')), NASAPolynomial(coeffs=[16.5009,0.051861,-2.07096e-05,3.6792e-09,-2.45223e-13,26223.1,-48.5603], Tmin=(1591.62,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(261.854,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Isobutyl) + radical(Cs_S)"""),
)
species(
label = '[CH2]C(CCC)CC[C]=C(19633)',
structure = SMILES('[CH2]C(CCC)CC[C]=C'),
E0 = (305.154,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2764.29,2778.57,2792.86,2807.14,2821.43,2835.71,2850,1425,1433.33,1441.67,1450,1225,1241.67,1258.33,1275,1270,1293.33,1316.67,1340,700,733.333,766.667,800,300,333.333,366.667,400,3000,3100,440,815,1455,1000,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.692429,0.0993083,-7.05201e-05,2.73126e-08,-4.47774e-12,36873.3,40.1156], Tmin=(100,'K'), Tmax=(1388.1,'K')), NASAPolynomial(coeffs=[14.9492,0.0542346,-2.18127e-05,3.91964e-09,-2.64607e-13,32530.9,-40.4758], Tmin=(1388.1,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(305.154,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_S) + radical(Isobutyl)"""),
)
species(
label = '[CH2][C](CCC)CCC=C(19634)',
structure = SMILES('[CH2][C](CCC)CCC=C'),
E0 = (252.735,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,360,370,350,2750,2764.29,2778.57,2792.86,2807.14,2821.43,2835.71,2850,1425,1433.33,1441.67,1450,1225,1241.67,1258.33,1275,1270,1293.33,1316.67,1340,700,733.333,766.667,800,300,333.333,366.667,400,3000,3100,440,815,1455,1000,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.151313,0.0969292,-7.36301e-05,3.47244e-08,-7.56758e-12,30541.7,38.8535], Tmin=(100,'K'), Tmax=(1019.68,'K')), NASAPolynomial(coeffs=[7.62718,0.0664166,-2.87456e-05,5.37964e-09,-3.73157e-13,28955.4,1.175], Tmin=(1019.68,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(252.735,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Isobutyl) + radical(Tertalkyl)"""),
)
species(
label = '[CH]=CCCC([CH2])CCC(19635)',
structure = SMILES('[CH]=CCCC([CH2])CCC'),
E0 = (314.408,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.00105,0.101265,-7.24517e-05,2.77636e-08,-4.41594e-12,38001.6,41.161], Tmin=(100,'K'), Tmax=(1453.42,'K')), NASAPolynomial(coeffs=[17.6888,0.0498285,-1.93668e-05,3.41428e-09,-2.27679e-13,32568.7,-55.9952], Tmin=(1453.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(314.408,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Isobutyl) + radical(Cds_P)"""),
)
species(
label = '[CH2]C([CH]CC)CCC=C(19636)',
structure = SMILES('[CH2]C([CH]CC)CCC=C'),
E0 = (261.854,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3000,3100,440,815,1455,1000,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.589856,0.0948129,-6.1189e-05,2.06344e-08,-2.90842e-12,31663.5,41.8359], Tmin=(100,'K'), Tmax=(1591.62,'K')), NASAPolynomial(coeffs=[16.5009,0.051861,-2.07096e-05,3.6792e-09,-2.45223e-13,26223.1,-48.5603], Tmin=(1591.62,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(261.854,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Isobutyl) + radical(Cs_S)"""),
)
species(
label = '[CH2]C(C[CH]C)CCC=C(19637)',
structure = SMILES('[CH2]C(C[CH]C)CCC=C'),
E0 = (261.758,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,3000,3100,440,815,1455,1000,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.359603,0.0937754,-6.13967e-05,2.1962e-08,-3.38185e-12,31640.4,41.1789], Tmin=(100,'K'), Tmax=(1437.94,'K')), NASAPolynomial(coeffs=[12.9224,0.0568283,-2.28553e-05,4.09334e-09,-2.75238e-13,27820.6,-27.7236], Tmin=(1437.94,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(261.758,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Isobutyl) + radical(RCCJC)"""),
)
species(
label = '[CH2]CCC([CH2])CCC=C(19638)',
structure = SMILES('[CH2]CCC([CH2])CCC=C'),
E0 = (272.559,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2764.29,2778.57,2792.86,2807.14,2821.43,2835.71,2850,1425,1433.33,1441.67,1450,1225,1241.67,1258.33,1275,1270,1293.33,1316.67,1340,700,733.333,766.667,800,300,333.333,366.667,400,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3010,987.5,1337.5,450,1655,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.955481,0.0991807,-6.84065e-05,2.5026e-08,-3.79352e-12,32967.5,42.3853], Tmin=(100,'K'), Tmax=(1518.25,'K')), NASAPolynomial(coeffs=[18.0298,0.0491624,-1.89899e-05,3.32732e-09,-2.20588e-13,27202.5,-57.1354], Tmin=(1518.25,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(272.559,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Isobutyl) + radical(RCCJ)"""),
)
species(
label = '[CH]=C[CH]CC(C)CCC(19639)',
structure = SMILES('[CH]C=CCC(C)CCC'),
E0 = (217.322,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2995,3025,975,1000,1300,1375,400,500,1630,1680,1380,1390,370,380,2900,435,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,200,800,960,1120,1280,1440,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.11692,0.0984705,-5.41017e-05,1.15166e-08,-7.6438e-14,26333.5,40.084], Tmin=(100,'K'), Tmax=(1318.71,'K')), NASAPolynomial(coeffs=[17.8798,0.055693,-2.23287e-05,3.99025e-09,-2.67927e-13,20032.5,-61.7137], Tmin=(1318.71,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(217.322,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)"""),
)
species(
label = 'CCCC1C[CH][CH]CC1(19640)',
structure = SMILES('CCCC1C[CH][CH]CC1'),
E0 = (141.981,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.547348,0.0635773,1.5915e-05,-4.14907e-08,1.38131e-11,17210.5,32.9342], Tmin=(100,'K'), Tmax=(1219.42,'K')), NASAPolynomial(coeffs=[9.42627,0.0674798,-2.95122e-05,5.55548e-09,-3.85567e-13,12589.4,-21.7315], Tmin=(1219.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(141.981,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(594.485,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + ring(Cyclohexane) + radical(cyclohexane) + radical(cyclohexane)"""),
)
species(
label = 'C=CCCC(=C)CCC(19641)',
structure = SMILES('C=CCCC(=C)CCC'),
E0 = (-21.7808,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.873259,0.0932511,-4.90714e-05,5.37815e-09,2.51161e-12,-2432.65,38.4594], Tmin=(100,'K'), Tmax=(1173.37,'K')), NASAPolynomial(coeffs=[17.5493,0.0506184,-2.03552e-05,3.71233e-09,-2.54769e-13,-8144.41,-59.2808], Tmin=(1173.37,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-21.7808,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(582.013,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
label = 'C=CC=CC(C)CCC(19642)',
structure = SMILES('C=CC=CC(C)CCC'),
E0 = (-44.0625,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.83317,0.0893963,-2.77949e-05,-2.42224e-08,1.47409e-11,-5110.8,36.5506], Tmin=(100,'K'), Tmax=(1024.32,'K')), NASAPolynomial(coeffs=[19.0133,0.0473634,-1.81814e-05,3.32455e-09,-2.32624e-13,-11037.3,-68.7565], Tmin=(1024.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-44.0625,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(582.013,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH)"""),
)
species(
label = '[CH2]C(C=C)C([CH2])CCC(18965)',
structure = SMILES('[CH2]C(C=C)C([CH2])CCC'),
E0 = (264.423,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3683.39,'J/mol'), sigma=(6.85685,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=575.34 K, Pc=25.92 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.949974,0.0973429,-5.97543e-05,1.31679e-08,1.19907e-12,31990.7,42.1061], Tmin=(100,'K'), Tmax=(1061.85,'K')), NASAPolynomial(coeffs=[16.3523,0.0501543,-1.85067e-05,3.22602e-09,-2.1662e-13,27302.1,-47.1812], Tmin=(1061.85,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(264.423,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(577.856,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Isobutyl) + radical(Isobutyl)"""),
)
species(
label = 'C=CC1CC(CCC)C1(18967)',
structure = SMILES('C=CC1CC(CCC)C1'),
E0 = (5.74062,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (124.223,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0443419,0.0623963,5.05502e-05,-1.03539e-07,4.20497e-11,854.666,33.8487], Tmin=(100,'K'), Tmax=(990.456,'K')), NASAPolynomial(coeffs=[18.2204,0.0484407,-1.8348e-05,3.43637e-09,-2.47879e-13,-5661.86,-68.3866], Tmin=(990.456,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(5.74062,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(590.328,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclobutane)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
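# N2 and Ne above serve as the bath gas and therefore carry explicit collision
# models; the transition states below only need an E0, since the kinetics are
# attached to each reaction entry.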
transitionState(
label = 'TS1',
E0 = (203.219,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (276.677,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (330.535,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (451.915,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (318.649,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (285.457,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (305.898,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (410.95,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (344.638,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (322.463,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (451.482,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (289.828,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (280.287,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (621.619,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (256.101,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (275.878,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (326.366,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (307.055,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (258.448,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (397.967,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (506.322,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (589.259,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (429.155,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (263.329,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (228.192,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (228.192,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (646.861,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (363.154,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (363.154,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS30',
E0 = (211.419,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS31',
E0 = (603.632,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS32',
E0 = (716.404,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS33',
E0 = (374.402,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS34',
E0 = (332.983,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS35',
E0 = (386.956,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS36',
E0 = (505.35,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS37',
E0 = (377.279,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS38',
E0 = (471.413,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS39',
E0 = (303.485,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS40',
E0 = (300.858,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS41',
E0 = (300.173,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS42',
E0 = (349.599,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS43',
E0 = (290.697,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS44',
E0 = (292.187,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS45',
E0 = (281.466,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS46',
E0 = (424.358,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS47',
E0 = (211.503,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
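# Reaction list for the network. Each entry records the provenance of its
# RMG-estimated kinetics (rate rule or training reaction, distance of the
# template match, and any path-degeneracy multiplier) in its comment string.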
reaction(
label = 'reaction1',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['C=CCCC(134)', 'butadiene13(2459)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2][CH]C1CC(CCC)C1(19610)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(1.20551e+07,'s^-1'), n=1.225, Ea=(73.4584,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R5_SS_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Exocyclic
Ea raised from 70.0 to 73.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['H(3)', '[CH2]C=CCC(=C)CCC(19611)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(0.0051739,'m^3/(mol*s)'), n=2.82163, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""From training reaction 102 used for Cds-CsCs_Cds-HH;HJ
Exact match found for rate rule [Cds-CsCs_Cds-HH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond
Ea raised from -4.8 to 0 kJ/mol."""),
)
reaction(
label = 'reaction4',
reactants = ['H(3)', '[CH2]C(CC=C=C)CCC(19612)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(4.42e+08,'cm^3/(mol*s)'), n=1.64, Ea=(11.7989,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2713 used for Ca_Cds-HH;HJ
Exact match found for rate rule [Ca_Cds-HH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction5',
reactants = ['npropyl(83)', '[CH2]C=CCC=C(18982)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(1020,'cm^3/(mol*s)'), n=2.41, Ea=(27.3634,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 418 used for Cds-CsH_Cds-HH;CsJ-CsHH
Exact match found for rate rule [Cds-CsH_Cds-HH;CsJ-CsHH]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction6',
reactants = ['C=CCCC(134)', '[CH2][CH]C=C(2458)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(137,'cm^3/(mol*s)','*|/',2), n=2.84, Ea=(51.0448,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""From training reaction 5 used for Cds-CsH_Cds-HH;CsJ-CdHH
Exact match found for rate rule [Cds-CsH_Cds-HH;CsJ-CdHH]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction7',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2]C=CC[C](C)CCC(19613)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(5.265e-07,'s^-1'), n=5.639, Ea=(102.68,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 38 used for R2H_S;C_rad_out_2H;Cs_H_out_Cs2
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_Cs2]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2]C(CC=[C]C)CCC(19614)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(1.63e+08,'s^-1'), n=1.73, Ea=(207.731,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 123 used for R2H_S;C_rad_out_2H;Cd_H_out_doubleC
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cd_H_out_doubleC]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction9',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2]C=CCC(C)[CH]CC(19615)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(1.064e+06,'s^-1'), n=1.93, Ea=(141.419,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 108 used for R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/(NonDeC/Cs)
Exact match found for rate rule [R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/(NonDeC/Cs)]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2]C=C[CH]C(C)CCC(19616)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(25000,'s^-1'), n=2.28, Ea=(119.244,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 85 used for R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/Cd
Exact match found for rate rule [R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['[CH2]C(C[C]=CC)CCC(19617)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H
Exact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2]C=CCC(C)C[CH]C(19618)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(6.44e+09,'s^-1'), n=0.13, Ea=(86.6088,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 131 used for R4H_SSS;C_rad_out_2H;Cs_H_out_H/NonDeC
Exact match found for rate rule [R4H_SSS;C_rad_out_2H;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['[CH2]C=[C]CC(C)CCC(19619)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_RSS;Cd_rad_out;Cs_H_out] for rate rule [R4H_SSS;Cd_rad_out_Cd;Cs_H_out_2H]
Euclidian distance = 2.44948974278
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction14',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2]C([CH]C=CC)CCC(19620)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(2e+10,'s^-1'), n=0, Ea=(418.4,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using template [R4H_SDS;C_rad_out_single;Cs_H_out_H/(NonDeC/Cs)] for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_H/(NonDeC/Cs)]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction15',
reactants = ['[CH2]C=CCC(C)CC[CH2](19621)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(68850,'s^-1'), n=1.68, Ea=(52.7184,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 111 used for R5H_CCC;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R5H_CCC;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction16',
reactants = ['[CH2][C]=CCC(C)CCC(19622)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(263079,'s^-1'), n=1.73643, Ea=(39.8993,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_RSSR;Y_rad_out;Cs_H_out_2H] for rate rule [R5H_DSSS;Cd_rad_out;Cs_H_out_2H]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction17',
reactants = ['[CH2][C](CC=CC)CCC(19623)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(1.45388e+06,'s^-1'), n=1.705, Ea=(89.2238,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R5H_SSMS;Y_rad_out;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction18',
reactants = ['[CH2]C([CH]CC)CC=CC(19624)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(138.3,'s^-1'), n=3.21, Ea=(60.7935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6H;C_rad_out_H/NonDeC;Cs_H_out] for rate rule [R6H_RSSMS;C_rad_out_H/NonDeC;Cs_H_out_2H]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction19',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2]C(C[CH]C)CC=CC(19625)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(1062,'s^-1'), n=1.81, Ea=(55.2288,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 116 used for R7H;C_rad_out_2H;Cs_H_out_H/NonDeC
Exact match found for rate rule [R7H;C_rad_out_2H;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction20',
reactants = ['[CH2]CCC([CH2])CC=CC(19626)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(6.81e+06,'s^-1'), n=1.5, Ea=(141.001,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R8H;C_rad_out_2H;Cs_H_out] for rate rule [R8H;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction21',
reactants = ['[CH2][CH]CCC(137)', '[CH2][CH]C=C(2458)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(3.13324e+07,'m^3/(mol*s)'), n=0.074875, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [C_rad/H2/Cd;Y_rad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Recombination
Ea raised from -0.3 to 0 kJ/mol."""),
)
reaction(
label = 'reaction22',
reactants = ['[CH2]C([CH2])CCC(453)', '[CH]=C[CH2](2461)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(3.67111e+08,'m^3/(mol*s)'), n=-0.424942, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Y_rad;C_rad/H2/Cs] + [Cd_rad;C_pri_rad] for rate rule [Cd_rad;C_rad/H2/Cs]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: R_Recombination
Ea raised from -0.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction23',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2]C(CCC)CC1[CH]C1(19627)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(1.05e+08,'s^-1'), n=1.192, Ea=(225.936,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1600,'K'), comment="""Estimated using template [R3_D;doublebond_intra_pri;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_pri_HNd_Cs;radadd_intra_cs2H]
Euclidian distance = 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction24',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2]C1[CH]CC(CCC)C1(19628)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(1.89094e+07,'s^-1'), n=0.979167, Ea=(60.1101,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R5_CsCs_RH_D;doublebond_intra_pri;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction25',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['C=C(CC=CC)CCC(19629)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(2.1261e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radExo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction26',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['C=C=CCC(C)CCC(19630)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(2.1261e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radExo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction27',
reactants = ['CH2(S)(23)', '[CH2]C(CC)C[CH]C=C(18957)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(1.31021e+06,'m^3/(mol*s)'), n=0.189, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [carbene;C_pri] for rate rule [carbene;C_pri/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: 1,2_Insertion_carbene
Ea raised from -1.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction28',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH2]C=CC[CH]CCCC(19631)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction29',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['C=C[CH]CC[CH]CCC(18962)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction30',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['CCCC1CC=CCC1(18966)'],
transitionState = 'TS30',
kinetics = Arrhenius(A=(2.53377e+11,'s^-1'), n=0.0685, Ea=(8.20064,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R6;C_rad_out_2H;Cpri_rad_out_2H] + [R6_SSSDS;C_rad_out_single;Cpri_rad_out_2H] for rate rule [R6_SSSDS;C_rad_out_2H;Cpri_rad_out_2H]
Euclidian distance = 1.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction31',
reactants = ['CH2(19)', '[CH2]C=CC[CH]CCC(19211)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS31',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H/NonDeC;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction32',
reactants = ['CH2(19)', '[CH]=CCC([CH2])CCC(3395)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS32',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_pri_rad;Birad]
Euclidian distance = 2.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction33',
reactants = ['H(3)', '[CH2]C(C=CC=C)CCC(19632)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS33',
kinetics = Arrhenius(A=(1.35e+08,'cm^3/(mol*s)'), n=1.64, Ea=(1.58992,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2557 used for Cds-CsH_Cds-CdH;HJ
Exact match found for rate rule [Cds-CsH_Cds-CdH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction34',
reactants = ['[CH2][CH]CCC(137)', 'butadiene13(2459)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS34',
kinetics = Arrhenius(A=(0.0534234,'m^3/(mol*s)'), n=2.459, Ea=(4.91982,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-HH_Cds-CdH;CJ]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction35',
reactants = ['[CH2]C([CH]CC=C)CCC(19298)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS35',
kinetics = Arrhenius(A=(1.682e+10,'s^-1'), n=0.35, Ea=(125.102,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 160 used for R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction36',
reactants = ['[CH2]C(CCC)CC[C]=C(19633)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS36',
kinetics = Arrhenius(A=(1.9054e+11,'s^-1'), n=0.853, Ea=(200.196,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;Cd_rad_out_Cd;Cs_H_out_H/(NonDeC/Cs)]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction37',
reactants = ['[CH2][C](CCC)CCC=C(19634)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS37',
kinetics = Arrhenius(A=(1.29711e+07,'s^-1'), n=1.52333, Ea=(124.544,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_Cs;Y_rad_out;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction38',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['[CH]=CCCC([CH2])CCC(19635)'],
transitionState = 'TS38',
kinetics = Arrhenius(A=(8.32e+10,'s^-1'), n=0.77, Ea=(268.194,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 195 used for R3H_SD;C_rad_out_H/NonDeC;Cd_H_out_singleH
Exact match found for rate rule [R3H_SD;C_rad_out_H/NonDeC;Cd_H_out_singleH]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction39',
reactants = ['[CH2]C([CH]CC)CCC=C(19636)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS39',
kinetics = Arrhenius(A=(0.502,'s^-1'), n=3.86, Ea=(41.6308,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""From training reaction 332 used for R4H_SSS;C_rad_out_H/NonDeC;Cs_H_out_H/Cd
Exact match found for rate rule [R4H_SSS;C_rad_out_H/NonDeC;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction40',
reactants = ['[CH2]C(C[CH]C)CCC=C(19637)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS40',
kinetics = Arrhenius(A=(34.9816,'s^-1'), n=2.57, Ea=(39.0995,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R5H_CCC;C_rad_out_single;Cs_H_out_H/Cd] + [R5H_CCC;C_rad_out_H/NonDeC;Cs_H_out_1H] for rate rule [R5H_CCC;C_rad_out_H/NonDeC;Cs_H_out_H/Cd]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction41',
reactants = ['[CH2]CCC([CH2])CCC=C(19638)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS41',
kinetics = Arrhenius(A=(1160,'s^-1'), n=1.94, Ea=(27.6144,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 88 used for R6H_SSSSS;C_rad_out_2H;Cs_H_out_H/Cd
Exact match found for rate rule [R6H_SSSSS;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction42',
reactants = ['[CH]=C[CH]CC(C)CCC(19639)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS42',
kinetics = Arrhenius(A=(22.7193,'s^-1'), n=3.21897, Ea=(132.277,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnH;Cd_rad_out_singleH;Cs_H_out_2H] for rate rule [R6HJ_2;Cd_rad_out_singleH;Cs_H_out_2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction43',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['CCCC1C[CH][CH]CC1(19640)'],
transitionState = 'TS43',
kinetics = Arrhenius(A=(9.63396e+08,'s^-1'), n=0.483333, Ea=(87.4777,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R6_linear;doublebond_intra_pri_2H;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction44',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['C=CCCC(=C)CCC(19641)'],
transitionState = 'TS44',
kinetics = Arrhenius(A=(2.6374e+09,'s^-1'), n=0.37, Ea=(88.9686,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction45',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['C=CC=CC(C)CCC(19642)'],
transitionState = 'TS45',
kinetics = Arrhenius(A=(4.00798e+09,'s^-1'), n=0.37, Ea=(78.2471,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad;XH_Rrad_De] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
    label = 'reaction46',
reactants = ['[CH2]C(C=C)C([CH2])CCC(18965)'],
products = ['[CH2]C(C[CH]C=C)CCC(18964)'],
transitionState = 'TS46',
kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction47',
reactants = ['[CH2]C(C[CH]C=C)CCC(18964)'],
products = ['C=CC1CC(CCC)C1(18967)'],
transitionState = 'TS47',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/OneDe]
Euclidian distance = 2.0
family: Birad_recombination"""),
)
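# Pressure-dependence network: a single explored isomer, entered through the
# bimolecular reactant channel, in an equimolar N2/Ne bath gas.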
network(
label = '3564',
isomers = [
'[CH2]C(C[CH]C=C)CCC(18964)',
],
reactants = [
('C=CCCC(134)', 'butadiene13(2459)'),
],
bathGas = {
'N2': 0.5,
'Ne': 0.5,
},
)
pressureDependence(
label = '3564',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
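# Master-equation settings: modified strong collision on an 8x5
# temperature/pressure grid (300-2000 K, 0.01-100 bar), fitted to a
# (6, 4) Chebyshev interpolation model.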
| [
"[email protected]"
] | |
6d0630b36cc1a1461e8d0c1184d82ac87708df69 | b98273a1a4cab384054c8ca7018c74a5b89dccf2 | /visu/generate_hidden_violation.py | 83b9ece19d1a8c4eac95593b51d6f54c5966efdb | [] | no_license | BeyondTheClouds/VMPlaceS | e860c01e2d134d23df9f5c920b176a9219ee8783 | f29817aac6de0d11885ff1bd9062c087e0e4ef37 | refs/heads/master | 2021-01-17T03:25:21.792907 | 2020-06-25T11:12:57 | 2020-06-25T11:12:57 | 14,586,877 | 11 | 6 | null | 2020-06-25T11:12:58 | 2013-11-21T12:26:19 | Java | UTF-8 | Python | false | false | 14,872 | py | #!/usr/bin/python
from __future__ import division
from pkg_resources import WorkingSet , DistributionNotFound
working_set = WorkingSet()
import sys
import itertools
# Printing all installed modules
#print tuple(working_set)
# Detecting if module is installed
dependency_found = True
try:
dep = working_set.require('Jinja2')
except DistributionNotFound:
dependency_found = False
pass
if not dependency_found:
try:
        # Installing it (does anyone know a better way?)
from setuptools.command.easy_install import main as install
install(['Jinja2'])
print("run again as normal user to process results")
except DistributionNotFound:
print("run this script as sudo to install a missing template engine")
pass
sys.exit(0)
import csv
import subprocess
import time
import os
import json
import jinja2
import traceback
################################################################################
# Constants and parameters
################################################################################
duration = 3600
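# Simulated-time horizon in seconds; trace events stamped after this point
# are skipped when the metrics below are computed.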
################################################################################
# Functions of the script
################################################################################
def execute_cmd(args):
print "%s" % args
# return "%s" % args
out, err = subprocess.Popen(args,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
if not err == "":
print err
return out
def render_template(template_file_path, vars, output_file_path):
templateLoader = jinja2.FileSystemLoader( searchpath="." )
templateEnv = jinja2.Environment( loader=templateLoader )
TEMPLATE_FILE = template_file_path
template = templateEnv.get_template( TEMPLATE_FILE )
templateVars = vars
outputText = template.render( templateVars )
with open(output_file_path, "w") as text_file:
text_file.write(outputText)
################################################################################
# Clean data and scripts folders
################################################################################
execute_cmd(["rm", "-r", "clouds"])
execute_cmd(["mkdir", "clouds"])
################################################################################
# Detect algorithms used in experiments
################################################################################
algos = []
for dirname, dirnames, filenames in os.walk('./events'):
# print path to all subdirectories first.
for filename in filenames:
if filename.endswith(".json"):
with open("%s/%s" % (dirname, filename), 'r') as f:
header_line = f.readline()
header_data = json.loads(header_line)
data = header_data["data"]
algo = data["algorithm"]
if not algo in algos:
algos += [algo]
print algos
################################################################################
# Detect (server_count, vm_count) combination used in experiments
################################################################################
nodes_tuples = []
vms_tuples = []
nodes_vms_tuples = []
for dirname, dirnames, filenames in os.walk('./events'):
# print path to all subdirectories first.
for filename in filenames:
if filename.endswith(".json"):
with open("%s/%s" % (dirname, filename), 'r') as f:
header_line = f.readline()
header_data = json.loads(header_line)
data = header_data["data"]
compute_node_count = data["server_count"]
service_node_count = data["service_node_count"]
node_count = compute_node_count + service_node_count
if not compute_node_count in nodes_tuples:
nodes_tuples += [compute_node_count]
if not data["vm_count"] in vms_tuples:
vms_tuples += [data["vm_count"]]
# nodes_vms_tuple = "%s-%s" % (data["server_count"], data["vm_count"])
# if not nodes_vms_tuple in nodes_vms_tuples:
# nodes_vms_tuples += [nodes_vms_tuple]
# Order the tuples
nodes_tuples = sorted(nodes_tuples)
vms_tuples = sorted(vms_tuples)
nodes_vms_tuples = [str(tuple2[0])+"-"+str(tuple2[1]) for tuple2 in zip(nodes_tuples, vms_tuples)]
# nodes_vms_tuples = sorted(nodes_vms_tuples)
print nodes_tuples
print vms_tuples
print nodes_vms_tuples
################################################################################
# Fill data maps with computed metrics
################################################################################
def export_csv_data(algo, node_count, violations_smp_detected, violations_smp_hidden, violations_out_detected, violations_out_hidden):
folder_name = "clouds/data/%s-%d" % (algo, node_count)
execute_cmd(["mkdir", "-p", folder_name])
render_template("template/cloud_data.jinja2", {"algo": algo, "node_count": node_count, "violations": violations_smp_detected, "labels": ["smp_det_time", "smp_det_duration", "node", "type"]}, "%s/violations_smp_det.csv" % (folder_name))
render_template("template/cloud_data.jinja2", {"algo": algo, "node_count": node_count, "violations": violations_smp_hidden, "labels": ["smp_hid_time", "smp_hid_duration", "node", "type"]}, "%s/violations_smp_hid.csv" % (folder_name))
render_template("template/cloud_data.jinja2", {"algo": algo, "node_count": node_count, "violations": violations_out_detected, "labels": ["out_det_time", "out_det_duration", "node", "type"]}, "%s/violations_out_det.csv" % (folder_name))
render_template("template/cloud_data.jinja2", {"algo": algo, "node_count": node_count, "violations": violations_out_hidden, "labels": ["out_hid_time", "out_hid_duration", "node", "type"]}, "%s/violations_out_hid.csv" % (folder_name))
map_algos_size = {}
map_hidden_violation_count = {}
map_detected_violation_count = {}
map_detected_violation_ratio = {}
# Variable meant to help distinguish "violation-out", "violation-normal" and
# "violation-sched" events: it would store the last line about "violation-out"
# or "violation-det" so we can tell whether the next "violation" has already
# been processed (note that the loop below never actually updates it).
last_line = None
for dirname, dirnames, filenames in os.walk('./events'):
    # Walk every JSON event file and compute the violation metrics.
for filename in filenames:
if filename.endswith(".json"):
with open("%s/%s" % (dirname, filename), 'r') as f:
header_line = f.readline()
header_data = json.loads(header_line)
data = header_data["data"]
algo = data["algorithm"]
compute_node_count = data["server_count"]
service_node_count = data["service_node_count"]
node_count = compute_node_count + service_node_count
nodes_vms_tuple = "%s-%s" % (data["algorithm"], compute_node_count)
if not map_algos_size.has_key(compute_node_count):
map_algos_size[compute_node_count] = []
map_algos_size[compute_node_count] += [algo]
_violations_det_per_node = {}
_violations_out_per_node = {}
_violations_smp_per_node = {}
for line in f.readlines():
try:
data = json.loads(line)
if float(data["time"]) > duration:
continue
if data["event"] == "trace_event" and data["value"] == "violation-det":
current_violation_det = (float(data["time"]), float(data["duration"]), data["origin"], "det")
if not _violations_det_per_node.has_key(data["origin"]):
_violations_det_per_node[data["origin"]] = []
_violations_det_per_node[data["origin"]] += [current_violation_det]
if data["event"] == "trace_event" and data["value"] == "violation-out":
current_violation_out = (float(data["time"]), float(data["duration"]), data["origin"], "out")
if not _violations_out_per_node.has_key(data["origin"]):
_violations_out_per_node[data["origin"]] = []
_violations_out_per_node[data["origin"]] += [current_violation_out]
if data["event"] == "trace_event" and data["value"] == "violation":
current_violation_smp = (float(data["time"]), float(data["duration"]), data["origin"], "smp")
if not _violations_smp_per_node.has_key(data["origin"]):
_violations_smp_per_node[data["origin"]] = []
_violations_smp_per_node[data["origin"]] += [current_violation_smp]
except Exception as e:
# print traceback.format_exc()
pass
f.seek(0)
nodes = set(_violations_smp_per_node.keys() + _violations_out_per_node.keys())
violations_smp_detected = []
violations_smp_hidden = []
violations_out_detected = []
violations_out_hidden = []
for node in nodes:
try:
current_violation_det = _violations_det_per_node[node] if _violations_det_per_node.has_key(node) else []
current_violation_out = _violations_out_per_node[node] if _violations_out_per_node.has_key(node) else []
current_violation_smp = _violations_smp_per_node[node] if _violations_smp_per_node.has_key(node) else []
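                        # A raw violation ("smp" or "out") counts as detected when some
                        # "violation-det" event on the same node ends at the same instant,
                        # i.e. start + duration agree within 10 ms; whatever remains
                        # unmatched is counted as hidden.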
product = itertools.product(current_violation_smp, current_violation_det)
product_filtered = [element for element in product if abs(element[0][0] + element[0][1] - element[1][0] - element[1][1]) < 0.01]
violations_smp_per_node_detected = set([element[0] for element in product_filtered])
violations_smp_per_node_hidden = set([element for element in current_violation_smp if element not in violations_smp_per_node_detected])
if len(violations_smp_per_node_detected) + len(violations_smp_per_node_hidden) != len(current_violation_smp):
print("%s + %s = %s" % (violations_smp_per_node_detected, violations_smp_per_node_hidden, current_violation_smp))
product = itertools.product(current_violation_out, current_violation_det)
product_filtered = [element for element in product if abs(element[0][0] + element[0][1] - element[1][0] - element[1][1]) < 0.01]
violations_out_per_node_detected = set([element[0] for element in product_filtered])
violations_out_per_node_hidden = set([element for element in current_violation_out if element not in violations_out_per_node_detected])
if len(violations_out_per_node_detected) + len(violations_out_per_node_hidden) != len(current_violation_out):
print("%s + %s = %s" % (violations_out_per_node_detected, violations_out_per_node_hidden, current_violation_out))
violations_smp_detected += violations_smp_per_node_detected
violations_smp_hidden += violations_smp_per_node_hidden
violations_out_detected += violations_out_per_node_detected
violations_out_hidden += violations_out_per_node_hidden
except:
pass
hidden_violation_count = len(violations_smp_hidden) + len(violations_out_hidden)
detected_violation_count = len(violations_smp_detected) + len(violations_out_detected)
map_hidden_violation_count[nodes_vms_tuple] = hidden_violation_count
map_detected_violation_count[nodes_vms_tuple] = detected_violation_count
map_detected_violation_ratio[nodes_vms_tuple] = detected_violation_count / (detected_violation_count + hidden_violation_count)
# print("%s@%d => %d" % (algo, compute_node_count, violation_total_time))
# export_csv_data(algo, compute_node_count, violations_smp_detected, violations_smp_hidden, violations_out_detected, violations_out_hidden)
################################################################################
# Generate CSV files from data maps
################################################################################
print map_hidden_violation_count
print map_detected_violation_count
print map_detected_violation_ratio
render_template("template/matrix_data.jinja2", {"algos": algos, "server_counts": nodes_tuples, "data": map_hidden_violation_count }, "data/hidden_violation_count.csv")
render_template("template/matrix_data.jinja2", {"algos": algos, "server_counts": nodes_tuples, "data": map_detected_violation_count }, "data/detected_violation_count.csv")
render_template("template/matrix_data.jinja2", {"algos": algos, "server_counts": nodes_tuples, "data": map_detected_violation_ratio }, "data/detected_violation_ratio.csv")
group_by_nodes = ["distributed", "hierarchical"]
not_group_by_nodes = list(set(algos) - set(group_by_nodes))
print("group_by_nodes -> %s" %(group_by_nodes))
print("not_group_by_nodes -> %s" %(not_group_by_nodes))
render_template("template/matrix_script.jinja2", {"source": "data/hidden_violation_count.csv", "x_label": "Configuration", "y_label": "count", "algos": algos, "x_axis": zip(nodes_tuples, vms_tuples), "group_by_nodes": [], "not_group_by_nodes": [], "title": "hidden_violation_count"}, "scripts/hidden_violation_count.r")
render_template("template/matrix_script.jinja2", {"source": "data/detected_violation_count.csv", "x_label": "Configuration", "y_label": "count", "algos": algos, "x_axis": zip(nodes_tuples, vms_tuples), "group_by_nodes": [], "not_group_by_nodes": [], "title": "detected_violation_count"}, "scripts/detected_violation_count.r")
render_template("template/matrix_script.jinja2", {"source": "data/detected_violation_ratio.csv", "x_label": "Configuration", "y_label": "percentage", "algos": algos, "x_axis": zip(nodes_tuples, vms_tuples), "group_by_nodes": [], "not_group_by_nodes": [], "title": "detected_violation_ratio"}, "scripts/detected_violation_ratio.r")
| [
"[email protected]"
] | |
1428a143a04dea36c997dcea9eae210a8267879d | ddd18c78b27b9c85629feeab6914fc925aea9099 | /practice19c.py | 084a5e674ea8d90e9b0c68ad3070cbca3f20330d | [] | no_license | harmansehmbi/Project19 | 7e6331050599db2d33f1c20aef5ad13c5d7a17a8 | 6c000d05bbc397132e9fe13a48f89b57b48b70da | refs/heads/master | 2020-06-13T04:38:55.514033 | 2019-06-30T16:28:13 | 2019-06-30T16:28:13 | 194,537,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | import numpy as np
arr1 = np.array([(8, 9), (10, 12), (13, 14)])
print(arr1[0:2, 1])
arr2 = np.array([10, 20, 30])
print(arr2.min())
print(arr2.max())
print(arr2.sum())
arr3 = np.array([(1, 2, 3), (4, 5, 6)])
print(arr3.sum(axis=0))
arr4 = np.array([(4, 9, 16), (11, 13, 15)])
print(np.sqrt(arr4))
print(np.std(arr4))
arr5 = np.array([(1, 2, 3), (4, 5, 6)])
arr6 = np.array([(1, 2, 3), (4, 5, 6)])
print(arr5 + arr6)
print(arr5 - arr6)
print(arr5 * arr6)
print(arr5 / arr6)
print(arr5 // arr6)
print("===============================")
X = np.array([(1, 2, 3), (4, 5, 6)])
Y = np.array([(1, 2, 3), (4, 5, 6)])
print(np.vstack((X,Y)))
print(np.hstack((X,Y)))
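# vstack stacks the two (2, 3) arrays row-wise into a (4, 3) array, while
# hstack joins them column-wise into a (2, 6) array.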
Z = np.array((7, 21, 3))
print(np.sin(Z))
print(np.log10(Z)) | [
"[email protected]"
] | |
83a7906165e04bf8bb596c05be03851eaf23994e | 8f205d31e8e5555d69e0a7db086a3c93de6d2806 | /kube/task_generation/merge_ccs.py | 0668da5478328e17e92f72b24c014457c97dc7cc | [
"MIT"
] | permissive | torms3/Synaptor | 94e0f04478118399db91d79a8a8b478858fd4138 | 5de74aa61b3d04e88e6bc4c336d543f89d64b9a4 | refs/heads/master | 2021-05-21T19:08:43.625841 | 2020-06-19T23:10:47 | 2020-06-19T23:10:47 | 252,764,824 | 0 | 0 | NOASSERTION | 2020-04-03T15:03:17 | 2020-04-03T15:03:16 | null | UTF-8 | Python | false | false | 600 | py | import argparse
from taskqueue import TaskQueue
import synaptor.cloud.kube.parser as parser
import synaptor.cloud.kube.task_creation as tc
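# Builds a single "merge connected components" task from the Synaptor config
# (storage string, size threshold, maximum face shape) and enqueues it on the
# cloud task queue for workers to pick up.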
def main(configfilename):
config = parser.parse(configfilename)
task = tc.create_merge_ccs_task(
config["storagestrs"][0], config["szthresh"],
config["maxfaceshape"])
tq = TaskQueue(config["queueurl"])
tq.insert_all([task])
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("configfilename")
args = argparser.parse_args()
main(args.configfilename)
| [
"[email protected]"
] | |
1d6a28ceea23dbf9a4e2bb8dce039f191a0c9674 | 9a3c74efd991dd6923614f69b9a71e6309b54a01 | /eight_mile/pytorch/serialize.py | 6a3103bf7eeaa4953fb2a2d9b7a580a1d638f408 | [
"Apache-2.0"
] | permissive | wenshuoliu/mead-layers | 45e3b8bdd992971118f07199d2558b3433ad39bf | 1aecce4f7c90987f5fdb8c9112b07b7e846749d8 | refs/heads/master | 2020-12-22T14:02:59.059548 | 2020-01-28T15:26:48 | 2020-01-28T15:26:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,890 | py | import torch
import torch.nn as nn
import numpy as np
from typing import Dict
from eight_mile.pytorch.layers import Dense, TransformerEncoderStack, TransformerEncoder, MultiHeadedAttention
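# Helpers that flatten eight_mile PyTorch Transformer modules into dicts of
# NumPy arrays keyed "<scope>/<param>", and restore them again, e.g. for
# exchanging Transformer LM checkpoints as .npz archives.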
def to_weight_array(pytorch_layer: nn.Module, name: str) -> Dict:
"""Convert a {`LayerNorm`, `Linear`, `layers.Dense`} to `weights` and `bias` arrays
:param pytorch_layer: A layer to get weights for
:param name: The name of this layer to serialize
:return: A Dictionary containing `weights` and `bias` keys
"""
if isinstance(pytorch_layer, Dense):
pytorch_layer = pytorch_layer.layer
weights = pytorch_layer.weight.cpu().detach().numpy()
bias = pytorch_layer.bias.cpu().detach().numpy()
return {f"{name}/weights": weights, f"{name}/bias": bias}
def from_weight_array(pytorch_layer: nn.Module, d: Dict, name: str):
"""Read in {`LayerNorm`, `Linear`, `layers.Dense`} from `weights` and `bias` fields
:param pytorch_layer: A layer to get weights for
:param d: A Dict containing the arrays by key
:param name: The name of this layer
:return: None
"""
if isinstance(pytorch_layer, Dense):
pytorch_layer = pytorch_layer.layer
pytorch_layer.weight = nn.Parameter(torch.from_numpy(d[f"{name}/weights"]), requires_grad=True)
pytorch_layer.bias = nn.Parameter(torch.from_numpy(d[f"{name}/bias"]), requires_grad=True)
def to_ffn_array(pytorch_ffn: nn.Sequential, name: str) -> Dict:
"""Convert a `FFN` layer to a set of arrays
:param pytorch_ffn: An `FFN` layer for Transformers
:param name: The name of the layer
:return: A Dict containing the arrays by key
"""
d = {}
d.update(to_weight_array(pytorch_ffn[0], f"{name}/expansion"))
d.update(to_weight_array(pytorch_ffn[2], f"{name}/squeeze"))
return d
def from_ffn_array(pytorch_ffn: nn.Sequential, d: Dict, name: str):
"""Restore an `FFN` layer's weights from a set of arrays
:param pytorch_ffn: An `FFN` layer for Transformers
:param d: A Dict containing the arrays by key
:param name: The name of the layer
:return: None
"""
from_weight_array(pytorch_ffn[0], d, f"{name}/expansion")
from_weight_array(pytorch_ffn[2], d, f"{name}/squeeze")
def to_mha_array(pytorch_mha: MultiHeadedAttention, name: str) -> Dict:
"""Convert a `MultiHeadedAttention` module to a set of arrays
:param pytorch_mha: A `MultiHeadedAttention` module for Transformers
:param name: The name of the layer
:return: A Dict containing the arrays by key
"""
d = {}
d.update(to_weight_array(pytorch_mha.w_Q, f"{name}/w_Q"))
d.update(to_weight_array(pytorch_mha.w_K, f"{name}/w_K"))
d.update(to_weight_array(pytorch_mha.w_V, f"{name}/w_V"))
d.update(to_weight_array(pytorch_mha.w_O, f"{name}/w_O"))
return d
def from_mha_array(pytorch_mha: MultiHeadedAttention, d: Dict, name: str):
    """Restore a `MultiHeadedAttention` module's weights from a Dict of arrays
:param pytorch_mha: A `MultiHeadedAttention` module for Transformers
:param d: A Dict of arrays by key
:param name: The name of the layer
"""
from_weight_array(pytorch_mha.w_Q, d, f"{name}/w_Q")
from_weight_array(pytorch_mha.w_K, d, f"{name}/w_K")
from_weight_array(pytorch_mha.w_V, d, f"{name}/w_V")
from_weight_array(pytorch_mha.w_O, d, f"{name}/w_O")
def to_encoder_array(pytorch_encoder: TransformerEncoder, name: str) -> Dict:
    """Convert a `TransformerEncoder` layer to a set of numpy arrays
:param pytorch_encoder: A `TransformerEncoder` layer
:param name: The layer name
:return: A Dict of arrays by key
"""
d = {}
d.update(to_weight_array(pytorch_encoder.ln1, f"{name}/ln1"))
d.update(to_weight_array(pytorch_encoder.ln2, f"{name}/ln2"))
d.update(to_mha_array(pytorch_encoder.self_attn, f"{name}/mha"))
d.update(to_ffn_array(pytorch_encoder.ffn, f"{name}/ffn"))
return d
def from_encoder_array(pytorch_encoder: TransformerEncoder, d: Dict, name: str):
"""Restore a `TransformerEncoder` layer from a set of numpy arrays
:param pytorch_encoder: A `TransformerEncoder` layer
:param d: A Dict of arrays by key
:param name: The layer name
:return: None
"""
from_weight_array(pytorch_encoder.ln1, d, f"{name}/ln1")
from_weight_array(pytorch_encoder.ln2, d, f"{name}/ln2")
from_mha_array(pytorch_encoder.self_attn, d, f"{name}/mha")
from_ffn_array(pytorch_encoder.ffn, d, f"{name}/ffn")
def to_embed_array(pytorch_embed: nn.Module, name: str) -> Dict:
"""Convert a simple lookup table embedding to a `weights` array
:param pytorch_embed: An embedding module
:param name: A layer name
:return: A Dict containing the embedding `weights`
"""
weights = pytorch_embed.weight.cpu().detach().numpy()
return {f"{name}/weights": weights}
def from_embed_array(pytorch_embed: nn.Module, d: Dict, name: str):
"""Restore a simple lookup table embedding from a `weights` array
:param pytorch_embed: An embedding module
:param d: A Dict containing a `weights` array to restore
:param name: name of the layer
:return: None
"""
pytorch_embed.weight = torch.nn.Parameter(torch.from_numpy(d[f"{name}/weights"]), requires_grad=True)
def to_tlm_array(pytorch_tlm: nn.Module, embeddings_key: str = 'x', name: str = "TLM") -> Dict:
"""Convert a Transformer LM-type module to a set of weights in a Dict
:param pytorch_tlm: A Transformer LM-type module
:param embeddings_key: A key to get the embeddings from (defaults to `x`)
:param name: A name for this TLM
:return: A Dict containing all the keys to restore from Embeddings and the TransformerEncoderStack
"""
d = {}
d.update(to_encoder_stack_array(pytorch_tlm.transformer, name=f"{name}/TransformerEncoderStack"))
d.update(to_embed_array(pytorch_tlm.embeddings[embeddings_key].embeddings, name=f"{name}/SinusoidalPositionalEmbeddings"))
return d
def save_tlm_npz(pytorch_tlm: nn.Module, npz: str, embeddings_key: str = 'x', name: str = "TLM"):
"""Save a TLM to an NPZ file
:param pytorch_tlm: A Transformer LM-type module
:param npz: A file to save
:param embeddings_key: A key to get embeddings from (defaults to `x`)
:param name: A name for this TLM
:return: None
"""
d = to_tlm_array(pytorch_tlm, embeddings_key, name)
print(d.keys())
np.savez(npz, **d)
def to_encoder_stack_array(pytorch_encoder_stack: TransformerEncoderStack, name: str = "TransformerEncoderStack") -> Dict:
"""Convert a `TransformerEncoderStack` to a set of weigths
:param pytorch_encoder_stack: A transformer encoder stack
:param name: A name
:return: A Dict containing a set of weights
"""
d = {}
d.update(to_weight_array(pytorch_encoder_stack.ln, f"{name}/ln"))
for i, enc_pyt in enumerate(pytorch_encoder_stack.encoders):
d.update(to_encoder_array(enc_pyt, f"{name}/{i}"))
return d
def from_encoder_stack_array(pytorch_encoder_stack: TransformerEncoderStack, d: Dict, name: str = "TransformerEncoderStack"):
"""Restore weights from a `TransformerEncoderStack`
:param pytorch_encoder_stack: A transformer encoder stack
:param d: A Dict containing sets of arrays
:param name: A name for this primitive
:return: None
"""
from_weight_array(pytorch_encoder_stack.ln, d, f"{name}/ln")
for i, enc_pyt in enumerate(pytorch_encoder_stack.encoders):
from_encoder_array(enc_pyt, d, f"{name}/{i}")
def from_tlm_array(pytorch_tlm: nn.Module, d: Dict, embeddings_key: str = 'x', name: str = "TLM"):
"""Restore a TLM-like model (possibly a `nn.Module` for fine-tuning)
We just populate the `TransformerEncoderStack` and the embeddings from the weight arrays; all other values remain
uninitialized
:param pytorch_tlm: A TLM-like model
:param d: A Dict of weights to restore for each layer
:param embeddings_key: The name of the embeddings to restore, defaults to `x`
:param name: A name for this primitive
:return: None
"""
from_encoder_stack_array(pytorch_tlm.transformer, d, name=f"{name}/TransformerEncoderStack")
from_embed_array(pytorch_tlm.embeddings[embeddings_key].embeddings, d, f"{name}/SinusoidalPositionalEmbeddings")
def load_tlm_npz(pytorch_tlm: nn.Module, npz: str, embeddings_key: str = 'x', name: str = "TLM"):
"""Restore a TLM-like model (possibly a `nn.Module` for fine-tuning
We just populate the `TransformerEncoderStack` and the embeddings from weights, all other values remain
uninitialized
:param pytorch_tlm: A TLM-like model
:param npz: A file to restore the weights from
:param embeddings_key: The name of the embeddings to restore, defaults to `x`
:param name: A name for this primitive
:return: None
"""
d = np.load(npz)
from_tlm_array(pytorch_tlm, d, embeddings_key, name)
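# Usage sketch (illustrative): assuming `tlm` is a TLM-style nn.Module that
# exposes a `transformer` (TransformerEncoderStack) and an `embeddings` dict
# keyed by 'x', the helpers above round-trip its weights through NumPy:
#
#   save_tlm_npz(tlm, "tlm-checkpoint.npz")   # flatten to named NPZ arrays
#   load_tlm_npz(tlm, "tlm-checkpoint.npz")   # restore the same weights in place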
| [
"[email protected]"
] | |
1d2c136da5504548f741805d0f6272c10e4c9556 | 1707d1199f8110d7cd579f4ee323d8f21f9f682e | /learner/wekaMethods/featuresMethods/analyzeCommsMethods.py | 23cac16c1f8184a91702414d0a47c0010e5033a4 | [] | no_license | BGU-AiDnD/Debugger | 132a10490571c5605ea5ac0d756e40b6bb346684 | e08f84f1b28cc6f864fdfd83a37f3e9ecfe90d9d | refs/heads/master | 2023-02-25T10:33:38.262522 | 2020-09-09T02:59:27 | 2020-09-09T02:59:27 | 37,018,600 | 5 | 1 | null | 2021-02-03T19:27:38 | 2015-06-07T14:01:29 | Java | UTF-8 | Python | false | false | 57,510 | py | __author__ = 'amir'
from wekaMethods.articles import *
import wekaMethods.articles
import featureExtractorBase
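# 1-based positions of the attribute columns to keep; get_attributes() below
# returns only the attributes whose position (i + 1) appears in this list,
# presumably the output of an offline feature-selection pass.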
best_features=[1,2,4,7,8,10,11,14,15,16,18,19,21,22,24,25,28,42,43,44,46,49,56,60,65,76,79,80,81,83,84,86,87,88,90,91,93,94,95,96,97,98,100,101,102,104,105,107,108,109,111,112,114,115,116,118,119,121,122,123,125,126,128,129,130,132,133,145,146,148,150,151,152,154,155,158,159,160,161,162,163,164,165,166,168,169,171,172,173,175,176,186,187,188,190,191,192,193,194,196,197,199,200,201,202,203,204,206,208,209,217,218,220,223,224,226,230,231,232,234,237,238,240,244,245,247,259,265,266,268,272,273,275,289,290,292,295,296,298,299,302,303,304,306,307,309,310,312,313,316,317,320,330,331,334,337,344,348,353,362,367,368,370,371,374,375,376,378,379,381,382,384,385,388,389,390,391,392,402,403,404,406,407,409,410,412,413,416,417,418,419,420,436,439,440,443,446,447,448,450,451,453,454,456,457,460,461,462,464,474,475,476,478,479,481,482,485,488,489,490,492,506,511,512,514,518,519,520,522,523,525,526,528,532,533,535,546,547,550,553,554,556,560,561,563]
class analyzeCommsMethods(featureExtractorBase.FeatureExtractorBase):
def get_attributes(self):
all= [ ("NCSS_Analyze_sum", "NUMERIC"),("FileLen_Analyze_sum", "NUMERIC"),("sum_fors_Analyze_sum", "NUMERIC"),("sum_ifs_Analyze_sum", "NUMERIC"),
("sum_tries_Analyze_sum", "NUMERIC"),("len_mccab_Analyze_sum", "NUMERIC"),("sum_mccab_Analyze_sum", "NUMERIC"),("mean_mccab_Analyze_sum", "NUMERIC"),
("median_mccab_Analyze_sum", "NUMERIC"),("var_mccab_Analyze_sum", "NUMERIC"),("max_mccab_Analyze_sum", "NUMERIC"),("min_mccab_Analyze_sum", "NUMERIC"),
(" len_fanOut_Analyze_sum", "NUMERIC"),("sum_fanOut_Analyze_sum", "NUMERIC"),("mean_fanOut_Analyze_sum", "NUMERIC"),("median_fanOut_Analyze_sum", "NUMERIC"),
("var_fanOut_Analyze_sum", "NUMERIC"),("max_fanOut_Analyze_sum", "NUMERIC"),("min_fanOut_Analyze_sum", "NUMERIC"),(" len_NPath_Analyze_sum", "NUMERIC"),
("sum_NPath_Analyze_sum", "NUMERIC"),("mean_NPath_Analyze_sum", "NUMERIC"),("median_NPath_Analyze_sum", "NUMERIC"),("var_NPath_Analyze_sum", "NUMERIC"),
("max_NPath_Analyze_sum", "NUMERIC"),("min_NPath_Analyze_sum", "NUMERIC"),(" len_JavaNCSSmet_Analyze_sum", "NUMERIC"),("sum_JavaNCSSmet_Analyze_sum", "NUMERIC"),
("mean_JavaNCSSmet_Analyze_sum", "NUMERIC"),("median_JavaNCSSmet_Analyze_sum", "NUMERIC"),("var_JavaNCSSmet_Analyze_sum", "NUMERIC"),("max_JavaNCSSmet_Analyze_sum", "NUMERIC"),
("min_JavaNCSSmet_Analyze_sum", "NUMERIC"),(" len_thorwsSTM_Analyze_sum", "NUMERIC"),("sum_thorwsSTM_Analyze_sum", "NUMERIC"),("mean_thorwsSTM_Analyze_sum", "NUMERIC"),
("median_thorwsSTM_Analyze_sum", "NUMERIC"),("var_thorwsSTM_Analyze_sum", "NUMERIC"),("max_thorwsSTM_Analyze_sum", "NUMERIC"),("min_thorwsSTM_Analyze_sum", "NUMERIC"),
(" len_coupl_Analyze_sum", "NUMERIC"),("sum_coupl_Analyze_sum", "NUMERIC"),("mean_coupl_Analyze_sum", "NUMERIC"),("median_coupl_Analyze_sum", "NUMERIC"),
("var_coupl_Analyze_sum", "NUMERIC"),("max_coupl_Analyze_sum", "NUMERIC"),("min_coupl_Analyze_sum", "NUMERIC"),(" len_executables_Analyze_sum", "NUMERIC"),
("sum_executables_Analyze_sum", "NUMERIC"),("mean_executables_Analyze_sum", "NUMERIC"),("median_executables_Analyze_sum", "NUMERIC"),("var_executables_Analyze_sum", "NUMERIC"),
("max_executables_Analyze_sum", "NUMERIC"),("min_executables_Analyze_sum", "NUMERIC"),(" len_lens_Analyze_sum", "NUMERIC"),
("sum_lens_Analyze_sum", "NUMERIC"),("mean_lens_Analyze_sum", "NUMERIC"),("median_lens_Analyze_sum", "NUMERIC"),("var_lens_Analyze_sum", "NUMERIC"),
("max_lens_Analyze_sum", "NUMERIC"),("min_lens_Analyze_sum", "NUMERIC"),(" publics_Analyze_sum", "NUMERIC"),("protecteds_Analyze_sum", "NUMERIC"),
("privates_Analyze_sum", "NUMERIC"),("totals _Analyze_sum", "NUMERIC"),("len_params_Analyze_sum", "NUMERIC"),("sum_params_Analyze_sum", "NUMERIC"),
("mean_params_Analyze_sum", "NUMERIC"),("median_params_Analyze_sum", "NUMERIC"),("var_params_Analyze_sum", "NUMERIC"),("max_params_Analyze_sum", "NUMERIC"),
("min_params_Analyze_sum", "NUMERIC"),("NCSS_Analyze_avg", "NUMERIC"),("FileLen_Analyze_avg", "NUMERIC"),("sum_fors_Analyze_avg", "NUMERIC"),("sum_ifs_Analyze_avg", "NUMERIC"),
("sum_tries_Analyze_avg", "NUMERIC"),("len_mccab_Analyze_avg", "NUMERIC"),("sum_mccab_Analyze_avg", "NUMERIC"),("mean_mccab_Analyze_avg", "NUMERIC"),
("median_mccab_Analyze_avg", "NUMERIC"),("var_mccab_Analyze_avg", "NUMERIC"),("max_mccab_Analyze_avg", "NUMERIC"),("min_mccab_Analyze_avg", "NUMERIC"),
(" len_fanOut_Analyze_avg", "NUMERIC"),("sum_fanOut_Analyze_avg", "NUMERIC"),("mean_fanOut_Analyze_avg", "NUMERIC"),("median_fanOut_Analyze_avg", "NUMERIC"),
("var_fanOut_Analyze_avg", "NUMERIC"),("max_fanOut_Analyze_avg", "NUMERIC"),("min_fanOut_Analyze_avg", "NUMERIC"),(" len_NPath_Analyze_avg", "NUMERIC"),
("sum_NPath_Analyze_avg", "NUMERIC"),("mean_NPath_Analyze_avg", "NUMERIC"),("median_NPath_Analyze_avg", "NUMERIC"),("var_NPath_Analyze_avg", "NUMERIC"),
("max_NPath_Analyze_avg", "NUMERIC"),("min_NPath_Analyze_avg", "NUMERIC"),(" len_JavaNCSSmet_Analyze_avg", "NUMERIC"),("sum_JavaNCSSmet_Analyze_avg", "NUMERIC"),
("mean_JavaNCSSmet_Analyze_avg", "NUMERIC"),("median_JavaNCSSmet_Analyze_avg", "NUMERIC"),("var_JavaNCSSmet_Analyze_avg", "NUMERIC"),("max_JavaNCSSmet_Analyze_avg", "NUMERIC"),
("min_JavaNCSSmet_Analyze_avg", "NUMERIC"),(" len_thorwsSTM_Analyze_avg", "NUMERIC"),("sum_thorwsSTM_Analyze_avg", "NUMERIC"),("mean_thorwsSTM_Analyze_avg", "NUMERIC"),
("median_thorwsSTM_Analyze_avg", "NUMERIC"),("var_thorwsSTM_Analyze_avg", "NUMERIC"),("max_thorwsSTM_Analyze_avg", "NUMERIC"),("min_thorwsSTM_Analyze_avg", "NUMERIC"),
(" len_coupl_Analyze_avg", "NUMERIC"),("sum_coupl_Analyze_avg", "NUMERIC"),("mean_coupl_Analyze_avg", "NUMERIC"),("median_coupl_Analyze_avg", "NUMERIC"),
("var_coupl_Analyze_avg", "NUMERIC"),("max_coupl_Analyze_avg", "NUMERIC"),("min_coupl_Analyze_avg", "NUMERIC"),(" len_executables_Analyze_avg", "NUMERIC"),
("sum_executables_Analyze_avg", "NUMERIC"),("mean_executables_Analyze_avg", "NUMERIC"),("median_executables_Analyze_avg", "NUMERIC"),("var_executables_Analyze_avg", "NUMERIC"),
("max_executables_Analyze_avg", "NUMERIC"),("min_executables_Analyze_avg", "NUMERIC"),(" len_lens_Analyze_avg", "NUMERIC"),
("sum_lens_Analyze_avg", "NUMERIC"),("mean_lens_Analyze_avg", "NUMERIC"),("median_lens_Analyze_avg", "NUMERIC"),("var_lens_Analyze_avg", "NUMERIC"),
("max_lens_Analyze_avg", "NUMERIC"),("min_lens_Analyze_avg", "NUMERIC"),(" publics_Analyze_avg", "NUMERIC"),("protecteds_Analyze_avg", "NUMERIC"),
("privates_Analyze_avg", "NUMERIC"),("totals _Analyze_avg", "NUMERIC"),("len_params_Analyze_avg", "NUMERIC"),("sum_params_Analyze_avg", "NUMERIC"),
("mean_params_Analyze_avg", "NUMERIC"),("median_params_Analyze_avg", "NUMERIC"),("var_params_Analyze_avg", "NUMERIC"),("max_params_Analyze_avg", "NUMERIC"),
("min_params_Analyze_avg", "NUMERIC"),
("NCSS_Analyze_countPos", "NUMERIC"),("FileLen_Analyze_countPos", "NUMERIC"),("sum_fors_Analyze_countPos", "NUMERIC"),("sum_ifs_Analyze_countPos", "NUMERIC"),
("sum_tries_Analyze_countPos", "NUMERIC"),("len_mccab_Analyze_countPos", "NUMERIC"),("sum_mccab_Analyze_countPos", "NUMERIC"),("mean_mccab_Analyze_countPos", "NUMERIC"),
("median_mccab_Analyze_countPos", "NUMERIC"),("var_mccab_Analyze_countPos", "NUMERIC"),("max_mccab_Analyze_countPos", "NUMERIC"),("min_mccab_Analyze_countPos", "NUMERIC"),
(" len_fanOut_Analyze_countPos", "NUMERIC"),("sum_fanOut_Analyze_countPos", "NUMERIC"),("mean_fanOut_Analyze_countPos", "NUMERIC"),("median_fanOut_Analyze_countPos", "NUMERIC"),
("var_fanOut_Analyze_countPos", "NUMERIC"),("max_fanOut_Analyze_countPos", "NUMERIC"),("min_fanOut_Analyze_countPos", "NUMERIC"),(" len_NPath_Analyze_countPos", "NUMERIC"),
("sum_NPath_Analyze_countPos", "NUMERIC"),("mean_NPath_Analyze_countPos", "NUMERIC"),("median_NPath_Analyze_countPos", "NUMERIC"),("var_NPath_Analyze_countPos", "NUMERIC"),
("max_NPath_Analyze_countPos", "NUMERIC"),("min_NPath_Analyze_countPos", "NUMERIC"),(" len_JavaNCSSmet_Analyze_countPos", "NUMERIC"),("sum_JavaNCSSmet_Analyze_countPos", "NUMERIC"),
("mean_JavaNCSSmet_Analyze_countPos", "NUMERIC"),("median_JavaNCSSmet_Analyze_countPos", "NUMERIC"),("var_JavaNCSSmet_Analyze_countPos", "NUMERIC"),("max_JavaNCSSmet_Analyze_countPos", "NUMERIC"),
("min_JavaNCSSmet_Analyze_countPos", "NUMERIC"),(" len_thorwsSTM_Analyze_countPos", "NUMERIC"),("sum_thorwsSTM_Analyze_countPos", "NUMERIC"),("mean_thorwsSTM_Analyze_countPos", "NUMERIC"),
("median_thorwsSTM_Analyze_countPos", "NUMERIC"),("var_thorwsSTM_Analyze_countPos", "NUMERIC"),("max_thorwsSTM_Analyze_countPos", "NUMERIC"),("min_thorwsSTM_Analyze_countPos", "NUMERIC"),
(" len_coupl_Analyze_countPos", "NUMERIC"),("sum_coupl_Analyze_countPos", "NUMERIC"),("mean_coupl_Analyze_countPos", "NUMERIC"),("median_coupl_Analyze_countPos", "NUMERIC"),
("var_coupl_Analyze_countPos", "NUMERIC"),("max_coupl_Analyze_countPos", "NUMERIC"),("min_coupl_Analyze_countPos", "NUMERIC"),(" len_executables_Analyze_countPos", "NUMERIC"),
("sum_executables_Analyze_countPos", "NUMERIC"),("mean_executables_Analyze_countPos", "NUMERIC"),("median_executables_Analyze_countPos", "NUMERIC"),("var_executables_Analyze_countPos", "NUMERIC"),
("max_executables_Analyze_countPos", "NUMERIC"),("min_executables_Analyze_countPos", "NUMERIC"),(" len_lens_Analyze_countPos", "NUMERIC"),
("sum_lens_Analyze_countPos", "NUMERIC"),("mean_lens_Analyze_countPos", "NUMERIC"),("median_lens_Analyze_countPos", "NUMERIC"),("var_lens_Analyze_countPos", "NUMERIC"),
("max_lens_Analyze_countPos", "NUMERIC"),("min_lens_Analyze_countPos", "NUMERIC"),(" publics_Analyze_countPos", "NUMERIC"),("protecteds_Analyze_countPos", "NUMERIC"),
("privates_Analyze_countPos", "NUMERIC"),("totals _Analyze_countPos", "NUMERIC"),("len_params_Analyze_countPos", "NUMERIC"),("sum_params_Analyze_countPos", "NUMERIC"),
("mean_params_Analyze_countPos", "NUMERIC"),("median_params_Analyze_countPos", "NUMERIC"),("var_params_Analyze_countPos", "NUMERIC"),("max_params_Analyze_countPos", "NUMERIC"),
("min_params_Analyze_countPos", "NUMERIC"),("NCSS_Analyze_countNeg", "NUMERIC"),("FileLen_Analyze_countNeg", "NUMERIC"),("sum_fors_Analyze_countNeg", "NUMERIC"),("sum_ifs_Analyze_countNeg", "NUMERIC"),
("sum_tries_Analyze_countNeg", "NUMERIC"),("len_mccab_Analyze_countNeg", "NUMERIC"),("sum_mccab_Analyze_countNeg", "NUMERIC"),("mean_mccab_Analyze_countNeg", "NUMERIC"),
("median_mccab_Analyze_countNeg", "NUMERIC"),("var_mccab_Analyze_countNeg", "NUMERIC"),("max_mccab_Analyze_countNeg", "NUMERIC"),("min_mccab_Analyze_countNeg", "NUMERIC"),
(" len_fanOut_Analyze_countNeg", "NUMERIC"),("sum_fanOut_Analyze_countNeg", "NUMERIC"),("mean_fanOut_Analyze_countNeg", "NUMERIC"),("median_fanOut_Analyze_countNeg", "NUMERIC"),
("var_fanOut_Analyze_countNeg", "NUMERIC"),("max_fanOut_Analyze_countNeg", "NUMERIC"),("min_fanOut_Analyze_countNeg", "NUMERIC"),(" len_NPath_Analyze_countNeg", "NUMERIC"),
("sum_NPath_Analyze_countNeg", "NUMERIC"),("mean_NPath_Analyze_countNeg", "NUMERIC"),("median_NPath_Analyze_countNeg", "NUMERIC"),("var_NPath_Analyze_countNeg", "NUMERIC"),
("max_NPath_Analyze_countNeg", "NUMERIC"),("min_NPath_Analyze_countNeg", "NUMERIC"),(" len_JavaNCSSmet_Analyze_countNeg", "NUMERIC"),("sum_JavaNCSSmet_Analyze_countNeg", "NUMERIC"),
("mean_JavaNCSSmet_Analyze_countNeg", "NUMERIC"),("median_JavaNCSSmet_Analyze_countNeg", "NUMERIC"),("var_JavaNCSSmet_Analyze_countNeg", "NUMERIC"),("max_JavaNCSSmet_Analyze_countNeg", "NUMERIC"),
("min_JavaNCSSmet_Analyze_countNeg", "NUMERIC"),(" len_thorwsSTM_Analyze_countNeg", "NUMERIC"),("sum_thorwsSTM_Analyze_countNeg", "NUMERIC"),("mean_thorwsSTM_Analyze_countNeg", "NUMERIC"),
("median_thorwsSTM_Analyze_countNeg", "NUMERIC"),("var_thorwsSTM_Analyze_countNeg", "NUMERIC"),("max_thorwsSTM_Analyze_countNeg", "NUMERIC"),("min_thorwsSTM_Analyze_countNeg", "NUMERIC"),
(" len_coupl_Analyze_countNeg", "NUMERIC"),("sum_coupl_Analyze_countNeg", "NUMERIC"),("mean_coupl_Analyze_countNeg", "NUMERIC"),("median_coupl_Analyze_countNeg", "NUMERIC"),
("var_coupl_Analyze_countNeg", "NUMERIC"),("max_coupl_Analyze_countNeg", "NUMERIC"),("min_coupl_Analyze_countNeg", "NUMERIC"),(" len_executables_Analyze_countNeg", "NUMERIC"),
("sum_executables_Analyze_countNeg", "NUMERIC"),("mean_executables_Analyze_countNeg", "NUMERIC"),("median_executables_Analyze_countNeg", "NUMERIC"),("var_executables_Analyze_countNeg", "NUMERIC"),
("max_executables_Analyze_countNeg", "NUMERIC"),("min_executables_Analyze_countNeg", "NUMERIC"),(" len_lens_Analyze_countNeg", "NUMERIC"),
("sum_lens_Analyze_countNeg", "NUMERIC"),("mean_lens_Analyze_countNeg", "NUMERIC"),("median_lens_Analyze_countNeg", "NUMERIC"),("var_lens_Analyze_countNeg", "NUMERIC"),
("max_lens_Analyze_countNeg", "NUMERIC"),("min_lens_Analyze_countNeg", "NUMERIC"),(" publics_Analyze_countNeg", "NUMERIC"),("protecteds_Analyze_countNeg", "NUMERIC"),
("privates_Analyze_countNeg", "NUMERIC"),("totals _Analyze_countNeg", "NUMERIC"),("len_params_Analyze_countNeg", "NUMERIC"),("sum_params_Analyze_countNeg", "NUMERIC"),
("mean_params_Analyze_countNeg", "NUMERIC"),("median_params_Analyze_countNeg", "NUMERIC"),("var_params_Analyze_countNeg", "NUMERIC"),("max_params_Analyze_countNeg", "NUMERIC"),
("min_params_Analyze_countNeg", "NUMERIC"),("NCSS_Analyze_sumPos", "NUMERIC"),("FileLen_Analyze_sumPos", "NUMERIC"),("sum_fors_Analyze_sumPos", "NUMERIC"),("sum_ifs_Analyze_sumPos", "NUMERIC"),
("sum_tries_Analyze_sumPos", "NUMERIC"),("len_mccab_Analyze_sumPos", "NUMERIC"),("sum_mccab_Analyze_sumPos", "NUMERIC"),("mean_mccab_Analyze_sumPos", "NUMERIC"),
("median_mccab_Analyze_sumPos", "NUMERIC"),("var_mccab_Analyze_sumPos", "NUMERIC"),("max_mccab_Analyze_sumPos", "NUMERIC"),("min_mccab_Analyze_sumPos", "NUMERIC"),
(" len_fanOut_Analyze_sumPos", "NUMERIC"),("sum_fanOut_Analyze_sumPos", "NUMERIC"),("mean_fanOut_Analyze_sumPos", "NUMERIC"),("median_fanOut_Analyze_sumPos", "NUMERIC"),
("var_fanOut_Analyze_sumPos", "NUMERIC"),("max_fanOut_Analyze_sumPos", "NUMERIC"),("min_fanOut_Analyze_sumPos", "NUMERIC"),(" len_NPath_Analyze_sumPos", "NUMERIC"),
("sum_NPath_Analyze_sumPos", "NUMERIC"),("mean_NPath_Analyze_sumPos", "NUMERIC"),("median_NPath_Analyze_sumPos", "NUMERIC"),("var_NPath_Analyze_sumPos", "NUMERIC"),
("max_NPath_Analyze_sumPos", "NUMERIC"),("min_NPath_Analyze_sumPos", "NUMERIC"),(" len_JavaNCSSmet_Analyze_sumPos", "NUMERIC"),("sum_JavaNCSSmet_Analyze_sumPos", "NUMERIC"),
("mean_JavaNCSSmet_Analyze_sumPos", "NUMERIC"),("median_JavaNCSSmet_Analyze_sumPos", "NUMERIC"),("var_JavaNCSSmet_Analyze_sumPos", "NUMERIC"),("max_JavaNCSSmet_Analyze_sumPos", "NUMERIC"),
("min_JavaNCSSmet_Analyze_sumPos", "NUMERIC"),(" len_thorwsSTM_Analyze_sumPos", "NUMERIC"),("sum_thorwsSTM_Analyze_sumPos", "NUMERIC"),("mean_thorwsSTM_Analyze_sumPos", "NUMERIC"),
("median_thorwsSTM_Analyze_sumPos", "NUMERIC"),("var_thorwsSTM_Analyze_sumPos", "NUMERIC"),("max_thorwsSTM_Analyze_sumPos", "NUMERIC"),("min_thorwsSTM_Analyze_sumPos", "NUMERIC"),
(" len_coupl_Analyze_sumPos", "NUMERIC"),("sum_coupl_Analyze_sumPos", "NUMERIC"),("mean_coupl_Analyze_sumPos", "NUMERIC"),("median_coupl_Analyze_sumPos", "NUMERIC"),
("var_coupl_Analyze_sumPos", "NUMERIC"),("max_coupl_Analyze_sumPos", "NUMERIC"),("min_coupl_Analyze_sumPos", "NUMERIC"),(" len_executables_Analyze_sumPos", "NUMERIC"),
("sum_executables_Analyze_sumPos", "NUMERIC"),("mean_executables_Analyze_sumPos", "NUMERIC"),("median_executables_Analyze_sumPos", "NUMERIC"),("var_executables_Analyze_sumPos", "NUMERIC"),
("max_executables_Analyze_sumPos", "NUMERIC"),("min_executables_Analyze_sumPos", "NUMERIC"),(" len_lens_Analyze_sumPos", "NUMERIC"),
("sum_lens_Analyze_sumPos", "NUMERIC"),("mean_lens_Analyze_sumPos", "NUMERIC"),("median_lens_Analyze_sumPos", "NUMERIC"),("var_lens_Analyze_sumPos", "NUMERIC"),
("max_lens_Analyze_sumPos", "NUMERIC"),("min_lens_Analyze_sumPos", "NUMERIC"),(" publics_Analyze_sumPos", "NUMERIC"),("protecteds_Analyze_sumPos", "NUMERIC"),
("privates_Analyze_sumPos", "NUMERIC"),("totals _Analyze_sumPos", "NUMERIC"),("len_params_Analyze_sumPos", "NUMERIC"),("sum_params_Analyze_sumPos", "NUMERIC"),
("mean_params_Analyze_sumPos", "NUMERIC"),("median_params_Analyze_sumPos", "NUMERIC"),("var_params_Analyze_sumPos", "NUMERIC"),("max_params_Analyze_sumPos", "NUMERIC"),
("min_params_Analyze_sumPos", "NUMERIC"),("NCSS_Analyze_sumNeg", "NUMERIC"),("FileLen_Analyze_sumNeg", "NUMERIC"),("sum_fors_Analyze_sumNeg", "NUMERIC"),("sum_ifs_Analyze_sumNeg", "NUMERIC"),
("sum_tries_Analyze_sumNeg", "NUMERIC"),("len_mccab_Analyze_sumNeg", "NUMERIC"),("sum_mccab_Analyze_sumNeg", "NUMERIC"),("mean_mccab_Analyze_sumNeg", "NUMERIC"),
("median_mccab_Analyze_sumNeg", "NUMERIC"),("var_mccab_Analyze_sumNeg", "NUMERIC"),("max_mccab_Analyze_sumNeg", "NUMERIC"),("min_mccab_Analyze_sumNeg", "NUMERIC"),
(" len_fanOut_Analyze_sumNeg", "NUMERIC"),("sum_fanOut_Analyze_sumNeg", "NUMERIC"),("mean_fanOut_Analyze_sumNeg", "NUMERIC"),("median_fanOut_Analyze_sumNeg", "NUMERIC"),
("var_fanOut_Analyze_sumNeg", "NUMERIC"),("max_fanOut_Analyze_sumNeg", "NUMERIC"),("min_fanOut_Analyze_sumNeg", "NUMERIC"),(" len_NPath_Analyze_sumNeg", "NUMERIC"),
("sum_NPath_Analyze_sumNeg", "NUMERIC"),("mean_NPath_Analyze_sumNeg", "NUMERIC"),("median_NPath_Analyze_sumNeg", "NUMERIC"),("var_NPath_Analyze_sumNeg", "NUMERIC"),
("max_NPath_Analyze_sumNeg", "NUMERIC"),("min_NPath_Analyze_sumNeg", "NUMERIC"),(" len_JavaNCSSmet_Analyze_sumNeg", "NUMERIC"),("sum_JavaNCSSmet_Analyze_sumNeg", "NUMERIC"),
("mean_JavaNCSSmet_Analyze_sumNeg", "NUMERIC"),("median_JavaNCSSmet_Analyze_sumNeg", "NUMERIC"),("var_JavaNCSSmet_Analyze_sumNeg", "NUMERIC"),("max_JavaNCSSmet_Analyze_sumNeg", "NUMERIC"),
("min_JavaNCSSmet_Analyze_sumNeg", "NUMERIC"),(" len_thorwsSTM_Analyze_sumNeg", "NUMERIC"),("sum_thorwsSTM_Analyze_sumNeg", "NUMERIC"),("mean_thorwsSTM_Analyze_sumNeg", "NUMERIC"),
("median_thorwsSTM_Analyze_sumNeg", "NUMERIC"),("var_thorwsSTM_Analyze_sumNeg", "NUMERIC"),("max_thorwsSTM_Analyze_sumNeg", "NUMERIC"),("min_thorwsSTM_Analyze_sumNeg", "NUMERIC"),
(" len_coupl_Analyze_sumNeg", "NUMERIC"),("sum_coupl_Analyze_sumNeg", "NUMERIC"),("mean_coupl_Analyze_sumNeg", "NUMERIC"),("median_coupl_Analyze_sumNeg", "NUMERIC"),
("var_coupl_Analyze_sumNeg", "NUMERIC"),("max_coupl_Analyze_sumNeg", "NUMERIC"),("min_coupl_Analyze_sumNeg", "NUMERIC"),(" len_executables_Analyze_sumNeg", "NUMERIC"),
("sum_executables_Analyze_sumNeg", "NUMERIC"),("mean_executables_Analyze_sumNeg", "NUMERIC"),("median_executables_Analyze_sumNeg", "NUMERIC"),("var_executables_Analyze_sumNeg", "NUMERIC"),
("max_executables_Analyze_sumNeg", "NUMERIC"),("min_executables_Analyze_sumNeg", "NUMERIC"),(" len_lens_Analyze_sumNeg", "NUMERIC"),
("sum_lens_Analyze_sumNeg", "NUMERIC"),("mean_lens_Analyze_sumNeg", "NUMERIC"),("median_lens_Analyze_sumNeg", "NUMERIC"),("var_lens_Analyze_sumNeg", "NUMERIC"),
("max_lens_Analyze_sumNeg", "NUMERIC"),("min_lens_Analyze_sumNeg", "NUMERIC"),(" publics_Analyze_sumNeg", "NUMERIC"),("protecteds_Analyze_sumNeg", "NUMERIC"),
("privates_Analyze_sumNeg", "NUMERIC"),("totals _Analyze_sumNeg", "NUMERIC"),("len_params_Analyze_sumNeg", "NUMERIC"),("sum_params_Analyze_sumNeg", "NUMERIC"),
("mean_params_Analyze_sumNeg", "NUMERIC"),("median_params_Analyze_sumNeg", "NUMERIC"),("var_params_Analyze_sumNeg", "NUMERIC"),("max_params_Analyze_sumNeg", "NUMERIC"),
("min_params_Analyze_sumNeg", "NUMERIC"),("NCSS_Analyze_avgPos", "NUMERIC"),("FileLen_Analyze_avgPos", "NUMERIC"),("sum_fors_Analyze_avgPos", "NUMERIC"),("sum_ifs_Analyze_avgPos", "NUMERIC"),
("sum_tries_Analyze_avgPos", "NUMERIC"),("len_mccab_Analyze_avgPos", "NUMERIC"),("sum_mccab_Analyze_avgPos", "NUMERIC"),("mean_mccab_Analyze_avgPos", "NUMERIC"),
("median_mccab_Analyze_avgPos", "NUMERIC"),("var_mccab_Analyze_avgPos", "NUMERIC"),("max_mccab_Analyze_avgPos", "NUMERIC"),("min_mccab_Analyze_avgPos", "NUMERIC"),
(" len_fanOut_Analyze_avgPos", "NUMERIC"),("sum_fanOut_Analyze_avgPos", "NUMERIC"),("mean_fanOut_Analyze_avgPos", "NUMERIC"),("median_fanOut_Analyze_avgPos", "NUMERIC"),
("var_fanOut_Analyze_avgPos", "NUMERIC"),("max_fanOut_Analyze_avgPos", "NUMERIC"),("min_fanOut_Analyze_avgPos", "NUMERIC"),(" len_NPath_Analyze_avgPos", "NUMERIC"),
("sum_NPath_Analyze_avgPos", "NUMERIC"),("mean_NPath_Analyze_avgPos", "NUMERIC"),("median_NPath_Analyze_avgPos", "NUMERIC"),("var_NPath_Analyze_avgPos", "NUMERIC"),
("max_NPath_Analyze_avgPos", "NUMERIC"),("min_NPath_Analyze_avgPos", "NUMERIC"),(" len_JavaNCSSmet_Analyze_avgPos", "NUMERIC"),("sum_JavaNCSSmet_Analyze_avgPos", "NUMERIC"),
("mean_JavaNCSSmet_Analyze_avgPos", "NUMERIC"),("median_JavaNCSSmet_Analyze_avgPos", "NUMERIC"),("var_JavaNCSSmet_Analyze_avgPos", "NUMERIC"),("max_JavaNCSSmet_Analyze_avgPos", "NUMERIC"),
("min_JavaNCSSmet_Analyze_avgPos", "NUMERIC"),(" len_thorwsSTM_Analyze_avgPos", "NUMERIC"),("sum_thorwsSTM_Analyze_avgPos", "NUMERIC"),("mean_thorwsSTM_Analyze_avgPos", "NUMERIC"),
("median_thorwsSTM_Analyze_avgPos", "NUMERIC"),("var_thorwsSTM_Analyze_avgPos", "NUMERIC"),("max_thorwsSTM_Analyze_avgPos", "NUMERIC"),("min_thorwsSTM_Analyze_avgPos", "NUMERIC"),
(" len_coupl_Analyze_avgPos", "NUMERIC"),("sum_coupl_Analyze_avgPos", "NUMERIC"),("mean_coupl_Analyze_avgPos", "NUMERIC"),("median_coupl_Analyze_avgPos", "NUMERIC"),
("var_coupl_Analyze_avgPos", "NUMERIC"),("max_coupl_Analyze_avgPos", "NUMERIC"),("min_coupl_Analyze_avgPos", "NUMERIC"),(" len_executables_Analyze_avgPos", "NUMERIC"),
("sum_executables_Analyze_avgPos", "NUMERIC"),("mean_executables_Analyze_avgPos", "NUMERIC"),("median_executables_Analyze_avgPos", "NUMERIC"),("var_executables_Analyze_avgPos", "NUMERIC"),
("max_executables_Analyze_avgPos", "NUMERIC"),("min_executables_Analyze_avgPos", "NUMERIC"),(" len_lens_Analyze_avgPos", "NUMERIC"),
("sum_lens_Analyze_avgPos", "NUMERIC"),("mean_lens_Analyze_avgPos", "NUMERIC"),("median_lens_Analyze_avgPos", "NUMERIC"),("var_lens_Analyze_avgPos", "NUMERIC"),
("max_lens_Analyze_avgPos", "NUMERIC"),("min_lens_Analyze_avgPos", "NUMERIC"),(" publics_Analyze_avgPos", "NUMERIC"),("protecteds_Analyze_avgPos", "NUMERIC"),
("privates_Analyze_avgPos", "NUMERIC"),("totals _Analyze_avgPos", "NUMERIC"),("len_params_Analyze_avgPos", "NUMERIC"),("sum_params_Analyze_avgPos", "NUMERIC"),
("mean_params_Analyze_avgPos", "NUMERIC"),("median_params_Analyze_avgPos", "NUMERIC"),("var_params_Analyze_avgPos", "NUMERIC"),("max_params_Analyze_avgPos", "NUMERIC"),
("min_params_Analyze_avgPos", "NUMERIC"),("NCSS_Analyze_avgNeg", "NUMERIC"),("FileLen_Analyze_avgNeg", "NUMERIC"),("sum_fors_Analyze_avgNeg", "NUMERIC"),("sum_ifs_Analyze_avgNeg", "NUMERIC"),
("sum_tries_Analyze_avgNeg", "NUMERIC"),("len_mccab_Analyze_avgNeg", "NUMERIC"),("sum_mccab_Analyze_avgNeg", "NUMERIC"),("mean_mccab_Analyze_avgNeg", "NUMERIC"),
("median_mccab_Analyze_avgNeg", "NUMERIC"),("var_mccab_Analyze_avgNeg", "NUMERIC"),("max_mccab_Analyze_avgNeg", "NUMERIC"),("min_mccab_Analyze_avgNeg", "NUMERIC"),
(" len_fanOut_Analyze_avgNeg", "NUMERIC"),("sum_fanOut_Analyze_avgNeg", "NUMERIC"),("mean_fanOut_Analyze_avgNeg", "NUMERIC"),("median_fanOut_Analyze_avgNeg", "NUMERIC"),
("var_fanOut_Analyze_avgNeg", "NUMERIC"),("max_fanOut_Analyze_avgNeg", "NUMERIC"),("min_fanOut_Analyze_avgNeg", "NUMERIC"),(" len_NPath_Analyze_avgNeg", "NUMERIC"),
("sum_NPath_Analyze_avgNeg", "NUMERIC"),("mean_NPath_Analyze_avgNeg", "NUMERIC"),("median_NPath_Analyze_avgNeg", "NUMERIC"),("var_NPath_Analyze_avgNeg", "NUMERIC"),
("max_NPath_Analyze_avgNeg", "NUMERIC"),("min_NPath_Analyze_avgNeg", "NUMERIC"),(" len_JavaNCSSmet_Analyze_avgNeg", "NUMERIC"),("sum_JavaNCSSmet_Analyze_avgNeg", "NUMERIC"),
("mean_JavaNCSSmet_Analyze_avgNeg", "NUMERIC"),("median_JavaNCSSmet_Analyze_avgNeg", "NUMERIC"),("var_JavaNCSSmet_Analyze_avgNeg", "NUMERIC"),("max_JavaNCSSmet_Analyze_avgNeg", "NUMERIC"),
("min_JavaNCSSmet_Analyze_avgNeg", "NUMERIC"),(" len_thorwsSTM_Analyze_avgNeg", "NUMERIC"),("sum_thorwsSTM_Analyze_avgNeg", "NUMERIC"),("mean_thorwsSTM_Analyze_avgNeg", "NUMERIC"),
("median_thorwsSTM_Analyze_avgNeg", "NUMERIC"),("var_thorwsSTM_Analyze_avgNeg", "NUMERIC"),("max_thorwsSTM_Analyze_avgNeg", "NUMERIC"),("min_thorwsSTM_Analyze_avgNeg", "NUMERIC"),
(" len_coupl_Analyze_avgNeg", "NUMERIC"),("sum_coupl_Analyze_avgNeg", "NUMERIC"),("mean_coupl_Analyze_avgNeg", "NUMERIC"),("median_coupl_Analyze_avgNeg", "NUMERIC"),
("var_coupl_Analyze_avgNeg", "NUMERIC"),("max_coupl_Analyze_avgNeg", "NUMERIC"),("min_coupl_Analyze_avgNeg", "NUMERIC"),(" len_executables_Analyze_avgNeg", "NUMERIC"),
("sum_executables_Analyze_avgNeg", "NUMERIC"),("mean_executables_Analyze_avgNeg", "NUMERIC"),("median_executables_Analyze_avgNeg", "NUMERIC"),("var_executables_Analyze_avgNeg", "NUMERIC"),
("max_executables_Analyze_avgNeg", "NUMERIC"),("min_executables_Analyze_avgNeg", "NUMERIC"),(" len_lens_Analyze_avgNeg", "NUMERIC"),
("sum_lens_Analyze_avgNeg", "NUMERIC"),("mean_lens_Analyze_avgNeg", "NUMERIC"),("median_lens_Analyze_avgNeg", "NUMERIC"),("var_lens_Analyze_avgNeg", "NUMERIC"),
("max_lens_Analyze_avgNeg", "NUMERIC"),("min_lens_Analyze_avgNeg", "NUMERIC"),(" publics_Analyze_avgNeg", "NUMERIC"),("protecteds_Analyze_avgNeg", "NUMERIC"),
("privates_Analyze_avgNeg", "NUMERIC"),("totals _Analyze_avgNeg", "NUMERIC"),("len_params_Analyze_avgNeg", "NUMERIC"),("sum_params_Analyze_avgNeg", "NUMERIC"),
("mean_params_Analyze_avgNeg", "NUMERIC"),("median_params_Analyze_avgNeg", "NUMERIC"),("var_params_Analyze_avgNeg", "NUMERIC"),("max_params_Analyze_avgNeg", "NUMERIC"),
("min_params_Analyze_avgNeg", "NUMERIC")
]
ret=[]
for i in range(len(all)):
if i+1 in best_features:
ret.append(all[i])
return ret
def sqlToAttributesBest(self,basicAtt, c, files_dict, first,best):
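# Seeds every file with the `basicAtt` defaults, runs the aggregate query
# `first`, and for each matching row keeps only the columns whose 0-based
# offset (after the leading `name` column) appears in `best`, mapping SQL
# NULLs to 0 before appending the values to files_dict.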
Att_dict = {}
for f in files_dict.keys():
Att_dict[f] = list(basicAtt)
if 72==len(best):
print "len ", best
for row in c.execute(first):
name = row[0]
if (name in Att_dict):
ret=[]
all=list([ x if x!=None else 0 for x in row[1:] ])
for i in range(len(all)):
if i in best:
ret.append(all[i])
if len(ret)!=len(best):
print "len ", len(ret)
Att_dict[name] = ret
for f in Att_dict:
files_dict[f] = files_dict[f] + Att_dict[f]
def get_features(self, c, files_dict,prev_date,start_date,end_date):
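# Each block below runs one aggregate view over checkStyleAnalyzeExtends
# (plain sum, plain avg, then count/sum/avg restricted to positive values,
# and count/sum/avg restricted to negative values per column) and appends
# the pre-selected column subset `lst` to every file's feature vector via
# sqlToAttributesBest.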
analyze='''select name , sum( NCSS ), sum(FileLen ), sum(sum_fors ), sum(sum_ifs ), sum(sum_tries ), sum(
len_mccab ), sum(sum_mccab ), sum(mean_mccab ), sum(median_mccab ), sum(var_mccab ), sum(max_mccab ), sum(min_mccab ), sum(
len_fanOut ), sum(sum_fanOut ), sum(mean_fanOut ), sum(median_fanOut ), sum(var_fanOut ), sum(max_fanOut ), sum(min_fanOut ), sum(
len_NPath ), sum(sum_NPath ), sum(mean_NPath ), sum(median_NPath ), sum(var_NPath ), sum(max_NPath ), sum(min_NPath ), sum(
len_JavaNCSSmet ), sum(sum_JavaNCSSmet ), sum(mean_JavaNCSSmet ), sum(median_JavaNCSSmet ), sum(var_JavaNCSSmet ), sum(max_JavaNCSSmet ), sum(min_JavaNCSSmet ), sum(
len_thorwsSTM ), sum(sum_thorwsSTM ), sum(mean_thorwsSTM ), sum(median_thorwsSTM ), sum(var_thorwsSTM ), sum(max_thorwsSTM ), sum(min_thorwsSTM ), sum(
len_coupl ), sum(sum_coupl ), sum(mean_coupl ), sum(median_coupl ), sum(var_coupl ), sum(max_coupl ), sum(min_coupl ), sum(
len_executables ), sum(sum_executables ), sum(mean_executables ), sum(median_executables ), sum(var_executables ), sum(max_executables ), sum(min_executables ), sum(
len_lens ), sum(sum_lens ), sum(mean_lens ), sum(median_lens ), sum(var_lens ), sum(max_lens ), sum(min_lens ), sum(
publics ), sum(protecteds ), sum(privates ), sum(totals ), sum(len_params ), sum(sum_params ), sum(mean_params ), sum(median_params ), sum(var_params ), sum(max_params ), sum(min_params)
from checkStyleAnalyzeExtends group by name'''
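# 0-based offsets (relative to the first aggregate column, i.e. excluding
# `name`) retained from this query's result row; the same convention is
# used for every `lst` below.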
lst=[0,1,3,6,7,9,10,13,14,15,17,18,20,21,23,24,27,41,42,43,45,48,55,59,64]
self.sqlToAttributesBest(["0" for x in lst], c, files_dict, analyze,lst)
analyze='''select name , avg( NCSS ), avg(FileLen ), avg(sum_fors ), avg(sum_ifs ), avg(sum_tries ), avg(
len_mccab ), avg(sum_mccab ), avg(mean_mccab ), avg(median_mccab ), avg(var_mccab ), avg(max_mccab ), avg(min_mccab ), avg(
len_fanOut ), avg(sum_fanOut ), avg(mean_fanOut ), avg(median_fanOut ), avg(var_fanOut ), avg(max_fanOut ), avg(min_fanOut ), avg(
len_NPath ), avg(sum_NPath ), avg(mean_NPath ), avg(median_NPath ), avg(var_NPath ), avg(max_NPath ), avg(min_NPath ), avg(
len_JavaNCSSmet ), avg(sum_JavaNCSSmet ), avg(mean_JavaNCSSmet ), avg(median_JavaNCSSmet ), avg(var_JavaNCSSmet ), avg(max_JavaNCSSmet ), avg(min_JavaNCSSmet ), avg(
len_thorwsSTM ), avg(sum_thorwsSTM ), avg(mean_thorwsSTM ), avg(median_thorwsSTM ), avg(var_thorwsSTM ), avg(max_thorwsSTM ), avg(min_thorwsSTM ), avg(
len_coupl ), avg(sum_coupl ), avg(mean_coupl ), avg(median_coupl ), avg(var_coupl ), avg(max_coupl ), avg(min_coupl ), avg(
len_executables ), avg(sum_executables ), avg(mean_executables ), avg(median_executables ), avg(var_executables ), avg(max_executables ), avg(min_executables ), avg(
len_lens ), avg(sum_lens ), avg(mean_lens ), avg(median_lens ), avg(var_lens ), avg(max_lens ), avg(min_lens ), avg(
publics ), avg(protecteds ), avg(privates ), avg(totals ), avg(len_params ), avg(sum_params ), avg(mean_params ), avg(median_params ), avg(var_params ), avg(max_params ), avg(min_params)
from checkStyleAnalyzeExtends group by name'''
lst=[3,6,7,8,10,11,13,14,15,17,18,20,21,22,23,24,25,27,28,29,31,32,34,35,36,38,39,41,42,43,45,46,48,49,50,52,53,55,56,57,59,60]
self.sqlToAttributesBest(["0" for x in lst], c, files_dict, analyze,lst)
analyze='''select name , Sum(case When NCSS > 0 Then 1 Else 0 End),Sum(case When FileLen > 0 Then 1 Else 0 End),Sum(case When sum_fors > 0 Then 1 Else 0 End),
Sum(case When sum_ifs > 0 Then 1 Else 0 End),Sum(case When sum_tries > 0 Then 1 Else 0 End),Sum(case When len_mccab > 0 Then 1 Else 0 End),
Sum(case When sum_mccab > 0 Then 1 Else 0 End),Sum(case When mean_mccab > 0 Then 1 Else 0 End),Sum(case When median_mccab > 0 Then 1 Else 0 End),
Sum(case When var_mccab > 0 Then 1 Else 0 End),Sum(case When max_mccab > 0 Then 1 Else 0 End),Sum(case When min_mccab > 0 Then 1 Else 0 End),
Sum(case When len_fanOut > 0 Then 1 Else 0 End),Sum(case When sum_fanOut > 0 Then 1 Else 0 End),Sum(case When mean_fanOut > 0 Then 1 Else 0 End),
Sum(case When median_fanOut > 0 Then 1 Else 0 End),Sum(case When var_fanOut > 0 Then 1 Else 0 End),Sum(case When max_fanOut > 0 Then 1 Else 0 End),
Sum(case When min_fanOut > 0 Then 1 Else 0 End),Sum(case When len_NPath > 0 Then 1 Else 0 End),Sum(case When sum_NPath > 0 Then 1 Else 0 End),
Sum(case When mean_NPath > 0 Then 1 Else 0 End),Sum(case When median_NPath > 0 Then 1 Else 0 End),Sum(case When var_NPath > 0 Then 1 Else 0 End),
Sum(case When max_NPath > 0 Then 1 Else 0 End),Sum(case When min_NPath > 0 Then 1 Else 0 End),Sum(case When len_JavaNCSSmet > 0 Then 1 Else 0 End),
Sum(case When sum_JavaNCSSmet > 0 Then 1 Else 0 End),Sum(case When mean_JavaNCSSmet > 0 Then 1 Else 0 End),Sum(case When median_JavaNCSSmet > 0 Then 1 Else 0 End),
Sum(case When var_JavaNCSSmet > 0 Then 1 Else 0 End),Sum(case When max_JavaNCSSmet > 0 Then 1 Else 0 End),Sum(case When min_JavaNCSSmet > 0 Then 1 Else 0 End),
Sum(case When len_thorwsSTM > 0 Then 1 Else 0 End),Sum(case When sum_thorwsSTM > 0 Then 1 Else 0 End),Sum(case When mean_thorwsSTM > 0 Then 1 Else 0 End),
Sum(case When median_thorwsSTM > 0 Then 1 Else 0 End),Sum(case When var_thorwsSTM > 0 Then 1 Else 0 End),Sum(case When max_thorwsSTM > 0 Then 1 Else 0 End),
Sum(case When min_thorwsSTM > 0 Then 1 Else 0 End),Sum(case When len_coupl > 0 Then 1 Else 0 End),Sum(case When sum_coupl > 0 Then 1 Else 0 End),
Sum(case When mean_coupl > 0 Then 1 Else 0 End),Sum(case When median_coupl > 0 Then 1 Else 0 End),Sum(case When var_coupl > 0 Then 1 Else 0 End),
Sum(case When max_coupl > 0 Then 1 Else 0 End),Sum(case When min_coupl > 0 Then 1 Else 0 End),Sum(case When len_executables > 0 Then 1 Else 0 End),
Sum(case When sum_executables > 0 Then 1 Else 0 End),Sum(case When mean_executables > 0 Then 1 Else 0 End),Sum(case When median_executables > 0 Then 1 Else 0 End),
Sum(case When var_executables > 0 Then 1 Else 0 End),
Sum(case When max_executables > 0 Then 1 Else 0 End),Sum(case When min_executables > 0 Then 1 Else 0 End),Sum(case When len_lens > 0 Then 1 Else 0 End),
Sum(case When sum_lens > 0 Then 1 Else 0 End),Sum(case When mean_lens > 0 Then 1 Else 0 End),Sum(case When median_lens > 0 Then 1 Else 0 End),
Sum(case When var_lens > 0 Then 1 Else 0 End),Sum(case When max_lens > 0 Then 1 Else 0 End),Sum(case When min_lens > 0 Then 1 Else 0 End),
Sum(case When publics > 0 Then 1 Else 0 End),Sum(case When protecteds > 0 Then 1 Else 0 End),Sum(case When privates > 0 Then 1 Else 0 End),
Sum(case When totals > 0 Then 1 Else 0 End),Sum(case When len_params > 0 Then 1 Else 0 End),Sum(case When sum_params > 0 Then 1 Else 0 End),
Sum(case When mean_params > 0 Then 1 Else 0 End),Sum(case When median_params > 0 Then 1 Else 0 End),Sum(case When var_params > 0 Then 1 Else 0 End),
Sum(case When max_params > 0 Then 1 Else 0 End),Sum(case When min_params > 0 Then 1 Else 0 End) from checkStyleAnalyzeExtends group by name'''
lst=[0,1,3,5,6,7,9,10,13,14,15,16,17,18,19,20,21,23,24,26,27,28,30,31,41,42,43,45,46,47,48,49,51,52,54,55,56,57,58,59,61,63,64]
self.sqlToAttributesBest(["0" for x in lst ], c, files_dict, analyze,lst)
analyze='''select name , Sum(case When NCSS < 0 Then 1 Else 0 End),Sum(case When FileLen < 0 Then 1 Else 0 End),Sum(case When sum_fors < 0 Then 1 Else 0 End),
Sum(case When sum_ifs < 0 Then 1 Else 0 End),Sum(case When sum_tries < 0 Then 1 Else 0 End),Sum(case When len_mccab < 0 Then 1 Else 0 End),
Sum(case When sum_mccab < 0 Then 1 Else 0 End),Sum(case When mean_mccab < 0 Then 1 Else 0 End),Sum(case When median_mccab < 0 Then 1 Else 0 End),
Sum(case When var_mccab < 0 Then 1 Else 0 End),Sum(case When max_mccab < 0 Then 1 Else 0 End),Sum(case When min_mccab < 0 Then 1 Else 0 End),
Sum(case When len_fanOut < 0 Then 1 Else 0 End),Sum(case When sum_fanOut < 0 Then 1 Else 0 End),Sum(case When mean_fanOut < 0 Then 1 Else 0 End),
Sum(case When median_fanOut < 0 Then 1 Else 0 End),Sum(case When var_fanOut < 0 Then 1 Else 0 End),Sum(case When max_fanOut < 0 Then 1 Else 0 End),
Sum(case When min_fanOut < 0 Then 1 Else 0 End),Sum(case When len_NPath < 0 Then 1 Else 0 End),Sum(case When sum_NPath < 0 Then 1 Else 0 End),
Sum(case When mean_NPath < 0 Then 1 Else 0 End),Sum(case When median_NPath < 0 Then 1 Else 0 End),Sum(case When var_NPath < 0 Then 1 Else 0 End),
Sum(case When max_NPath < 0 Then 1 Else 0 End),Sum(case When min_NPath < 0 Then 1 Else 0 End),Sum(case When len_JavaNCSSmet < 0 Then 1 Else 0 End),
Sum(case When sum_JavaNCSSmet < 0 Then 1 Else 0 End),Sum(case When mean_JavaNCSSmet < 0 Then 1 Else 0 End),Sum(case When median_JavaNCSSmet < 0 Then 1 Else 0 End),
Sum(case When var_JavaNCSSmet < 0 Then 1 Else 0 End),Sum(case When max_JavaNCSSmet < 0 Then 1 Else 0 End),Sum(case When min_JavaNCSSmet < 0 Then 1 Else 0 End),
Sum(case When len_thorwsSTM < 0 Then 1 Else 0 End),Sum(case When sum_thorwsSTM < 0 Then 1 Else 0 End),Sum(case When mean_thorwsSTM < 0 Then 1 Else 0 End),
Sum(case When median_thorwsSTM < 0 Then 1 Else 0 End),Sum(case When var_thorwsSTM < 0 Then 1 Else 0 End),Sum(case When max_thorwsSTM < 0 Then 1 Else 0 End),
Sum(case When min_thorwsSTM < 0 Then 1 Else 0 End),Sum(case When len_coupl < 0 Then 1 Else 0 End),Sum(case When sum_coupl < 0 Then 1 Else 0 End),
Sum(case When mean_coupl < 0 Then 1 Else 0 End),Sum(case When median_coupl < 0 Then 1 Else 0 End),Sum(case When var_coupl < 0 Then 1 Else 0 End),
Sum(case When max_coupl < 0 Then 1 Else 0 End),Sum(case When min_coupl < 0 Then 1 Else 0 End),Sum(case When len_executables < 0 Then 1 Else 0 End),
Sum(case When sum_executables < 0 Then 1 Else 0 End),Sum(case When mean_executables < 0 Then 1 Else 0 End),Sum(case When median_executables < 0 Then 1 Else 0 End),
Sum(case When var_executables < 0 Then 1 Else 0 End),
Sum(case When max_executables < 0 Then 1 Else 0 End),Sum(case When min_executables < 0 Then 1 Else 0 End),Sum(case When len_lens < 0 Then 1 Else 0 End),
Sum(case When sum_lens < 0 Then 1 Else 0 End),Sum(case When mean_lens < 0 Then 1 Else 0 End),Sum(case When median_lens < 0 Then 1 Else 0 End),
Sum(case When var_lens < 0 Then 1 Else 0 End),Sum(case When max_lens < 0 Then 1 Else 0 End),Sum(case When min_lens < 0 Then 1 Else 0 End),
Sum(case When publics < 0 Then 1 Else 0 End),Sum(case When protecteds < 0 Then 1 Else 0 End),Sum(case When privates < 0 Then 1 Else 0 End),
Sum(case When totals < 0 Then 1 Else 0 End),Sum(case When len_params < 0 Then 1 Else 0 End),Sum(case When sum_params < 0 Then 1 Else 0 End),
Sum(case When mean_params < 0 Then 1 Else 0 End),Sum(case When median_params < 0 Then 1 Else 0 End),Sum(case When var_params < 0 Then 1 Else 0 End),
Sum(case When max_params < 0 Then 1 Else 0 End),Sum(case When min_params < 0 Then 1 Else 0 End) from checkStyleAnalyzeExtends group by name'''
lst=[0,1,3,6,7,9,13,14,15,17,20,21,23,27,28,30,42,48,49,51,55,56,58]
self.sqlToAttributesBest(["0" for x in lst], c, files_dict, analyze,lst)
analyze='''select name , Sum(case When NCSS > 0 Then NCSS Else 0 End),Sum(case When FileLen > 0 Then FileLen Else 0 End),Sum(case When sum_fors > 0 Then sum_fors Else 0 End),
Sum(case When sum_ifs > 0 Then sum_ifs Else 0 End),Sum(case When sum_tries > 0 Then sum_tries Else 0 End),Sum(case When len_mccab > 0 Then len_mccab Else 0 End),
Sum(case When sum_mccab > 0 Then sum_mccab Else 0 End),Sum(case When mean_mccab > 0 Then mean_mccab Else 0 End),Sum(case When median_mccab > 0 Then median_mccab Else 0 End),
Sum(case When var_mccab > 0 Then var_mccab Else 0 End),Sum(case When max_mccab > 0 Then max_mccab Else 0 End),Sum(case When min_mccab > 0 Then min_mccab Else 0 End),
Sum(case When len_fanOut > 0 Then len_fanOut Else 0 End),Sum(case When sum_fanOut > 0 Then sum_fanOut Else 0 End),Sum(case When mean_fanOut > 0 Then mean_fanOut Else 0 End),
Sum(case When median_fanOut > 0 Then median_fanOut Else 0 End),Sum(case When var_fanOut > 0 Then var_fanOut Else 0 End),Sum(case When max_fanOut > 0 Then max_fanOut Else 0 End),
Sum(case When min_fanOut > 0 Then min_fanOut Else 0 End),Sum(case When len_NPath > 0 Then len_NPath Else 0 End),Sum(case When sum_NPath > 0 Then sum_NPath Else 0 End),
Sum(case When mean_NPath > 0 Then mean_NPath Else 0 End),Sum(case When median_NPath > 0 Then median_NPath Else 0 End),Sum(case When var_NPath > 0 Then var_NPath Else 0 End),
Sum(case When max_NPath > 0 Then max_NPath Else 0 End),Sum(case When min_NPath > 0 Then min_NPath Else 0 End),Sum(case When len_JavaNCSSmet > 0 Then len_JavaNCSSmet Else 0 End),
Sum(case When sum_JavaNCSSmet > 0 Then sum_JavaNCSSmet Else 0 End),Sum(case When mean_JavaNCSSmet > 0 Then mean_JavaNCSSmet Else 0 End),Sum(case When median_JavaNCSSmet > 0 Then median_JavaNCSSmet Else 0 End),
Sum(case When var_JavaNCSSmet > 0 Then var_JavaNCSSmet Else 0 End),Sum(case When max_JavaNCSSmet > 0 Then max_JavaNCSSmet Else 0 End),Sum(case When min_JavaNCSSmet > 0 Then min_JavaNCSSmet Else 0 End),
Sum(case When len_thorwsSTM > 0 Then len_thorwsSTM Else 0 End),Sum(case When sum_thorwsSTM > 0 Then sum_thorwsSTM Else 0 End),Sum(case When mean_thorwsSTM > 0 Then mean_thorwsSTM Else 0 End),
Sum(case When median_thorwsSTM > 0 Then median_thorwsSTM Else 0 End),Sum(case When var_thorwsSTM > 0 Then var_thorwsSTM Else 0 End),Sum(case When max_thorwsSTM > 0 Then max_thorwsSTM Else 0 End),
Sum(case When min_thorwsSTM > 0 Then min_thorwsSTM Else 0 End),Sum(case When len_coupl > 0 Then len_coupl Else 0 End),Sum(case When sum_coupl > 0 Then sum_coupl Else 0 End),
Sum(case When mean_coupl > 0 Then mean_coupl Else 0 End),Sum(case When median_coupl > 0 Then median_coupl Else 0 End),Sum(case When var_coupl > 0 Then var_coupl Else 0 End),
Sum(case When max_coupl > 0 Then max_coupl Else 0 End),Sum(case When min_coupl > 0 Then min_coupl Else 0 End),Sum(case When len_executables > 0 Then len_executables Else 0 End),
Sum(case When sum_executables > 0 Then sum_executables Else 0 End),Sum(case When mean_executables > 0 Then mean_executables Else 0 End),Sum(case When median_executables > 0 Then median_executables Else 0 End),
Sum(case When var_executables > 0 Then var_executables Else 0 End),Sum(case When max_executables > 0 Then max_executables Else 0 End),Sum(case When min_executables > 0 Then min_executables Else 0 End),
Sum(case When len_lens > 0 Then len_lens Else 0 End),Sum(case When sum_lens > 0 Then sum_lens Else 0 End),Sum(case When mean_lens > 0 Then mean_lens Else 0 End),Sum(case When median_lens > 0 Then median_lens Else 0 End),
Sum(case When var_lens > 0 Then var_lens Else 0 End),Sum(case When max_lens > 0 Then max_lens Else 0 End),Sum(case When min_lens > 0 Then min_lens Else 0 End),
Sum(case When publics > 0 Then publics Else 0 End),Sum(case When protecteds > 0 Then protecteds Else 0 End),Sum(case When privates > 0 Then privates Else 0 End),
Sum(case When totals > 0 Then totals Else 0 End),Sum(case When len_params > 0 Then len_params Else 0 End),Sum(case When sum_params > 0 Then sum_params Else 0 End),
Sum(case When mean_params > 0 Then mean_params Else 0 End),Sum(case When median_params > 0 Then median_params Else 0 End),Sum(case When var_params > 0 Then var_params Else 0 End),
Sum(case When max_params > 0 Then max_params Else 0 End),Sum(case When min_params > 0 Then min_params Else 0 End)
from checkStyleAnalyzeExtends group by name'''
lst=[0,1,3,6,7,9,10,13,14,15,17,18,20,21,23,24,27,28,31,41,42,45,48,55,59,64]
self.sqlToAttributesBest(["0" for x in lst], c, files_dict, analyze,lst)
analyze='''select name , Sum(case When NCSS < 0 Then NCSS Else 0 End),Sum(case When FileLen < 0 Then FileLen Else 0 End),Sum(case When sum_fors < 0 Then sum_fors Else 0 End),
Sum(case When sum_ifs < 0 Then sum_ifs Else 0 End),Sum(case When sum_tries < 0 Then sum_tries Else 0 End),Sum(case When len_mccab < 0 Then len_mccab Else 0 End),
Sum(case When sum_mccab < 0 Then sum_mccab Else 0 End),Sum(case When mean_mccab < 0 Then mean_mccab Else 0 End),Sum(case When median_mccab < 0 Then median_mccab Else 0 End),
Sum(case When var_mccab < 0 Then var_mccab Else 0 End),Sum(case When max_mccab < 0 Then max_mccab Else 0 End),Sum(case When min_mccab < 0 Then min_mccab Else 0 End),
Sum(case When len_fanOut < 0 Then len_fanOut Else 0 End),Sum(case When sum_fanOut < 0 Then sum_fanOut Else 0 End),Sum(case When mean_fanOut < 0 Then mean_fanOut Else 0 End),
Sum(case When median_fanOut < 0 Then median_fanOut Else 0 End),Sum(case When var_fanOut < 0 Then var_fanOut Else 0 End),Sum(case When max_fanOut < 0 Then max_fanOut Else 0 End),
Sum(case When min_fanOut < 0 Then min_fanOut Else 0 End),Sum(case When len_NPath < 0 Then len_NPath Else 0 End),Sum(case When sum_NPath < 0 Then sum_NPath Else 0 End),
Sum(case When mean_NPath < 0 Then mean_NPath Else 0 End),Sum(case When median_NPath < 0 Then median_NPath Else 0 End),Sum(case When var_NPath < 0 Then var_NPath Else 0 End),
Sum(case When max_NPath < 0 Then max_NPath Else 0 End),Sum(case When min_NPath < 0 Then min_NPath Else 0 End),Sum(case When len_JavaNCSSmet < 0 Then len_JavaNCSSmet Else 0 End),
Sum(case When sum_JavaNCSSmet < 0 Then sum_JavaNCSSmet Else 0 End),Sum(case When mean_JavaNCSSmet < 0 Then mean_JavaNCSSmet Else 0 End),Sum(case When median_JavaNCSSmet < 0 Then median_JavaNCSSmet Else 0 End),
Sum(case When var_JavaNCSSmet < 0 Then var_JavaNCSSmet Else 0 End),Sum(case When max_JavaNCSSmet < 0 Then max_JavaNCSSmet Else 0 End),Sum(case When min_JavaNCSSmet < 0 Then min_JavaNCSSmet Else 0 End),
Sum(case When len_thorwsSTM < 0 Then len_thorwsSTM Else 0 End),Sum(case When sum_thorwsSTM < 0 Then sum_thorwsSTM Else 0 End),Sum(case When mean_thorwsSTM < 0 Then mean_thorwsSTM Else 0 End),
Sum(case When median_thorwsSTM < 0 Then median_thorwsSTM Else 0 End),Sum(case When var_thorwsSTM < 0 Then var_thorwsSTM Else 0 End),Sum(case When max_thorwsSTM < 0 Then max_thorwsSTM Else 0 End),
Sum(case When min_thorwsSTM < 0 Then min_thorwsSTM Else 0 End),Sum(case When len_coupl < 0 Then len_coupl Else 0 End),Sum(case When sum_coupl < 0 Then sum_coupl Else 0 End),
Sum(case When mean_coupl < 0 Then mean_coupl Else 0 End),Sum(case When median_coupl < 0 Then median_coupl Else 0 End),Sum(case When var_coupl < 0 Then var_coupl Else 0 End),
Sum(case When max_coupl < 0 Then max_coupl Else 0 End),Sum(case When min_coupl < 0 Then min_coupl Else 0 End),Sum(case When len_executables < 0 Then len_executables Else 0 End),
Sum(case When sum_executables < 0 Then sum_executables Else 0 End),Sum(case When mean_executables < 0 Then mean_executables Else 0 End),Sum(case When median_executables < 0 Then median_executables Else 0 End),
Sum(case When var_executables < 0 Then var_executables Else 0 End),Sum(case When max_executables < 0 Then max_executables Else 0 End),Sum(case When min_executables < 0 Then min_executables Else 0 End),
Sum(case When len_lens < 0 Then len_lens Else 0 End),Sum(case When sum_lens < 0 Then sum_lens Else 0 End),Sum(case When mean_lens < 0 Then mean_lens Else 0 End),Sum(case When median_lens < 0 Then median_lens Else 0 End),
Sum(case When var_lens < 0 Then var_lens Else 0 End),Sum(case When max_lens < 0 Then max_lens Else 0 End),Sum(case When min_lens < 0 Then min_lens Else 0 End),
Sum(case When publics < 0 Then publics Else 0 End),Sum(case When protecteds < 0 Then protecteds Else 0 End),Sum(case When privates < 0 Then privates Else 0 End),
Sum(case When totals < 0 Then totals Else 0 End),Sum(case When len_params < 0 Then len_params Else 0 End),Sum(case When sum_params < 0 Then sum_params Else 0 End),
Sum(case When mean_params < 0 Then mean_params Else 0 End),Sum(case When median_params < 0 Then median_params Else 0 End),Sum(case When var_params < 0 Then var_params Else 0 End),
Sum(case When max_params < 0 Then max_params Else 0 End),Sum(case When min_params < 0 Then min_params Else 0 End)
from checkStyleAnalyzeExtends group by name'''
lst=[1,6,7,9,10,13,14,15,17,18,20,21,23,24,27,28,29,30,31,41,42,43,45,46,48,49,51,52,55,56,57,58,59]
self.sqlToAttributesBest(["0" for x in lst], c, files_dict, analyze,lst)
analyze='''select name , avg(case When NCSS > 0 Then NCSS Else null End),avg(case When FileLen > 0 Then FileLen Else null End),avg(case When sum_fors > 0 Then sum_fors Else null End),
avg(case When sum_ifs > 0 Then sum_ifs Else null End),avg(case When sum_tries > 0 Then sum_tries Else null End),avg(case When len_mccab > 0 Then len_mccab Else null End),
avg(case When sum_mccab > 0 Then sum_mccab Else null End),avg(case When mean_mccab > 0 Then mean_mccab Else null End),avg(case When median_mccab > 0 Then median_mccab Else null End),
avg(case When var_mccab > 0 Then var_mccab Else null End),avg(case When max_mccab > 0 Then max_mccab Else null End),avg(case When min_mccab > 0 Then min_mccab Else null End),
avg(case When len_fanOut > 0 Then len_fanOut Else null End),avg(case When sum_fanOut > 0 Then sum_fanOut Else null End),avg(case When mean_fanOut > 0 Then mean_fanOut Else null End),
avg(case When median_fanOut > 0 Then median_fanOut Else null End),avg(case When var_fanOut > 0 Then var_fanOut Else null End),avg(case When max_fanOut > 0 Then max_fanOut Else null End),
avg(case When min_fanOut > 0 Then min_fanOut Else null End),avg(case When len_NPath > 0 Then len_NPath Else null End),avg(case When sum_NPath > 0 Then sum_NPath Else null End),
avg(case When mean_NPath > 0 Then mean_NPath Else null End),avg(case When median_NPath > 0 Then median_NPath Else null End),avg(case When var_NPath > 0 Then var_NPath Else null End),
avg(case When max_NPath > 0 Then max_NPath Else null End),avg(case When min_NPath > 0 Then min_NPath Else null End),avg(case When len_JavaNCSSmet > 0 Then len_JavaNCSSmet Else null End),
avg(case When sum_JavaNCSSmet > 0 Then sum_JavaNCSSmet Else null End),avg(case When mean_JavaNCSSmet > 0 Then mean_JavaNCSSmet Else null End),avg(case When median_JavaNCSSmet > 0 Then median_JavaNCSSmet Else null End),
avg(case When var_JavaNCSSmet > 0 Then var_JavaNCSSmet Else null End),avg(case When max_JavaNCSSmet > 0 Then max_JavaNCSSmet Else null End),avg(case When min_JavaNCSSmet > 0 Then min_JavaNCSSmet Else null End),
avg(case When len_thorwsSTM > 0 Then len_thorwsSTM Else null End),avg(case When sum_thorwsSTM > 0 Then sum_thorwsSTM Else null End),avg(case When mean_thorwsSTM > 0 Then mean_thorwsSTM Else null End),
avg(case When median_thorwsSTM > 0 Then median_thorwsSTM Else null End),avg(case When var_thorwsSTM > 0 Then var_thorwsSTM Else null End),avg(case When max_thorwsSTM > 0 Then max_thorwsSTM Else null End),
avg(case When min_thorwsSTM > 0 Then min_thorwsSTM Else null End),avg(case When len_coupl > 0 Then len_coupl Else null End),avg(case When sum_coupl > 0 Then sum_coupl Else null End),
avg(case When mean_coupl > 0 Then mean_coupl Else null End),avg(case When median_coupl > 0 Then median_coupl Else null End),avg(case When var_coupl > 0 Then var_coupl Else null End),
avg(case When max_coupl > 0 Then max_coupl Else null End),avg(case When min_coupl > 0 Then min_coupl Else null End),avg(case When len_executables > 0 Then len_executables Else null End),
avg(case When sum_executables > 0 Then sum_executables Else null End),avg(case When mean_executables > 0 Then mean_executables Else null End),avg(case When median_executables > 0 Then median_executables Else null End),
avg(case When var_executables > 0 Then var_executables Else null End),avg(case When max_executables > 0 Then max_executables Else null End),avg(case When min_executables > 0 Then min_executables Else null End),
avg(case When len_lens > 0 Then len_lens Else null End),avg(case When sum_lens > 0 Then sum_lens Else null End),avg(case When mean_lens > 0 Then mean_lens Else null End),
avg(case When median_lens > 0 Then median_lens Else null End),avg(case When var_lens > 0 Then var_lens Else null End),avg(case When max_lens > 0 Then max_lens Else null End),
avg(case When min_lens > 0 Then min_lens Else null End),avg(case When publics > 0 Then publics Else null End),avg(case When protecteds > 0 Then protecteds Else null End),
avg(case When privates > 0 Then privates Else null End),avg(case When totals > 0 Then totals Else null End),avg(case When len_params > 0 Then len_params Else null End),
avg(case When sum_params > 0 Then sum_params Else null End),avg(case When mean_params > 0 Then mean_params Else null End),avg(case When median_params > 0 Then median_params Else null End),
avg(case When var_params > 0 Then var_params Else null End),avg(case When max_params > 0 Then max_params Else null End),avg(case When min_params > 0 Then min_params Else null End)
from checkStyleAnalyzeExtends group by name'''
lst=[3,6,7,10,13,14,15,17,18,20,21,23,24,27,28,29,31,41,42,43,45,46,48,49,52,55,56,57,59]
self.sqlToAttributesBest(["0" for x in lst], c, files_dict, analyze,lst)
analyze='''select name , avg(case When NCSS < 0 Then NCSS Else null End),avg(case When FileLen < 0 Then FileLen Else null End),avg(case When sum_fors < 0 Then sum_fors Else null End),
avg(case When sum_ifs < 0 Then sum_ifs Else null End),avg(case When sum_tries < 0 Then sum_tries Else null End),avg(case When len_mccab < 0 Then len_mccab Else null End),
avg(case When sum_mccab < 0 Then sum_mccab Else null End),avg(case When mean_mccab < 0 Then mean_mccab Else null End),avg(case When median_mccab < 0 Then median_mccab Else null End),
avg(case When var_mccab < 0 Then var_mccab Else null End),avg(case When max_mccab < 0 Then max_mccab Else null End),avg(case When min_mccab < 0 Then min_mccab Else null End),
avg(case When len_fanOut < 0 Then len_fanOut Else null End),avg(case When sum_fanOut < 0 Then sum_fanOut Else null End),avg(case When mean_fanOut < 0 Then mean_fanOut Else null End),
avg(case When median_fanOut < 0 Then median_fanOut Else null End),avg(case When var_fanOut < 0 Then var_fanOut Else null End),avg(case When max_fanOut < 0 Then max_fanOut Else null End),
avg(case When min_fanOut < 0 Then min_fanOut Else null End),avg(case When len_NPath < 0 Then len_NPath Else null End),avg(case When sum_NPath < 0 Then sum_NPath Else null End),
avg(case When mean_NPath < 0 Then mean_NPath Else null End),avg(case When median_NPath < 0 Then median_NPath Else null End),avg(case When var_NPath < 0 Then var_NPath Else null End),
avg(case When max_NPath < 0 Then max_NPath Else null End),avg(case When min_NPath < 0 Then min_NPath Else null End),avg(case When len_JavaNCSSmet < 0 Then len_JavaNCSSmet Else null End),
avg(case When sum_JavaNCSSmet < 0 Then sum_JavaNCSSmet Else null End),avg(case When mean_JavaNCSSmet < 0 Then mean_JavaNCSSmet Else null End),avg(case When median_JavaNCSSmet < 0 Then median_JavaNCSSmet Else null End),
avg(case When var_JavaNCSSmet < 0 Then var_JavaNCSSmet Else null End),avg(case When max_JavaNCSSmet < 0 Then max_JavaNCSSmet Else null End),avg(case When min_JavaNCSSmet < 0 Then min_JavaNCSSmet Else null End),
avg(case When len_thorwsSTM < 0 Then len_thorwsSTM Else null End),avg(case When sum_thorwsSTM < 0 Then sum_thorwsSTM Else null End),avg(case When mean_thorwsSTM < 0 Then mean_thorwsSTM Else null End),
avg(case When median_thorwsSTM < 0 Then median_thorwsSTM Else null End),avg(case When var_thorwsSTM < 0 Then var_thorwsSTM Else null End),avg(case When max_thorwsSTM < 0 Then max_thorwsSTM Else null End),
avg(case When min_thorwsSTM < 0 Then min_thorwsSTM Else null End),avg(case When len_coupl < 0 Then len_coupl Else null End),avg(case When sum_coupl < 0 Then sum_coupl Else null End),
avg(case When mean_coupl < 0 Then mean_coupl Else null End),avg(case When median_coupl < 0 Then median_coupl Else null End),avg(case When var_coupl < 0 Then var_coupl Else null End),
avg(case When max_coupl < 0 Then max_coupl Else null End),avg(case When min_coupl < 0 Then min_coupl Else null End),avg(case When len_executables < 0 Then len_executables Else null End),
avg(case When sum_executables < 0 Then sum_executables Else null End),avg(case When mean_executables < 0 Then mean_executables Else null End),avg(case When median_executables < 0 Then median_executables Else null End),
avg(case When var_executables < 0 Then var_executables Else null End),avg(case When max_executables < 0 Then max_executables Else null End),avg(case When min_executables < 0 Then min_executables Else null End),
avg(case When len_lens < 0 Then len_lens Else null End),avg(case When sum_lens < 0 Then sum_lens Else null End),avg(case When mean_lens < 0 Then mean_lens Else null End),
avg(case When median_lens < 0 Then median_lens Else null End),avg(case When var_lens < 0 Then var_lens Else null End),avg(case When max_lens < 0 Then max_lens Else null End),
avg(case When min_lens < 0 Then min_lens Else null End),avg(case When publics < 0 Then publics Else null End),avg(case When protecteds < 0 Then protecteds Else null End),
avg(case When privates < 0 Then privates Else null End),avg(case When totals < 0 Then totals Else null End),avg(case When len_params < 0 Then len_params Else null End),
avg(case When sum_params < 0 Then sum_params Else null End),avg(case When mean_params < 0 Then mean_params Else null End),avg(case When median_params < 0 Then median_params Else null End),
avg(case When var_params < 0 Then var_params Else null End),avg(case When max_params < 0 Then max_params Else null End),avg(case When min_params < 0 Then min_params Else null End)
from checkStyleAnalyzeExtends group by name'''
lst=[1,6,7,9,13,14,15,17,18,20,21,23,27,28,30,41,42,45,48,49,51,55,56,58]
self.sqlToAttributesBest(["0" for x in lst], c, files_dict, analyze,lst)
# File: /ingenico/connect/sdk/domain/errors/definitions/api_error.py (repo: king1212/connect-sdk-python2, license: MIT)
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
class APIError(DataObject):
__category = None
__code = None
__http_status_code = None
__id = None
__message = None
__property_name = None
__request_id = None
@property
def category(self):
"""
| Category the error belongs to. The category should give an indication of the type of error you are dealing with.Possible values:
* CONNECT_PLATFORM_ERROR - indicating that a functional error has occurred in the Connect platform.
* PAYMENT_PLATFORM_ERROR - indicating that a functional error has occurred in the Payment platform.
* IO_ERROR - indicating that a technical error has occurred within the Connect platform or between Connect and any of the payment platforms or third party systems.
Type: str
"""
return self.__category
@category.setter
def category(self, value):
self.__category = value
@property
def code(self):
"""
| Error code
Type: str
"""
return self.__code
@code.setter
def code(self, value):
self.__code = value
@property
def http_status_code(self):
"""
| HTTP status code for this error that can be used to determine the type of error
Type: int
"""
return self.__http_status_code
@http_status_code.setter
def http_status_code(self, value):
self.__http_status_code = value
@property
def id(self):
"""
| ID of the error. This is a short human-readable message that briefly describes the error.
Type: str
"""
return self.__id
@id.setter
def id(self, value):
self.__id = value
@property
def message(self):
"""
| Human-readable error message that is not meant to be relayed to consumer as it might tip off people who are trying to commit fraud
Type: str
"""
return self.__message
@message.setter
def message(self, value):
self.__message = value
@property
def property_name(self):
"""
| In case the error was in relation to a property that was missing or not correct the name of the property in question is returned
Type: str
"""
return self.__property_name
@property_name.setter
def property_name(self, value):
self.__property_name = value
@property
def request_id(self):
"""
| ID of the request that can be used for debugging purposes
Type: str
"""
return self.__request_id
@request_id.setter
def request_id(self, value):
self.__request_id = value
def to_dictionary(self):
dictionary = super(APIError, self).to_dictionary()
self._add_to_dictionary(dictionary, 'category', self.category)
self._add_to_dictionary(dictionary, 'code', self.code)
self._add_to_dictionary(dictionary, 'httpStatusCode', self.http_status_code)
self._add_to_dictionary(dictionary, 'id', self.id)
self._add_to_dictionary(dictionary, 'message', self.message)
self._add_to_dictionary(dictionary, 'propertyName', self.property_name)
self._add_to_dictionary(dictionary, 'requestId', self.request_id)
return dictionary
def from_dictionary(self, dictionary):
super(APIError, self).from_dictionary(dictionary)
if 'category' in dictionary:
self.category = dictionary['category']
if 'code' in dictionary:
self.code = dictionary['code']
if 'httpStatusCode' in dictionary:
self.http_status_code = dictionary['httpStatusCode']
if 'id' in dictionary:
self.id = dictionary['id']
if 'message' in dictionary:
self.message = dictionary['message']
if 'propertyName' in dictionary:
self.property_name = dictionary['propertyName']
if 'requestId' in dictionary:
self.request_id = dictionary['requestId']
return self
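
# A minimal round-trip sketch (the field values below are made up; they only
# need to use the JSON keys from attribute_map above):
#
#   error = APIError().from_dictionary({
#       'category': 'PAYMENT_PLATFORM_ERROR',
#       'code': '400',
#       'httpStatusCode': 400,
#       'id': 'INVALID_VALUE',
#       'message': 'value is not allowed here',
#       'propertyName': 'amount',
#       'requestId': '12345',
#   })
#   assert error.http_status_code == 400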
# File: /tools.py (repo: nusratsharmin/AFQ-notebooks, license: MIT)
import numpy as np
from dipy.align.metrics import CCMetric, EMMetric, SSDMetric
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
metric_dict = {'CC': CCMetric,
'EM': EMMetric,
'SSD': SSDMetric}
def syn_registration(moving, static, moving_grid2world=None, static_grid2world=None,
metric='CC', dim=3, level_iters = [10, 10, 5], prealign=None):
"""
Register a source image (moving) to a target image (static)
Parameters
----------
moving : ndarray
The source image data to be registered
moving_grid2world : array, shape (4,4)
The affine matrix associated with the moving (source) data.
static : ndarray
The target image data for registration
static_grid2world : array, shape (4,4)
The affine matrix associated with the static (target) data
metric : string, optional
The metric to be optimized. One of `CC`, `EM`, `SSD`, Default: CCMetric.
dim: int (either 2 or 3), optional
The dimensions of the image domain. Default: 3
level_iters : list of int, optional
the number of iterations at each level of the Gaussian Pyramid (the
length of the list defines the number of pyramid levels to be
        used).
    prealign : array, shape (4, 4), optional
        Affine transformation used to pre-align the moving image to the
        static image before the diffeomorphic optimization starts; passed
        straight through to SymmetricDiffeomorphicRegistration.optimize().
Returns
-------
warped_moving : ndarray
The data in `moving`, warped towards the `static` data.
forward : ndarray (..., 3)
The vector field describing the forward warping from the source to the target.
backward : ndarray (..., 3)
The vector field describing the backward warping from the target to the source
"""
use_metric = metric_dict[metric](dim)
sdr = SymmetricDiffeomorphicRegistration(use_metric, level_iters)
mapping = sdr.optimize(static, moving, static_grid2world=static_grid2world,
moving_grid2world=moving_grid2world, prealign=prealign)
warped_moving = mapping.transform(moving)
return warped_moving, mapping
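
# A minimal usage sketch (the .npy file names are hypothetical; the arrays are
# 3D volumes prepared by the caller):
#
#   import numpy as np
#   moving = np.load('t1_moving.npy')
#   static = np.load('t1_static.npy')
#   warped, mapping = syn_registration(moving, static, metric='CC')
#   # `mapping` is a dipy DiffeomorphicMap, so the inverse warp is available:
#   unwarped = mapping.transform_inverse(warped)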
# File: /swagger-codegen/python/samsara/models/dispatch_route.py (repo: silverspace/samsara-sdks, license: none)
# coding: utf-8
"""
Samsara API
# Introduction Samsara provides API endpoints for interacting with Samsara Cloud, so that you can build powerful applications and custom solutions with sensor data. Samsara has endpoints available to track and analyze sensors, vehicles, and entire fleets. The Samsara Cloud API is a [RESTful API](https://en.wikipedia.org/wiki/Representational_state_transfer) accessed by an [HTTP](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol) client such as wget or curl, or HTTP libraries of most modern programming languages including python, ruby, java. We use built-in HTTP features, like HTTP authentication and HTTP verbs, which are understood by off-the-shelf HTTP clients. We allow you to interact securely with our API from a client-side web application (though you should never expose your secret API key). [JSON](http://www.json.org/) is returned by all API responses, including errors. If you’re familiar with what you can build with a REST API, the following API reference guide will be your go-to resource. API access to the Samsara cloud is available to all Samsara administrators. To start developing with Samsara APIs you will need to [obtain your API keys](#section/Authentication) to authenticate your API requests. If you have any questions you can reach out to us on [[email protected]](mailto:[email protected]) # Endpoints All our APIs can be accessed through HTTP requests to URLs like: ```curl https://api.samsara.com/<version>/<endpoint> ``` All our APIs are [versioned](#section/Versioning). If we intend to make breaking changes to an API which either changes the response format or request parameter, we will increment the version. # Authentication To authenticate your API request you will need to include your secret token. You can manage your API tokens in the [Dashboard](https://cloud.samsara.com). They are visible under `Settings->Organization->API Tokens`. Your API tokens carry many privileges, so be sure to keep them secure. Do not share your secret API tokens in publicly accessible areas such as GitHub, client-side code, and so on. Authentication to the API is performed via [HTTP Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication). Provide your API token as the basic access_token value in the URL. You do not need to provide a password. ```curl https://api.samsara.com/<version>/<endpoint>?access_token={access_token} ``` All API requests must be made over [HTTPS](https://en.wikipedia.org/wiki/HTTPS). Calls made over plain HTTP or without authentication will fail. # Request Methods Our API endpoints use [HTTP request methods](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods) to specify the desired operation to be performed. The documentation below specified request method supported by each endpoint and the resulting action. ## GET GET requests are typically used for fetching data (like data for a particular driver). ## POST POST requests are typically used for creating or updating a record (like adding new tags to the system). With that being said, a few of our POST requests can be used for fetching data (like current location data of your fleet). ## PUT PUT requests are typically used for updating an existing record (like updating all devices associated with a particular tag). ## PATCH PATCH requests are typically used for modifying an existing record (like modifying a few devices associated with a particular tag). ## DELETE DELETE requests are used for deleting a record (like deleting a tag from the system). 
# Response Codes All API requests will respond with appropriate [HTTP status code](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes). Your API client should handle each response class differently. ## 2XX These are successful responses and indicate that the API request returned the expected response. ## 4XX These indicate that there was a problem with the request like a missing parameter or invalid values. Check the response for specific [error details](#section/Error-Responses). Requests that respond with a 4XX status code, should be modified before retrying. ## 5XX These indicate server errors when the server is unreachable or is misconfigured. In this case, you should retry the API request after some delay. # Error Responses In case of a 4XX status code, the body of the response will contain information to briefly explain the error reported. To help debugging the error, you can refer to the following table for understanding the error message. | Status Code | Message | Description | |-------------|----------------|-------------------------------------------------------------------| | 401 | Invalid token | The API token is invalid and could not be authenticated. Please refer to the [authentication section](#section/Authentication). | | 404 | Page not found | The API endpoint being accessed is invalid. | | 400 | Bad request | Default response for an invalid request. Please check the request to make sure it follows the format specified in the documentation. | # Versioning All our APIs are versioned. Our current API version is `v1` and we are continuously working on improving it further and provide additional endpoints. If we intend to make breaking changes to an API which either changes the response format or request parameter, we will increment the version. Thus, you can use our current API version worry free. # FAQs Check out our [responses to FAQs here](https://kb.samsara.com/hc/en-us/sections/360000538054-APIs). Don’t see an answer to your question? Reach out to us on [[email protected]](mailto:[email protected]). # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from samsara.models.dispatch_job import DispatchJob # noqa: F401,E501
from samsara.models.dispatch_route_base import DispatchRouteBase # noqa: F401,E501
class DispatchRoute(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'actual_end_ms': 'int',
'actual_start_ms': 'int',
'driver_id': 'int',
'group_id': 'int',
'name': 'str',
'scheduled_end_ms': 'int',
'scheduled_meters': 'int',
'scheduled_start_ms': 'int',
'start_location_address': 'str',
'start_location_address_id': 'int',
'start_location_lat': 'float',
'start_location_lng': 'float',
'start_location_name': 'str',
'trailer_id': 'int',
'vehicle_id': 'int',
'dispatch_jobs': 'list[DispatchJob]',
'id': 'int'
}
attribute_map = {
'actual_end_ms': 'actual_end_ms',
'actual_start_ms': 'actual_start_ms',
'driver_id': 'driver_id',
'group_id': 'group_id',
'name': 'name',
'scheduled_end_ms': 'scheduled_end_ms',
'scheduled_meters': 'scheduled_meters',
'scheduled_start_ms': 'scheduled_start_ms',
'start_location_address': 'start_location_address',
'start_location_address_id': 'start_location_address_id',
'start_location_lat': 'start_location_lat',
'start_location_lng': 'start_location_lng',
'start_location_name': 'start_location_name',
'trailer_id': 'trailer_id',
'vehicle_id': 'vehicle_id',
'dispatch_jobs': 'dispatch_jobs',
'id': 'id'
}
def __init__(self, actual_end_ms=None, actual_start_ms=None, driver_id=None, group_id=None, name=None, scheduled_end_ms=None, scheduled_meters=None, scheduled_start_ms=None, start_location_address=None, start_location_address_id=None, start_location_lat=None, start_location_lng=None, start_location_name=None, trailer_id=None, vehicle_id=None, dispatch_jobs=None, id=None): # noqa: E501
"""DispatchRoute - a model defined in Swagger""" # noqa: E501
self._actual_end_ms = None
self._actual_start_ms = None
self._driver_id = None
self._group_id = None
self._name = None
self._scheduled_end_ms = None
self._scheduled_meters = None
self._scheduled_start_ms = None
self._start_location_address = None
self._start_location_address_id = None
self._start_location_lat = None
self._start_location_lng = None
self._start_location_name = None
self._trailer_id = None
self._vehicle_id = None
self._dispatch_jobs = None
self._id = None
self.discriminator = None
if actual_end_ms is not None:
self.actual_end_ms = actual_end_ms
if actual_start_ms is not None:
self.actual_start_ms = actual_start_ms
if driver_id is not None:
self.driver_id = driver_id
if group_id is not None:
self.group_id = group_id
self.name = name
self.scheduled_end_ms = scheduled_end_ms
if scheduled_meters is not None:
self.scheduled_meters = scheduled_meters
self.scheduled_start_ms = scheduled_start_ms
if start_location_address is not None:
self.start_location_address = start_location_address
if start_location_address_id is not None:
self.start_location_address_id = start_location_address_id
if start_location_lat is not None:
self.start_location_lat = start_location_lat
if start_location_lng is not None:
self.start_location_lng = start_location_lng
if start_location_name is not None:
self.start_location_name = start_location_name
if trailer_id is not None:
self.trailer_id = trailer_id
if vehicle_id is not None:
self.vehicle_id = vehicle_id
self.dispatch_jobs = dispatch_jobs
self.id = id
@property
def actual_end_ms(self):
"""Gets the actual_end_ms of this DispatchRoute. # noqa: E501
The time in Unix epoch milliseconds that the route actually ended. # noqa: E501
:return: The actual_end_ms of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._actual_end_ms
@actual_end_ms.setter
def actual_end_ms(self, actual_end_ms):
"""Sets the actual_end_ms of this DispatchRoute.
The time in Unix epoch milliseconds that the route actually ended. # noqa: E501
:param actual_end_ms: The actual_end_ms of this DispatchRoute. # noqa: E501
:type: int
"""
self._actual_end_ms = actual_end_ms
@property
def actual_start_ms(self):
"""Gets the actual_start_ms of this DispatchRoute. # noqa: E501
The time in Unix epoch milliseconds that the route actually started. # noqa: E501
:return: The actual_start_ms of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._actual_start_ms
@actual_start_ms.setter
def actual_start_ms(self, actual_start_ms):
"""Sets the actual_start_ms of this DispatchRoute.
The time in Unix epoch milliseconds that the route actually started. # noqa: E501
:param actual_start_ms: The actual_start_ms of this DispatchRoute. # noqa: E501
:type: int
"""
self._actual_start_ms = actual_start_ms
@property
def driver_id(self):
"""Gets the driver_id of this DispatchRoute. # noqa: E501
ID of the driver assigned to the dispatch route. Note that driver_id and vehicle_id are mutually exclusive. If neither is specified, then the route is unassigned. # noqa: E501
:return: The driver_id of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._driver_id
@driver_id.setter
def driver_id(self, driver_id):
"""Sets the driver_id of this DispatchRoute.
ID of the driver assigned to the dispatch route. Note that driver_id and vehicle_id are mutually exclusive. If neither is specified, then the route is unassigned. # noqa: E501
:param driver_id: The driver_id of this DispatchRoute. # noqa: E501
:type: int
"""
self._driver_id = driver_id
@property
def group_id(self):
"""Gets the group_id of this DispatchRoute. # noqa: E501
ID of the group if the organization has multiple groups (optional). # noqa: E501
:return: The group_id of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this DispatchRoute.
ID of the group if the organization has multiple groups (optional). # noqa: E501
:param group_id: The group_id of this DispatchRoute. # noqa: E501
:type: int
"""
self._group_id = group_id
@property
def name(self):
"""Gets the name of this DispatchRoute. # noqa: E501
Descriptive name of this route. # noqa: E501
:return: The name of this DispatchRoute. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this DispatchRoute.
Descriptive name of this route. # noqa: E501
:param name: The name of this DispatchRoute. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def scheduled_end_ms(self):
"""Gets the scheduled_end_ms of this DispatchRoute. # noqa: E501
The time in Unix epoch milliseconds that the last job in the route is scheduled to end. # noqa: E501
:return: The scheduled_end_ms of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._scheduled_end_ms
@scheduled_end_ms.setter
def scheduled_end_ms(self, scheduled_end_ms):
"""Sets the scheduled_end_ms of this DispatchRoute.
The time in Unix epoch milliseconds that the last job in the route is scheduled to end. # noqa: E501
:param scheduled_end_ms: The scheduled_end_ms of this DispatchRoute. # noqa: E501
:type: int
"""
if scheduled_end_ms is None:
raise ValueError("Invalid value for `scheduled_end_ms`, must not be `None`") # noqa: E501
self._scheduled_end_ms = scheduled_end_ms
@property
def scheduled_meters(self):
"""Gets the scheduled_meters of this DispatchRoute. # noqa: E501
The distance expected to be traveled for this route in meters. # noqa: E501
:return: The scheduled_meters of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._scheduled_meters
@scheduled_meters.setter
def scheduled_meters(self, scheduled_meters):
"""Sets the scheduled_meters of this DispatchRoute.
The distance expected to be traveled for this route in meters. # noqa: E501
:param scheduled_meters: The scheduled_meters of this DispatchRoute. # noqa: E501
:type: int
"""
self._scheduled_meters = scheduled_meters
@property
def scheduled_start_ms(self):
"""Gets the scheduled_start_ms of this DispatchRoute. # noqa: E501
The time in Unix epoch milliseconds that the route is scheduled to start. # noqa: E501
:return: The scheduled_start_ms of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._scheduled_start_ms
@scheduled_start_ms.setter
def scheduled_start_ms(self, scheduled_start_ms):
"""Sets the scheduled_start_ms of this DispatchRoute.
The time in Unix epoch milliseconds that the route is scheduled to start. # noqa: E501
:param scheduled_start_ms: The scheduled_start_ms of this DispatchRoute. # noqa: E501
:type: int
"""
if scheduled_start_ms is None:
raise ValueError("Invalid value for `scheduled_start_ms`, must not be `None`") # noqa: E501
self._scheduled_start_ms = scheduled_start_ms
@property
def start_location_address(self):
"""Gets the start_location_address of this DispatchRoute. # noqa: E501
The address of the route's starting location, as it would be recognized if provided to maps.google.com. Optional if a valid start location address ID is provided. # noqa: E501
:return: The start_location_address of this DispatchRoute. # noqa: E501
:rtype: str
"""
return self._start_location_address
@start_location_address.setter
def start_location_address(self, start_location_address):
"""Sets the start_location_address of this DispatchRoute.
The address of the route's starting location, as it would be recognized if provided to maps.google.com. Optional if a valid start location address ID is provided. # noqa: E501
:param start_location_address: The start_location_address of this DispatchRoute. # noqa: E501
:type: str
"""
self._start_location_address = start_location_address
@property
def start_location_address_id(self):
"""Gets the start_location_address_id of this DispatchRoute. # noqa: E501
ID of the start location associated with an address book entry. Optional if valid values are provided for start location address or latitude/longitude. If a valid start location address ID is provided, address/latitude/longitude will be used from the address book entry. Name of the address book entry will only be used if the start location name is not provided. # noqa: E501
:return: The start_location_address_id of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._start_location_address_id
@start_location_address_id.setter
def start_location_address_id(self, start_location_address_id):
"""Sets the start_location_address_id of this DispatchRoute.
ID of the start location associated with an address book entry. Optional if valid values are provided for start location address or latitude/longitude. If a valid start location address ID is provided, address/latitude/longitude will be used from the address book entry. Name of the address book entry will only be used if the start location name is not provided. # noqa: E501
:param start_location_address_id: The start_location_address_id of this DispatchRoute. # noqa: E501
:type: int
"""
self._start_location_address_id = start_location_address_id
@property
def start_location_lat(self):
"""Gets the start_location_lat of this DispatchRoute. # noqa: E501
Latitude of the start location in decimal degrees. Optional if a valid start location address ID is provided. # noqa: E501
:return: The start_location_lat of this DispatchRoute. # noqa: E501
:rtype: float
"""
return self._start_location_lat
@start_location_lat.setter
def start_location_lat(self, start_location_lat):
"""Sets the start_location_lat of this DispatchRoute.
Latitude of the start location in decimal degrees. Optional if a valid start location address ID is provided. # noqa: E501
:param start_location_lat: The start_location_lat of this DispatchRoute. # noqa: E501
:type: float
"""
self._start_location_lat = start_location_lat
@property
def start_location_lng(self):
"""Gets the start_location_lng of this DispatchRoute. # noqa: E501
Longitude of the start location in decimal degrees. Optional if a valid start location address ID is provided. # noqa: E501
:return: The start_location_lng of this DispatchRoute. # noqa: E501
:rtype: float
"""
return self._start_location_lng
@start_location_lng.setter
def start_location_lng(self, start_location_lng):
"""Sets the start_location_lng of this DispatchRoute.
Longitude of the start location in decimal degrees. Optional if a valid start location address ID is provided. # noqa: E501
:param start_location_lng: The start_location_lng of this DispatchRoute. # noqa: E501
:type: float
"""
self._start_location_lng = start_location_lng
@property
def start_location_name(self):
"""Gets the start_location_name of this DispatchRoute. # noqa: E501
The name of the route's starting location. If provided, it will take precedence over the name of the address book entry. # noqa: E501
:return: The start_location_name of this DispatchRoute. # noqa: E501
:rtype: str
"""
return self._start_location_name
@start_location_name.setter
def start_location_name(self, start_location_name):
"""Sets the start_location_name of this DispatchRoute.
The name of the route's starting location. If provided, it will take precedence over the name of the address book entry. # noqa: E501
:param start_location_name: The start_location_name of this DispatchRoute. # noqa: E501
:type: str
"""
self._start_location_name = start_location_name
@property
def trailer_id(self):
"""Gets the trailer_id of this DispatchRoute. # noqa: E501
ID of the trailer assigned to the dispatch route. Note that trailers can only be assigned to routes that have a Vehicle or Driver assigned to them. # noqa: E501
:return: The trailer_id of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._trailer_id
@trailer_id.setter
def trailer_id(self, trailer_id):
"""Sets the trailer_id of this DispatchRoute.
ID of the trailer assigned to the dispatch route. Note that trailers can only be assigned to routes that have a Vehicle or Driver assigned to them. # noqa: E501
:param trailer_id: The trailer_id of this DispatchRoute. # noqa: E501
:type: int
"""
self._trailer_id = trailer_id
@property
def vehicle_id(self):
"""Gets the vehicle_id of this DispatchRoute. # noqa: E501
ID of the vehicle assigned to the dispatch route. Note that vehicle_id and driver_id are mutually exclusive. If neither is specified, then the route is unassigned. # noqa: E501
:return: The vehicle_id of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._vehicle_id
@vehicle_id.setter
def vehicle_id(self, vehicle_id):
"""Sets the vehicle_id of this DispatchRoute.
ID of the vehicle assigned to the dispatch route. Note that vehicle_id and driver_id are mutually exclusive. If neither is specified, then the route is unassigned. # noqa: E501
:param vehicle_id: The vehicle_id of this DispatchRoute. # noqa: E501
:type: int
"""
self._vehicle_id = vehicle_id
@property
def dispatch_jobs(self):
"""Gets the dispatch_jobs of this DispatchRoute. # noqa: E501
The dispatch jobs associated with this route. # noqa: E501
:return: The dispatch_jobs of this DispatchRoute. # noqa: E501
:rtype: list[DispatchJob]
"""
return self._dispatch_jobs
@dispatch_jobs.setter
def dispatch_jobs(self, dispatch_jobs):
"""Sets the dispatch_jobs of this DispatchRoute.
The dispatch jobs associated with this route. # noqa: E501
:param dispatch_jobs: The dispatch_jobs of this DispatchRoute. # noqa: E501
:type: list[DispatchJob]
"""
if dispatch_jobs is None:
raise ValueError("Invalid value for `dispatch_jobs`, must not be `None`") # noqa: E501
self._dispatch_jobs = dispatch_jobs
@property
def id(self):
"""Gets the id of this DispatchRoute. # noqa: E501
ID of the Samsara dispatch route. # noqa: E501
:return: The id of this DispatchRoute. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this DispatchRoute.
ID of the Samsara dispatch route. # noqa: E501
:param id: The id of this DispatchRoute. # noqa: E501
:type: int
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DispatchRoute, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DispatchRoute):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
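
# A minimal construction sketch (the ID and epoch timestamps below are
# placeholders, not real Samsara identifiers):
#
#   route = DispatchRoute(name='Morning deliveries',
#                         scheduled_start_ms=1546300800000,
#                         scheduled_end_ms=1546315200000,
#                         dispatch_jobs=[],
#                         id=1)
#   print(route.to_dict())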
# File: /Python_codes/p03282/s933456835.py (repo: Aasthaengg/IBMdataset, license: none)
s = input()
k = int(input())
# Count the run of leading '1's: position k is '1' only while it falls inside
# that run; otherwise it lands inside the (hugely expanded) block of the first
# non-'1' digit.
c = 0
for ch in s:
    if ch == "1":
        c += 1
    else:
        break
if k <= c:
    print("1")
else:
    print(s[c])

# File: /aleph/analyze/polyglot_entity.py (repo: KarrieK/aleph, license: MIT)
from __future__ import absolute_import
import regex
import logging
from polyglot.text import Text
from polyglot.downloader import downloader
from normality import collapse_spaces
from aleph import settings
from aleph.analyze.analyzer import Analyzer
from aleph.model import Document, DocumentTag, DocumentTagCollector
log = logging.getLogger(__name__)
class PolyglotEntityAnalyzer(Analyzer):
ORIGIN = 'polyglot'
MIN_LENGTH = 100
CLEAN = regex.compile('(^[^\w]*|[^\w]*$)')
TYPES = {
'I-PER': DocumentTag.TYPE_PERSON,
'I-ORG': DocumentTag.TYPE_ORGANIZATION,
}
IGNORED = [
Document.SCHEMA_PACKAGE,
Document.SCHEMA_FOLDER,
Document.SCHEMA_IMAGE,
Document.SCHEMA_TABLE
]
def __init__(self):
self.active = settings.ANALYZE_POLYGLOT
@property
def languages(self):
cls = type(self)
if not hasattr(cls, '_languages'):
try:
packages = downloader.packages()
packages = [p for p in packages if p.task == 'ner2']
cls._languages = [p.language for p in packages]
except Exception:
log.info("Cannot load polyglot language list.")
return cls._languages
def tag_text(self, text, languages):
for language in languages:
parsed = Text(text, hint_language_code=language)
for entity in parsed.entities:
if entity.tag == 'I-LOC':
continue
label = ' '.join(entity)
label = self.CLEAN.sub(' ', label)
label = collapse_spaces(label)
if ' ' not in label or len(label) < 4 or len(label) > 200:
continue
yield label, entity.tag
def analyze(self, document):
if document.schema in self.IGNORED:
return
collector = DocumentTagCollector(document, self.ORIGIN)
try:
languages = set(document.languages)
if len(self.languages):
languages = languages.intersection(self.languages)
if not len(languages):
languages = [settings.DEFAULT_LANGUAGE]
for text in document.texts:
if len(text) <= self.MIN_LENGTH:
continue
for label, tag in self.tag_text(text, languages):
# log.info("Entity [%s]: %s", document.id, label)
collector.emit(label, self.TYPES[tag])
except ValueError as ve:
log.warning('NER value error: %r', ve)
collector.save()
if len(collector):
log.info('Polyglot extracted %s entities.', len(collector))
| [
"[email protected]"
] | |
3cd48988c3167fcab3f06686d166211a8933beca | 9791c7cd589c5c53aa7e1dbf929d69ba99f7c526 | /myapp/migrations/0001_initial.py | ac89857986b89af875fefb96ade4f759a637aabe | [] | no_license | okprogrammer/Calener-App | 3d717ff9eac3e8ddebd6f0b5e95caf513e3fa429 | a15e71eebed670b7203960e86f88e8da92dca1b7 | refs/heads/master | 2020-03-06T23:44:54.761954 | 2018-03-30T12:34:59 | 2018-03-30T12:34:59 | 127,140,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # Generated by Django 2.0.2 on 2018-03-28 17:39
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('date', models.DateTimeField()),
('description', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
]
# File: /Packs/qualys/Scripts/QualysCreateIncidentFromReport/QualysCreateIncidentFromReport.py (repo: adambaumeister/content, license: MIT)
import json
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def get_asset_id_for_ip(ip):
resp = demisto.executeCommand("qualys-host-list", {"ips": ip})
if isError(resp[0]):
demisto.results(resp)
sys.exit(0)
if isinstance(resp_dict := resp[0], dict) and isinstance(xml_string := resp_dict['Contents'], str):
json_string: str = xml2json(xml_string)
asset_id = demisto.get(json.loads(json_string), 'HOST_LIST_OUTPUT.RESPONSE.HOST_LIST.HOST.ID')
else:
asset_id = demisto.get(resp[0], 'Contents.HOST_LIST_OUTPUT.RESPONSE.HOST_LIST.HOST.ID')
return asset_id
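
# Shape of the response walked above, as a sketch (the nesting matches the
# dotted path given to demisto.get(); the ID value is made up):
#
#   {"HOST_LIST_OUTPUT": {"RESPONSE": {"HOST_LIST": {"HOST": {"ID": "12345"}}}}}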
def main():
incident_type = demisto.args().get("incidentType", "Vulnerability")
max_file_size = int(demisto.args().get("maxFileSize", 1024 ** 2))
min_severity = int(demisto.args().get("minSeverity", 1))
file_entry = demisto.getFilePath(demisto.args().get("entryID"))
with open(file_entry['path'], 'r') as f:
data = f.read(max_file_size)
if data:
report = json.loads(xml2json(data))
generation_date = demisto.get(report, "ASSET_DATA_REPORT.HEADER.GENERATION_DATETIME")
# Get asset list
asset_list = demisto.get(report, "ASSET_DATA_REPORT.HOST_LIST.HOST")
if not asset_list:
demisto.results({
"Type": entryTypes["note"],
"ContentsFormat": formats["text"],
"Contents": 'No vulnerable assets were found'
})
sys.exit(0)
if not isinstance(asset_list, list):
asset_list = [asset_list]
# Get QIDs only if over relevant severity
general_vulnerabilities = argToList(
demisto.get(report, "ASSET_DATA_REPORT.GLOSSARY.VULN_DETAILS_LIST.VULN_DETAILS"))
if not isinstance(general_vulnerabilities, list):
general_vulnerabilities = [general_vulnerabilities]
# Get list of QID with severity >= min_severity
qid_severity = [demisto.get(vulnerability, "QID.#text") for vulnerability in general_vulnerabilities if
demisto.get(vulnerability, 'SEVERITY')
and (int(demisto.get(vulnerability, 'SEVERITY')) >= min_severity)]
for asset in asset_list:
# Get Asset ID from Qualys
ip = demisto.get(asset, "IP")
if not ip:
demisto.results({
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": 'No IP was found for asset {0}'.format(str(asset))
})
sys.exit(0)
asset_id = get_asset_id_for_ip(ip)
if not asset_id:
demisto.results({
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": 'No ID was found for asset {0}'.format(str(asset))
})
sys.exit(0)
# Get Asset vulnerability list
vulnerabilities = argToList(demisto.get(asset, "VULN_INFO_LIST.VULN_INFO"))
if not isinstance(vulnerabilities, list):
vulnerabilities = [vulnerabilities]
qids = map(lambda vulnerability: demisto.get(vulnerability, "QID.#text"), vulnerabilities)
# Get only the QIDs that exists in asset and has severity >= min_severity
qids = list(set(qids) & set(qid_severity))
for qid in qids:
# Search for existing open incidents with the same Vendor ID and Asset ID.
# Will open a new incident only if such an incident not exists.
resp = demisto.executeCommand(
"getIncidents",
{"query": "vendorid: {0} and assetid: {1} and --status:Closed".format(qid, asset_id)})
if isError(resp[0]):
demisto.results(resp)
sys.exit(0)
incident_number = demisto.get(resp[0], "Contents.total")
try:
incident_number = int(incident_number)
except Exception:
demisto.results({
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": 'Error while searching the incident repository'
})
sys.exit(0)
if incident_number == 0:
# Create incident
demisto.executeCommand("createNewIncident", {
"name": "Vulnerability - Asset {0} QID {1} - {2}".format(asset_id, qid, generation_date),
"vendorid": str(qid),
"type": incident_type,
"assetid": str(asset_id)
})
demisto.results("Done.")
else:
demisto.results({
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": 'No data could be read.'
})
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
# File: /face_recog.py (repo: hieubkvn123/FaceRecognitionPyTorch, license: none)
import os
import cv2
import torch
import pickle
import time
import imutils
import numpy as np
import face_recognition
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- needed for projection='3d' on older matplotlib
### All the custom modules ###
from triplet_net import TripletLossNet
from arcface_net import ArcFaceNet
from face_align import align
from scipy.spatial.distance import cosine
from imutils.video import WebcamVideoStream
from sklearn.ensemble import RandomForestClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-v", "--video", required = False, help = "Path to input video")
args = vars(parser.parse_args())
HEIGHT, WIDTH, CHANNELS = 128, 128, 3
print("[INFO] Starting camera stream ... ")
IMG_PREPROCESSING_METHOD = 'lab'
VEC_PREPROCESSING_METHOD = 'normalize'
FACE_FILE_HEADER = 'faces_' + IMG_PREPROCESSING_METHOD + "_" + VEC_PREPROCESSING_METHOD + ".pickle"
LABL_FILE_HEADER = 'labels_' + IMG_PREPROCESSING_METHOD + "_" + VEC_PREPROCESSING_METHOD + ".pickle"
### Video or camera stream ###
video = False
if(args['video'] == None):
vs = WebcamVideoStream(src=0).start()
else:
vs = cv2.VideoCapture(args['video'])
video = True
print("[INFO] Warming up the camera ... ")
time.sleep(2.0)
### loading dnn model for detection ###
print("[INFO] Loading detection model ...")
prototxt = 'deploy.prototxt'
caffe_model = 'dnn_model.caffemodel'
net = cv2.dnn.readNetFromCaffe(prototxt, caffe_model)
### loading recognizer model ###
print("[INFO] Loading recognizer model ...")
model = ArcFaceNet()
model.load_state_dict(torch.load('arcface_pytorch.pt', map_location=torch.device('cpu')))
model.eval()
# include a classifier
#clf = SVC(kernel='rbf', C = 1.0, class_weight='balanced', probability=True, gamma='auto')
#clf = LinearDiscriminantAnalysis(n_components=2)
clf = LinearSVC()
known_faces = list()
known_names = list()
DATA_DIR = 'faces/'
print("[INFO] Loading known faces ... ")
def preprocessing_encoding(enc, operation='normalize'):
def normalize(a):
length = np.linalg.norm(a)
a_norm = a/length
return a_norm
def standardize(a):
mean = a.mean()
std = a.std()
a_std = (a - mean)/std
return a_std
if(operation == 'standardize'):
enc = standardize(enc)
elif(operation == 'normalize'):
enc = normalize(enc)
return enc
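
# Quick numeric check of the two vector ops above (illustrative):
#
#   preprocessing_encoding(np.array([3.0, 4.0]), 'normalize')    # -> [0.6, 0.8]
#   preprocessing_encoding(np.array([1.0, 3.0]), 'standardize')  # -> [-1.0, 1.0]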
def preprocessing_image(img, operation = 'rgb', width=128, height=128):
# neutralizes the lumination of the image
img = cv2.resize(img, (width, height))
def lumination_correct(img):
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
# applying histogram equalization to the l-channel
clahe = cv2.createCLAHE(clipLimit = 3.0, tileGridSize=(8,8))
l_clahe = clahe.apply(l)
# merge the image again
lab_clahe = cv2.merge((l_clahe, a, b))
# convert back to bgr
bgr = cv2.cvtColor(lab_clahe, cv2.COLOR_LAB2BGR)
return bgr
final = img
if(operation == 'rgb'):
final = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
elif(operation == 'lab'):
final = lumination_correct(img)
elif(operation == 'gray'):
gray = lambda x : [np.mean(x), np.mean(x), np.mean(x)]
final = [gray(x) for x in img]
return final
### Define the operations on images and vectors ###
img_preprocessing = lambda img : preprocessing_image(img, operation=IMG_PREPROCESSING_METHOD, width=WIDTH, height=HEIGHT)
vec_preprocessing = lambda vec : preprocessing_encoding(vec, operation = VEC_PREPROCESSING_METHOD)
### Loading sample images and generating vectors ###
### If the known faces and known labels are not loaded ###
if(not os.path.exists(FACE_FILE_HEADER) or not os.path.exists(LABL_FILE_HEADER)):
for (dir, dirs, files) in os.walk(DATA_DIR):
        # NOTE: the original check (dir != DATA_DIR or dir == DATA_DIR) was a
        # tautology, so every directory yielded by os.walk() is processed.
        if True:
for file in files:
abs_path = dir + "/" + file
img = cv2.imread(abs_path)
img2 = cv2.resize(img, (0,0), fx=0.5,fy=0.5)
(H, W) = img.shape[:2]
# detect the face inside the image
blob = cv2.dnn.blobFromImage(img, 1.0, (300,300), [104,111,123])
net.setInput(blob)
detections = net.forward()
face = None
face3 = None
num_faces = 0
for i in range(detections.shape[2]):
confidence = detections[0,0,i,2]
if(confidence < 0.5):
continue
num_faces += 1
box = np.array([W,H,W,H]) * detections[0,0,i,3:7]
(startX, startY, endX, endY) = box.astype("int")
face = img[startY:endY,startX:endX]
box2 = np.array([W/2,H/2,W/2,H/2])*detections[0,0,i,3:7]
(startX, startY, endX, endY) = box2.astype("int")
face3 = img2[startY:endY,startX:endX]
print(" [INFO] Face detected at " + str(abs_path))
if(num_faces == 0): continue
### First augmentation ###
face1 = img_preprocessing(face) # cv2.resize(face, (WIDTH, HEIGHT))
face1 = np.array([face1])
face1 = torch.Tensor(face1).reshape(1, CHANNELS, HEIGHT, WIDTH)
embedding = model(face1)
embedding = embedding.detach().numpy()[0]
embedding = vec_preprocessing(embedding)
label = file.split(".")[0]
label = label.split("_")[0]
known_faces.append(embedding)
known_names.append(label)
### Second augmentation ###
face3 = img_preprocessing(face3)
face3 = np.array([face3])
face3 = torch.Tensor(face3).reshape(1, CHANNELS, HEIGHT, WIDTH)
embedding = model(face3)
embedding = embedding.detach().numpy()[0]
embedding = vec_preprocessing(embedding)
label = file.split(".")[0]
label = label.split("_")[0]
known_faces.append(embedding)
known_names.append(label)
pickle.dump(known_faces, open(FACE_FILE_HEADER, 'wb'))
pickle.dump(known_names, open(LABL_FILE_HEADER, 'wb'))
else:
known_faces = pickle.load(open(FACE_FILE_HEADER, 'rb'))
known_names = pickle.load(open(LABL_FILE_HEADER, 'rb'))
known_faces = np.array(known_faces)
known_names = np.array(known_names)
clf.fit(known_faces, known_names)
print(known_names)
pca = PCA(n_components = 3)
out = pca.fit_transform(known_faces)
out /= np.linalg.norm(out, axis=1, keepdims=True)
ax = plt.axes(projection='3d')
for label in np.unique(known_names):
cluster = out[np.where(known_names == label)]
x = cluster[:,0]
y = cluster[:,1]
z = cluster[:,2]
ax.scatter3D(x, y, z, alpha=0.3, label=label)
# clf.fit(known_faces, known_names)
def recognize(img, tolerance = 1.0):
label = "Unkown"
global model # load the model from outside
global pca
face = img_preprocessing(img)
face = np.array([face])
face = torch.Tensor(face).reshape(1, CHANNELS, HEIGHT, WIDTH)
outputs = model(face)
outputs = outputs.detach().numpy()[0] # the validating vector
outputs = vec_preprocessing(outputs)
point = pca.transform([outputs])
point /= np.linalg.norm(point)
# now compare to the known faces
matches = face_recognition.compare_faces(known_faces, outputs, tolerance=tolerance)
distances = face_recognition.face_distance(known_faces, outputs)
print(distances)
best_match = np.argmin(distances)
label = 'Unknown'
if(matches[best_match]):
cosine_sim = 1 - cosine(known_faces[best_match], outputs)
if(cosine_sim >= 0.99):
label = known_names[best_match]
return label, point
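
# The decision rule above, spelled out (the thresholds are this script's own
# choices, not universal constants): a face gets a known label only when BOTH
#   face_distance(known_faces, v)[best_match] <= tolerance
#   1 - cosine(known_faces[best_match], v)    >= 0.99
# hold; otherwise the label stays "Unknown".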
print("-------------------------------------------------")
print("[INFO] Running recognition app ... ")
PROCESS_FRAME = True
face_locations = list()
face_names = list()
while(True):
if(not video):
frame = vs.read()
else:
ret, frame = vs.read()
(H, W) = frame.shape[:2]
if(PROCESS_FRAME):
face_locations = list()
face_names = list()
# convert image to blob for detection
blob = cv2.dnn.blobFromImage(frame, 1.0, (300,300), (104,111.0,123.0))
net.setInput(blob)
detections = net.forward()
for i in range(detections.shape[2]):
confidence = detections[0,0,i,2]
if(confidence < 0.5):
continue
box = np.array([W,H,W,H]) * detections[0,0,i,3:7]
(startX, startY, endX, endY) = box.astype("int")
face_locations.append((startX, startY, endX, endY))
face = frame[max(startY,0):min(endY,H), max(startX,0):min(endX,W)]
label, point = recognize(face, tolerance = 0.3)
face_names.append(label)
ax.scatter3D(point[:,0], point[:,1], point[:,2], color='brown', alpha=0.3)
for (startX, startY, endX, endY), label in zip(face_locations, face_names):
cv2.rectangle(frame, (startX, startY), (endX, endY), (0,255,0), 2)
cv2.putText(frame, label, (startX,startY), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,0,255), 2)
PROCESS_FRAME = not PROCESS_FRAME
frame = imutils.resize(frame, width=1000)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1)
if(key == ord("q")):
break
if(not video):
vs.stop()
else:
vs.release()
cv2.destroyAllWindows()
plt.legend()
plt.show()
print("-------------------------------------------------")
print("[INFO] App stopped ...")
# File: /coloredlogs/tests.py (repo: mastak/python-coloredlogs, license: MIT)
# Automated tests for the `coloredlogs' package.
#
# Author: Peter Odding <[email protected]>
# Last Change: January 17, 2018
# URL: https://coloredlogs.readthedocs.io
"""Automated tests for the `coloredlogs` package."""
# Standard library modules.
import contextlib
import imp
import logging
import logging.handlers
import os
import re
import subprocess
import sys
import tempfile
# External dependencies.
from humanfriendly.compat import StringIO
from humanfriendly.terminal import ANSI_COLOR_CODES, ansi_style, ansi_wrap
from humanfriendly.testing import PatchedItem, TestCase, retry
from humanfriendly.text import format, random_string
from mock import MagicMock
# The module we're testing.
import coloredlogs
import coloredlogs.cli
from coloredlogs import (
CHROOT_FILES,
ColoredFormatter,
NameNormalizer,
decrease_verbosity,
find_defined_levels,
find_handler,
find_hostname,
find_program_name,
get_level,
increase_verbosity,
install,
is_verbose,
level_to_number,
match_stream_handler,
parse_encoded_styles,
set_level,
walk_propagation_tree,
)
from coloredlogs.syslog import SystemLogging, match_syslog_handler
from coloredlogs.converter import (
ColoredCronMailer,
EIGHT_COLOR_PALETTE,
capture,
convert,
)
# External test dependencies.
from capturer import CaptureOutput
from verboselogs import VerboseLogger
# Compiled regular expression that matches a single line of output produced by
# the default log format (does not include matching of ANSI escape sequences).
PLAIN_TEXT_PATTERN = re.compile(r'''
(?P<date> \d{4}-\d{2}-\d{2} )
\s (?P<time> \d{2}:\d{2}:\d{2} )
\s (?P<hostname> \S+ )
\s (?P<logger_name> \w+ )
\[ (?P<process_id> \d+ ) \]
\s (?P<severity> [A-Z]+ )
\s (?P<message> .* )
''', re.VERBOSE)
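
# An example line the pattern above is meant to match (illustrative values):
#
#   2018-01-17 19:00:43 myhost demo[28914] INFO Hello world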
# Compiled regular expression that matches a single line of output produced by
# the default log format with milliseconds=True.
PATTERN_INCLUDING_MILLISECONDS = re.compile(r'''
(?P<date> \d{4}-\d{2}-\d{2} )
\s (?P<time> \d{2}:\d{2}:\d{2},\d{3} )
\s (?P<hostname> \S+ )
\s (?P<logger_name> \w+ )
\[ (?P<process_id> \d+ ) \]
\s (?P<severity> [A-Z]+ )
\s (?P<message> .* )
''', re.VERBOSE)
def setUpModule():
"""Speed up the tests by disabling the demo's artificial delay."""
os.environ['COLOREDLOGS_DEMO_DELAY'] = '0'
coloredlogs.demo.DEMO_DELAY = 0
class ColoredLogsTestCase(TestCase):
"""Container for the `coloredlogs` tests."""
def find_system_log(self):
"""Find the system log file or skip the current test."""
filename = ('/var/log/system.log' if sys.platform == 'darwin' else (
'/var/log/syslog' if 'linux' in sys.platform else None
))
if not filename:
self.skipTest("Location of system log file unknown!")
elif not os.path.isfile(filename):
self.skipTest("System log file not found! (%s)" % filename)
elif not os.access(filename, os.R_OK):
self.skipTest("Insufficient permissions to read system log file! (%s)" % filename)
else:
return filename
def test_level_to_number(self):
"""Make sure :func:`level_to_number()` works as intended."""
# Make sure the default levels are translated as expected.
assert level_to_number('debug') == logging.DEBUG
assert level_to_number('info') == logging.INFO
assert level_to_number('warning') == logging.WARNING
assert level_to_number('error') == logging.ERROR
assert level_to_number('fatal') == logging.FATAL
# Make sure bogus level names don't blow up.
assert level_to_number('bogus-level') == logging.INFO
def test_find_hostname(self):
"""Make sure :func:`~find_hostname()` works correctly."""
assert find_hostname()
# Create a temporary file as a placeholder for e.g. /etc/debian_chroot.
fd, temporary_file = tempfile.mkstemp()
try:
with open(temporary_file, 'w') as handle:
handle.write('first line\n')
handle.write('second line\n')
CHROOT_FILES.insert(0, temporary_file)
# Make sure the chroot file is being read.
assert find_hostname() == 'first line'
finally:
# Clean up.
CHROOT_FILES.pop(0)
os.unlink(temporary_file)
# Test that unreadable chroot files don't break coloredlogs.
try:
CHROOT_FILES.insert(0, temporary_file)
# Make sure that a usable value is still produced.
assert find_hostname()
finally:
# Clean up.
CHROOT_FILES.pop(0)
def test_host_name_filter(self):
"""Make sure :func:`install()` integrates with :class:`~coloredlogs.HostNameFilter()`."""
install(fmt='%(hostname)s')
with CaptureOutput() as capturer:
logging.info("A truly insignificant message ..")
output = capturer.get_text()
assert find_hostname() in output
def test_program_name_filter(self):
"""Make sure :func:`install()` integrates with :class:`~coloredlogs.ProgramNameFilter()`."""
install(fmt='%(programname)s')
with CaptureOutput() as capturer:
logging.info("A truly insignificant message ..")
output = capturer.get_text()
assert find_program_name() in output
def test_colorama_enabled(self):
"""Test that colorama is enabled (through mocking)."""
init_function = MagicMock()
with mocked_colorama_module(init_function):
# Configure logging to the terminal.
coloredlogs.install()
# Ensure that our mock method was called.
assert init_function.called
def test_colorama_missing(self):
"""Test that colorama is missing (through mocking)."""
def init_function():
raise ImportError
with mocked_colorama_module(init_function):
# Configure logging to the terminal. It is expected that internally
# an ImportError is raised, but the exception is caught and colored
# output is disabled.
coloredlogs.install()
# Find the handler that was created by coloredlogs.install().
handler, logger = find_handler(logging.getLogger(), match_stream_handler)
# Make sure that logging to the terminal was initialized.
assert isinstance(handler.formatter, logging.Formatter)
# Make sure colored logging is disabled.
assert not isinstance(handler.formatter, ColoredFormatter)
def test_system_logging(self):
"""Make sure the :class:`coloredlogs.syslog.SystemLogging` context manager works."""
system_log_file = self.find_system_log()
expected_message = random_string(50)
with SystemLogging(programname='coloredlogs-test-suite') as syslog:
if not syslog:
return self.skipTest("couldn't connect to syslog daemon")
# When I tried out the system logging support on macOS 10.13.1 on
# 2018-01-05 I found that while WARNING and ERROR messages show up
# in the system log DEBUG and INFO messages don't. This explains
# the importance of the level of the log message below.
logging.error("%s", expected_message)
# Retry the following assertion (for up to 60 seconds) to give the
# logging daemon time to write our log message to disk. This
# appears to be needed on MacOS workers on Travis CI, see:
# https://travis-ci.org/xolox/python-coloredlogs/jobs/325245853
retry(lambda: check_contents(system_log_file, expected_message, True))
def test_syslog_shortcut_simple(self):
"""Make sure that ``coloredlogs.install(syslog=True)`` works."""
system_log_file = self.find_system_log()
expected_message = random_string(50)
with cleanup_handlers():
# See test_system_logging() for the importance of this log level.
coloredlogs.install(syslog=True)
logging.error("%s", expected_message)
# See the comments in test_system_logging() on why this is retried.
retry(lambda: check_contents(system_log_file, expected_message, True))
def test_syslog_shortcut_enhanced(self):
"""Make sure that ``coloredlogs.install(syslog='warning')`` works."""
system_log_file = self.find_system_log()
the_expected_message = random_string(50)
not_an_expected_message = random_string(50)
with cleanup_handlers():
# See test_system_logging() for the importance of these log levels.
coloredlogs.install(syslog='error')
logging.warning("%s", not_an_expected_message)
logging.error("%s", the_expected_message)
# See the comments in test_system_logging() on why this is retried.
retry(lambda: check_contents(system_log_file, the_expected_message, True))
retry(lambda: check_contents(system_log_file, not_an_expected_message, False))
def test_name_normalization(self):
"""Make sure :class:`~coloredlogs.NameNormalizer` works as intended."""
nn = NameNormalizer()
for canonical_name in ['debug', 'info', 'warning', 'error', 'critical']:
assert nn.normalize_name(canonical_name) == canonical_name
assert nn.normalize_name(canonical_name.upper()) == canonical_name
assert nn.normalize_name('warn') == 'warning'
assert nn.normalize_name('fatal') == 'critical'
def test_style_parsing(self):
"""Make sure :func:`~coloredlogs.parse_encoded_styles()` works as intended."""
encoded_styles = 'debug=green;warning=yellow;error=red;critical=red,bold'
decoded_styles = parse_encoded_styles(encoded_styles, normalize_key=lambda k: k.upper())
assert sorted(decoded_styles.keys()) == sorted(['debug', 'warning', 'error', 'critical'])
assert decoded_styles['debug']['color'] == 'green'
assert decoded_styles['warning']['color'] == 'yellow'
assert decoded_styles['error']['color'] == 'red'
assert decoded_styles['critical']['color'] == 'red'
assert decoded_styles['critical']['bold'] is True
def test_is_verbose(self):
"""Make sure is_verbose() does what it should :-)."""
set_level(logging.INFO)
assert not is_verbose()
set_level(logging.DEBUG)
assert is_verbose()
set_level(logging.VERBOSE)
assert is_verbose()
def test_increase_verbosity(self):
"""Make sure increase_verbosity() respects default and custom levels."""
# Start from a known state.
set_level(logging.INFO)
assert get_level() == logging.INFO
# INFO -> VERBOSE.
increase_verbosity()
assert get_level() == logging.VERBOSE
# VERBOSE -> DEBUG.
increase_verbosity()
assert get_level() == logging.DEBUG
# DEBUG -> SPAM.
increase_verbosity()
assert get_level() == logging.SPAM
# SPAM -> NOTSET.
increase_verbosity()
assert get_level() == logging.NOTSET
# NOTSET -> NOTSET.
increase_verbosity()
assert get_level() == logging.NOTSET
def test_decrease_verbosity(self):
"""Make sure decrease_verbosity() respects default and custom levels."""
# Start from a known state.
set_level(logging.INFO)
assert get_level() == logging.INFO
# INFO -> NOTICE.
decrease_verbosity()
assert get_level() == logging.NOTICE
# NOTICE -> WARNING.
decrease_verbosity()
assert get_level() == logging.WARNING
# WARNING -> SUCCESS.
decrease_verbosity()
assert get_level() == logging.SUCCESS
# SUCCESS -> ERROR.
decrease_verbosity()
assert get_level() == logging.ERROR
# ERROR -> CRITICAL.
decrease_verbosity()
assert get_level() == logging.CRITICAL
# CRITICAL -> CRITICAL.
decrease_verbosity()
assert get_level() == logging.CRITICAL
def test_level_discovery(self):
"""Make sure find_defined_levels() always reports the levels defined in Python's standard library."""
defined_levels = find_defined_levels()
level_values = defined_levels.values()
for number in (0, 10, 20, 30, 40, 50):
assert number in level_values
def test_walk_propagation_tree(self):
"""Make sure walk_propagation_tree() properly walks the tree of loggers."""
root, parent, child, grand_child = self.get_logger_tree()
# Check the default mode of operation.
loggers = list(walk_propagation_tree(grand_child))
assert loggers == [grand_child, child, parent, root]
# Now change the propagation (non-default mode of operation).
child.propagate = False
loggers = list(walk_propagation_tree(grand_child))
assert loggers == [grand_child, child]
def test_find_handler(self):
"""Make sure find_handler() works as intended."""
root, parent, child, grand_child = self.get_logger_tree()
# Add some handlers to the tree.
stream_handler = logging.StreamHandler()
syslog_handler = logging.handlers.SysLogHandler()
child.addHandler(stream_handler)
parent.addHandler(syslog_handler)
# Make sure the first matching handler is returned.
matched_handler, matched_logger = find_handler(grand_child, lambda h: isinstance(h, logging.Handler))
assert matched_handler is stream_handler
# Make sure the first matching handler of the given type is returned.
matched_handler, matched_logger = find_handler(child, lambda h: isinstance(h, logging.handlers.SysLogHandler))
assert matched_handler is syslog_handler
def get_logger_tree(self):
"""Create and return a tree of loggers."""
# Get the root logger.
root = logging.getLogger()
# Create a top level logger for ourselves.
parent_name = random_string()
parent = logging.getLogger(parent_name)
# Create a child logger.
child_name = '%s.%s' % (parent_name, random_string())
child = logging.getLogger(child_name)
# Create a grand child logger.
grand_child_name = '%s.%s' % (child_name, random_string())
grand_child = logging.getLogger(grand_child_name)
return root, parent, child, grand_child
def test_support_for_milliseconds(self):
"""Make sure milliseconds are hidden by default but can be easily enabled."""
# Check that the default log format doesn't include milliseconds.
stream = StringIO()
install(reconfigure=True, stream=stream)
logging.info("This should not include milliseconds.")
assert all(map(PLAIN_TEXT_PATTERN.match, stream.getvalue().splitlines()))
# Check that milliseconds can be enabled via a shortcut.
stream = StringIO()
install(milliseconds=True, reconfigure=True, stream=stream)
logging.info("This should include milliseconds.")
assert all(map(PATTERN_INCLUDING_MILLISECONDS.match, stream.getvalue().splitlines()))
def test_plain_text_output_format(self):
"""Inspect the plain text output of coloredlogs."""
logger = VerboseLogger(random_string(25))
stream = StringIO()
install(level=logging.NOTSET, logger=logger, stream=stream)
# Test that filtering on severity works.
logger.setLevel(logging.INFO)
logger.debug("No one should see this message.")
assert len(stream.getvalue().strip()) == 0
# Test that the default output format looks okay in plain text.
logger.setLevel(logging.NOTSET)
for method, severity in ((logger.debug, 'DEBUG'),
(logger.info, 'INFO'),
(logger.verbose, 'VERBOSE'),
(logger.warning, 'WARNING'),
(logger.error, 'ERROR'),
(logger.critical, 'CRITICAL')):
# Prepare the text.
text = "This is a message with severity %r." % severity.lower()
# Log the message with the given severity.
method(text)
# Get the line of output generated by the handler.
output = stream.getvalue()
lines = output.splitlines()
last_line = lines[-1]
assert text in last_line
assert severity in last_line
assert PLAIN_TEXT_PATTERN.match(last_line)
def test_html_conversion(self):
"""Check the conversion from ANSI escape sequences to HTML."""
# Check conversion of colored text.
for color_name, ansi_code in ANSI_COLOR_CODES.items():
ansi_encoded_text = 'plain text followed by %s text' % ansi_wrap(color_name, color=color_name)
expected_html = format(
'<code>plain text followed by <span style="color:{css}">{name}</span> text</code>',
css=EIGHT_COLOR_PALETTE[ansi_code], name=color_name,
)
self.assertEquals(expected_html, convert(ansi_encoded_text))
# Check conversion of bright colored text.
expected_html = '<code><span style="color:#FF0">bright yellow</span></code>'
self.assertEquals(expected_html, convert(ansi_wrap('bright yellow', color='yellow', bright=True)))
# Check conversion of text with a background color.
expected_html = '<code><span style="background-color:#DE382B">red background</span></code>'
self.assertEquals(expected_html, convert(ansi_wrap('red background', background='red')))
# Check conversion of text with a bright background color.
expected_html = '<code><span style="background-color:#F00">bright red background</span></code>'
self.assertEquals(expected_html, convert(ansi_wrap('bright red background', background='red', bright=True)))
# Check conversion of text that uses the 256 color mode palette as a foreground color.
expected_html = '<code><span style="color:#FFAF00">256 color mode foreground</span></code>'
self.assertEquals(expected_html, convert(ansi_wrap('256 color mode foreground', color=214)))
# Check conversion of text that uses the 256 color mode palette as a background color.
expected_html = '<code><span style="background-color:#AF0000">256 color mode background</span></code>'
self.assertEquals(expected_html, convert(ansi_wrap('256 color mode background', background=124)))
# Check that invalid 256 color mode indexes don't raise exceptions.
expected_html = '<code>plain text expected</code>'
self.assertEquals(expected_html, convert('\x1b[38;5;256mplain text expected\x1b[0m'))
# Check conversion of bold text.
expected_html = '<code><span style="font-weight:bold">bold text</span></code>'
self.assertEquals(expected_html, convert(ansi_wrap('bold text', bold=True)))
# Check conversion of underlined text.
expected_html = '<code><span style="text-decoration:underline">underlined text</span></code>'
self.assertEquals(expected_html, convert(ansi_wrap('underlined text', underline=True)))
# Check conversion of strike-through text.
expected_html = '<code><span style="text-decoration:line-through">strike-through text</span></code>'
self.assertEquals(expected_html, convert(ansi_wrap('strike-through text', strike_through=True)))
# Check conversion of inverse text.
expected_html = '<code><span style="background-color:#FFC706;color:#000">inverse</span></code>'
self.assertEquals(expected_html, convert(ansi_wrap('inverse', color='yellow', inverse=True)))
# Check conversion of URLs.
for sample_text in 'www.python.org', 'http://coloredlogs.rtfd.org', 'https://coloredlogs.rtfd.org':
sample_url = sample_text if '://' in sample_text else ('http://' + sample_text)
expected_html = '<code><a href="%s" style="color:inherit">%s</a></code>' % (sample_url, sample_text)
self.assertEquals(expected_html, convert(sample_text))
# Check that the capture pattern for URLs doesn't match ANSI escape
# sequences and also check that the short hand for the 0 reset code is
# supported. These are tests for regressions of bugs found in
# coloredlogs <= 8.0.
reset_short_hand = '\x1b[0m'
blue_underlined = ansi_style(color='blue', underline=True)
ansi_encoded_text = '<%shttps://coloredlogs.readthedocs.io%s>' % (blue_underlined, reset_short_hand)
expected_html = (
'<code><<span style="color:#006FB8;text-decoration:underline">'
'<a href="https://coloredlogs.readthedocs.io" style="color:inherit">'
'https://coloredlogs.readthedocs.io'
'</a></span>></code>'
)
self.assertEquals(expected_html, convert(ansi_encoded_text))
def test_output_interception(self):
"""Test capturing of output from external commands."""
expected_output = 'testing, 1, 2, 3 ..'
actual_output = capture(['echo', expected_output])
assert actual_output.strip() == expected_output.strip()
def test_enable_colored_cron_mailer(self):
"""Test that automatic ANSI to HTML conversion when running under ``cron`` can be enabled."""
with PatchedItem(os.environ, 'CONTENT_TYPE', 'text/html'):
with ColoredCronMailer() as mailer:
assert mailer.is_enabled
def test_disable_colored_cron_mailer(self):
"""Test that automatic ANSI to HTML conversion when running under ``cron`` can be disabled."""
with PatchedItem(os.environ, 'CONTENT_TYPE', 'text/plain'):
with ColoredCronMailer() as mailer:
assert not mailer.is_enabled
def test_auto_install(self):
"""Test :func:`coloredlogs.auto_install()`."""
needle = random_string()
command_line = [sys.executable, '-c', 'import logging; logging.info(%r)' % needle]
# Sanity check that log messages aren't enabled by default.
with CaptureOutput() as capturer:
os.environ['COLOREDLOGS_AUTO_INSTALL'] = 'false'
subprocess.call(command_line)
output = capturer.get_text()
assert needle not in output
# Test that the $COLOREDLOGS_AUTO_INSTALL environment variable can be
# used to automatically call coloredlogs.install() during initialization.
with CaptureOutput() as capturer:
os.environ['COLOREDLOGS_AUTO_INSTALL'] = 'true'
subprocess.call(command_line)
output = capturer.get_text()
assert needle in output
def test_cli_demo(self):
"""Test the command line colored logging demonstration."""
with CaptureOutput() as capturer:
main('coloredlogs', '--demo')
output = capturer.get_text()
# Make sure the output contains all of the expected logging level names.
for name in 'debug', 'info', 'warning', 'error', 'critical':
assert name.upper() in output
def test_cli_conversion(self):
"""Test the command line HTML conversion."""
output = main('coloredlogs', '--convert', 'coloredlogs', '--demo', capture=True)
# Make sure the output is encoded as HTML.
assert '<span' in output
def test_empty_conversion(self):
"""
Test that conversion of empty output produces no HTML.
This test was added because I found that ``coloredlogs --convert`` when
used in a cron job could cause cron to send out what appeared to be
empty emails. On more careful inspection the body of those emails was
``<code></code>``. By not emitting the wrapper element when no other
HTML is generated, cron will not send out an email.
"""
output = main('coloredlogs', '--convert', 'true', capture=True)
assert not output.strip()
def test_implicit_usage_message(self):
"""Test that the usage message is shown when no actions are given."""
assert 'Usage:' in main('coloredlogs', capture=True)
def test_explicit_usage_message(self):
"""Test that the usage message is shown when ``--help`` is given."""
assert 'Usage:' in main('coloredlogs', '--help', capture=True)
def check_contents(filename, contents, match):
"""Check if a line in a file contains an expected string."""
with open(filename) as handle:
assert any(contents in line for line in handle) == match
def main(*arguments, **options):
"""Wrap the command line interface to make it easier to test."""
capture = options.get('capture', False)
saved_argv = sys.argv
saved_stdout = sys.stdout
try:
sys.argv = arguments
if capture:
sys.stdout = StringIO()
coloredlogs.cli.main()
if capture:
return sys.stdout.getvalue()
finally:
sys.argv = saved_argv
sys.stdout = saved_stdout
@contextlib.contextmanager
def mocked_colorama_module(init_function):
"""Context manager to ease testing of colorama integration."""
module_name = 'colorama'
# Create a fake module shadowing colorama.
fake_module = imp.new_module(module_name)
setattr(fake_module, 'init', init_function)
# Temporarily reconfigure coloredlogs to use colorama.
need_colorama = coloredlogs.NEED_COLORAMA
coloredlogs.NEED_COLORAMA = True
# Install the fake colorama module.
saved_module = sys.modules.get(module_name, None)
sys.modules[module_name] = fake_module
# We've finished setting up, yield control.
yield
# Restore the original setting.
coloredlogs.NEED_COLORAMA = need_colorama
# Clean up the mock module.
if saved_module is not None:
sys.modules[module_name] = saved_module
else:
sys.modules.pop(module_name, None)
@contextlib.contextmanager
def cleanup_handlers():
"""Context manager to cleanup output handlers."""
# There's nothing to set up so we immediately yield control.
yield
# After the with block ends we cleanup any output handlers.
for match_func in match_stream_handler, match_syslog_handler:
handler, logger = find_handler(logging.getLogger(), match_func)
if handler and logger:
logger.removeHandler(handler)
| [
"[email protected]"
] | |
c235d6570542073a628b108ff849420cd261fbff | 19375a18719e44eee7c596e72ef8915d3fcbff92 | /day02_spider/07_pymongo.py | 5f075d08ba8928c549c17868f7535f1d3477baa2 | [] | no_license | J-shan0903/AID1912 | 6c617fa26751c31ff05a63050a320122e3ca044e | 0797f3d8ef0e96b8eb6908dffbec8193c9614973 | refs/heads/master | 2021-03-23T12:21:32.480026 | 2020-05-23T08:36:21 | 2020-05-23T08:36:21 | 247,452,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | import pymongo
conn = pymongo.MongoClient('localhost', 27017)
db = conn['maoyandb']
myset = db['maoyanset']
myset.insert_one({'name': '战狼', 'star': '123', 'time': '2017-2-15'})
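# A minimal read-back sketch against the same collection (find_one() is
# standard pymongo API; the query value mirrors the insert above).
print(myset.find_one({'name': '战狼'}))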
| [
"[email protected]"
] | |
fd159e49eeca3c25d2f16637e57bd79297ec1b34 | 63428f4bec80630523355f8a05bcbdbb1cf31dbf | /lambda/pseudotest/upload_results.py | de10c76d32ed2589bc3cb081079f381d85a27488 | [] | no_license | pniedzwiedzinski/pseudotest | fe314222619ebae55467acf4def14fa5619ad2eb | 011800879cac43ded439370fa9ed8539f8e98ae5 | refs/heads/master | 2021-07-05T09:30:36.203945 | 2019-12-02T10:03:19 | 2019-12-02T10:03:19 | 182,046,647 | 2 | 2 | null | 2020-10-01T07:08:10 | 2019-04-18T08:19:19 | Python | UTF-8 | Python | false | false | 250 | py | import os
import pymysql
from .db import execute
def upload_results(results, job_id):
execute(
"UPDATE pseudo_test_score SET score = %s WHERE file_id = %s",
(pymysql.escape_string(results), pymysql.escape_string(job_id)),
)
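# Hypothetical call site (sketch; both argument values are illustrative only):
#   upload_results(results='17/20', job_id='a1b2c3')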
| [
"[email protected]"
] | |
a6918cdd6386b5e72a785cdeaabed8970021c91c | 15240e0e187788f1114cac98c534004ab4793cbf | /influxdb_client/domain/telegraf_plugin_output_file_config_files.py | 38f742691f37962daf3966ab2ea0118da675af41 | [
"MIT"
] | permissive | semyont/influxdb-client-python | dddb6a1309fd424c9b1e0bec7c67a442cfcfe0b5 | 7b685fda2030b22697b6096cc3161fbbc01e7bee | refs/heads/master | 2020-09-19T18:39:52.132739 | 2019-11-27T07:11:23 | 2019-11-27T07:11:23 | 224,266,106 | 2 | 0 | MIT | 2019-11-26T19:06:17 | 2019-11-26T19:06:09 | null | UTF-8 | Python | false | false | 3,738 | py | # coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TelegrafPluginOutputFileConfigFiles(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'path': 'str'
}
attribute_map = {
'type': 'type',
'path': 'path'
}
def __init__(self, type=None, path=None): # noqa: E501
"""TelegrafPluginOutputFileConfigFiles - a model defined in OpenAPI""" # noqa: E501
self._type = None
self._path = None
self.discriminator = None
if type is not None:
self.type = type
if path is not None:
self.path = path
@property
def type(self):
"""Gets the type of this TelegrafPluginOutputFileConfigFiles. # noqa: E501
:return: The type of this TelegrafPluginOutputFileConfigFiles. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this TelegrafPluginOutputFileConfigFiles.
:param type: The type of this TelegrafPluginOutputFileConfigFiles. # noqa: E501
:type: str
"""
self._type = type
@property
def path(self):
"""Gets the path of this TelegrafPluginOutputFileConfigFiles. # noqa: E501
:return: The path of this TelegrafPluginOutputFileConfigFiles. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this TelegrafPluginOutputFileConfigFiles.
:param path: The path of this TelegrafPluginOutputFileConfigFiles. # noqa: E501
:type: str
"""
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TelegrafPluginOutputFileConfigFiles):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
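# Usage sketch for this generated model (attribute names as defined above;
# the values are illustrative assumptions):
#   config_file = TelegrafPluginOutputFileConfigFiles(type='disk', path='/tmp/out')
#   config_file.to_dict()  # -> {'type': 'disk', 'path': '/tmp/out'}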
| [
"[email protected]"
] | |
78ceabab5d75b42b5c63200eb8a66c9e6df33609 | da29f1f5b4459fbfec968bb694bedb9586f87b14 | /new_algs/Graph+algorithms/Hungarian+algorithm/yolo_matt.py | e5f0db03c5a4f92b6fa9d352d25e8307e7d01c18 | [] | no_license | coolsnake/JupyterNotebook | 547806a45a663f090f313dc3e70f779ad9b213c0 | 20d8df6172906337f81583dabb841d66b8f31857 | refs/heads/master | 2023-01-13T18:55:38.615312 | 2020-11-17T22:55:12 | 2020-11-17T22:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,575 | py | # -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
from keras.utils import multi_gpu_model
class YOLO(object):
_defaults = {
"model_path": 'model_data/yolo.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/coco_classes.txt',
"score" : 0.3,
"iou" : 0.45,
"model_image_size" : (416, 416),
"gpu_num" : 1,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
end = timer()
print(end - start)
return image,out_boxes, out_scores, out_classes
def close_session(self):
self.sess.close()
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
image = Image.fromarray(frame)
        # detect_image() returns (image, boxes, scores, classes), so unpack it
        # before converting the annotated image for OpenCV display.
        image, out_boxes, out_scores, out_classes = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
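# Usage sketch (assumes the weight/anchor/class files from YOLO._defaults
# exist on disk and that 'example.jpg' is a hypothetical input image):
#
#   yolo = YOLO()
#   image = Image.open('example.jpg')
#   image, boxes, scores, classes = yolo.detect_image(image)
#   image.show()
#   yolo.close_session()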
| [
"[email protected]"
] | |
8983621f1e158a63c0ca20b5f9d5fc10719a86ec | 960b3a17a4011264a001304e64bfb76d669b8ac5 | /mstrio/users_and_groups/contact_group.py | cb886bee18e3b5e15944b01baea86951680e83c6 | [
"Apache-2.0"
] | permissive | MicroStrategy/mstrio-py | 012d55df782a56dab3a32e0217b9cbfd0b59b8dd | c6cea33b15bcd876ded4de25138b3f5e5165cd6d | refs/heads/master | 2023-08-08T17:12:07.714614 | 2023-08-03T12:30:11 | 2023-08-03T12:30:11 | 138,627,591 | 84 | 60 | Apache-2.0 | 2023-07-31T06:43:33 | 2018-06-25T17:23:55 | Python | UTF-8 | Python | false | false | 12,144 | py | # NOSONAR
import logging
from collections import defaultdict
from collections.abc import Iterable
from enum import auto
from typing import TYPE_CHECKING, Union
from mstrio import config
from mstrio.api import contact_groups
from mstrio.users_and_groups.user import User
from mstrio.utils.entity import DeleteMixin, EntityBase, auto_match_args_entity
from mstrio.utils.enum_helper import AutoName
from mstrio.utils.helper import (
Dictable,
fetch_objects_async,
get_args_from_func,
get_default_args_from_func,
get_objects_id,
)
from mstrio.utils.version_helper import class_version_handler, method_version_handler
if TYPE_CHECKING:
from mstrio.connection import Connection
from mstrio.users_and_groups.contact import Contact
logger = logging.getLogger(__name__)
class ContactGroupMemberType(AutoName):
CONTACT = auto()
CONTACT_GROUP = auto()
USER = auto()
class ContactGroupMember(Dictable):
"""ContactGroupMember class, representing either Contact or ContactGroup
Attributes:
name: member's name
type: type of a member, instance of ContactGroupMemberType
id: id of member, optional
description: member's description, optional
enabled: specifies if a member is enabled
"""
_FROM_DICT_MAP = {"type": ContactGroupMemberType}
def __init__(
self,
name: str,
type: str | ContactGroupMemberType,
id: str | None = None,
description: str | None = None,
enabled: bool = True,
):
self.name = name
self.type = ContactGroupMemberType(type) if isinstance(type, str) else type
self.id = id
self.description = description
self.enabled = enabled
def __repr__(self) -> str:
param_dict = auto_match_args_entity(
self.__init__, self, exclude=['self'], include_defaults=False
)
params = [
f"{param}={self.type}" if param == 'type' else f'{param}={repr(value)}'
for param, value in param_dict.items()
]
formatted_params = ', '.join(params)
return f'ContactGroupMember({formatted_params})'
@classmethod
def from_contact_or_contact_group(
cls, obj: Union['Contact', 'ContactGroup']
) -> 'ContactGroupMember':
"""Initialize instance of class ContactGroupMember
Args:
obj: object to used as base for initializing instance of
ContactGroupMember
Returns:
ContactGroupMember object
"""
from mstrio.users_and_groups.contact import Contact
if isinstance(obj, Contact):
return cls(id=obj.id, name=obj.name, type=ContactGroupMemberType.CONTACT)
if isinstance(obj, ContactGroup):
return cls(
id=obj.id, name=obj.name, type=ContactGroupMemberType.CONTACT_GROUP
)
@method_version_handler('11.3.0200')
def list_contact_groups(
connection: "Connection",
to_dictionary: bool = False,
limit: int | None = None,
**filters,
) -> list["ContactGroup"] | list[dict]:
"""Get all contact groups as list of ContactGroup objects or
dictionaries.
Optionally filter the contact groups by specifying filters.
Args:
connection(object): MicroStrategy connection object
to_dictionary: If True returns a list of contact group dicts,
otherwise returns a list of contact group objects
limit: limit the number of elements returned. If `None` (default), all
objects are returned.
**filters: Available filter parameters: ['id', 'name', 'description',
'enabled', 'linked_user']
"""
return ContactGroup._list_contact_groups(
connection=connection, to_dictionary=to_dictionary, limit=limit, **filters
)
@class_version_handler('11.3.0200')
class ContactGroup(EntityBase, DeleteMixin):
"""Object representation of Microstrategy Contact Group object
Attributes:
name: contact group's name
id: contact group's id
description: contact group's description
enabled: specifies if a contact group is enabled
linked_user: user linked to contact group, instance of User
members: list of contact group's members, instances of
ContactGroupMember
memberships: list of Contact Groups that the Contact Group belongs to
connection: instance of Connection class, represents connection
to MicroStrategy Intelligence Server
"""
_FROM_DICT_MAP = {
**EntityBase._FROM_DICT_MAP,
'linked_user': User.from_dict,
'members': [ContactGroupMember.from_dict],
}
_API_GETTERS = {
(
'id',
'name',
'description',
'enabled',
'linked_user',
'members',
'memberships',
): contact_groups.get_contact_group
}
_API_DELETE = staticmethod(contact_groups.delete_contact_group)
_API_PATCH = {
('name', 'description', 'enabled', 'linked_user', 'members'): (
contact_groups.update_contact_group,
'put',
)
}
_PATCH_PATH_TYPES = {
'name': str,
'description': str,
'enabled': bool,
'linked_user': dict,
'members': list,
'memberships': list,
}
def __init__(
self,
connection: 'Connection',
id: str | None = None,
name: str | None = None,
):
"""Initialize Contact Group object by passing id or name.
When `id` is provided, name is omitted.
Args:
connection: MicroStrategy connection object
id: ID of Contact
name: name of Contact Group
"""
if id is None and name is None:
raise ValueError(
"Please specify either 'id' or 'name' parameter in the constructor."
)
if id is None:
result = ContactGroup._list_contact_groups(
connection=connection,
name=name,
to_dictionary=True,
)
if result:
object_data, object_data['connection'] = result[0], connection
self._init_variables(**object_data)
else:
raise ValueError(f"There is no Contact Group named: '{name}'")
else:
super().__init__(connection, id)
def _init_variables(self, **kwargs) -> None:
super()._init_variables(**kwargs)
self.description = kwargs.get('description')
self.enabled = kwargs.get('enabled')
linked_user = kwargs.get("linked_user")
self.linked_user = (
User.from_dict(linked_user, self.connection) if linked_user else None
)
members = kwargs.get('members')
self.members = (
[ContactGroupMember.from_dict(member) for member in members]
if members
else None
)
memberships = kwargs.get('memberships')
self._memberships = (
[self.from_dict(m, self.connection) for m in memberships]
if memberships
else None
)
@classmethod
def create(
cls,
connection: "Connection",
name: str,
linked_user: str | User,
members: list[dict | ContactGroupMember],
description: str | None = None,
enabled: bool = True,
) -> 'ContactGroup':
"""Create a new contact group.
Args:
connection: MicroStrategy connection object
returned by `connection.Connection()`
name: contact group name
linked_user: user linked to contact
members: list of members
description: description of contact
enabled: specifies if contact should be enabled
Returns:
`ContactGroup` object
"""
members = [
m.to_dict() if isinstance(m, ContactGroupMember) else m for m in members
]
linked_user = get_objects_id(linked_user, User)
body = {
'name': name,
'description': description,
'enabled': enabled,
'linkedUser': {'id': linked_user},
'members': members,
}
res = contact_groups.create_contact_group(connection, body).json()
if config.verbose:
logger.info(
f"Successfully created contact group named: '{res.get('name')}' "
f"with ID: '{res.get('id')}'"
)
return cls.from_dict(res, connection)
def alter(
self,
name: str | None = None,
description: str | None = None,
enabled: bool | None = None,
linked_user: Union['User', str] | None = None,
members: Iterable[Union['ContactGroupMember', dict]] | None = None,
):
"""Update properties of a contact group
Args:
name: name of a contact
description: description of a contact
enabled: specifies if a contact is enabled
linked_user: an object of class User linked to the contact
members: list of contact group members, instances of
`ContactGroupMember`
"""
linked_user = {'id': get_objects_id(linked_user, User)} if linked_user else None
func = self.alter
args = get_args_from_func(func)
defaults = get_default_args_from_func(func)
defaults_dict = dict(zip(args[-len(defaults) :], defaults)) if defaults else {}
local = locals()
properties = defaultdict(dict)
for property_key in defaults_dict:
if local[property_key] is not None:
properties[property_key] = local[property_key]
self._alter_properties(**properties)
@classmethod
def _list_contact_groups(
cls,
connection: "Connection",
to_dictionary: bool = False,
limit: int | None = None,
offset: int | None = None,
**filters,
) -> list["ContactGroup"] | list[dict]:
objects = fetch_objects_async(
connection=connection,
api=contact_groups.get_contact_groups,
async_api=contact_groups.get_contact_groups_async,
limit=limit,
offset=offset,
chunk_size=1000,
filters=filters,
dict_unpack_value='contactGroups',
)
if to_dictionary:
return objects
return [
ContactGroup.from_dict(source=obj, connection=connection) for obj in objects
]
def _set_object_attributes(self, **kwargs) -> None:
super()._set_object_attributes(**kwargs)
memberships = kwargs.get("memberships")
memberships_objs = (
[self.from_dict(m, self.connection) for m in memberships]
if memberships
else []
)
self._memberships = memberships_objs
def add_members(
self, members: Iterable[Union['ContactGroupMember', 'Contact', 'ContactGroup']]
):
"""Add member
Args:
members: list of members to add to contact group
"""
members_ids = [member.id for member in self.members]
new_members = [
ContactGroupMember.from_contact_or_contact_group(obj)
if not isinstance(obj, ContactGroupMember)
else obj
for obj in members
if obj.id not in members_ids
]
self.alter(members=new_members + self.members)
def remove_members(
self, members: Iterable[Union['ContactGroupMember', 'Contact', 'ContactGroup']]
):
"""Remove member
Args:
members: list of members to remove from contact group
"""
ids_to_remove = [member.id for member in members]
new_member_list = [
member for member in self.members if member.id not in ids_to_remove
]
self.alter(members=new_member_list)
@property
def memberships(self):
return self._memberships
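# Usage sketch (connection details and the user id are placeholders; the
# `create` and `alter` methods used here are the ones defined above):
#
#   from mstrio.connection import Connection
#   conn = Connection(base_url='https://env.example.com/MicroStrategyLibrary',
#                     username='admin', password='...')
#   group = ContactGroup.create(conn, name='Regional leads',
#                               linked_user='<user id>', members=[])
#   group.alter(description='Contact group for regional leads')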
| [
"[email protected]"
] | |
b484145d63d254f278fb2c36cbab484831c7e745 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02984/s206130983.py | f20b1996e5c69cf6026085eef48328162e7f21d4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | N = int(input())
A = list(map(int, input().split()))
now = sum(A)
for i in range(N//2):
now -= 2*A[2*i+1]
for a in A:
print(now, end=' ')
X = 2*a - now
now = X
print()
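# Worked example (assumed I/O format: N on one line, then N integers A[i]).
# The loop enforces x[i+1] = 2*A[i] - x[i], and for odd N the alternating
# sum A[0]-A[1]+A[2]-... collapses to the first value x[0] of the circular
# solution where each A[i] is the average of two consecutive x values:
#   input:  3
#           2 2 4
#   output: 4 0 4
# check: (4+0)/2 = 2, (0+4)/2 = 2 and (4+4)/2 = 4 reproduce A.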
| [
"[email protected]"
] | |
9db46716b6f44e4912f2a47e91a80b23070e5d7f | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201019102708.py | 61f5e48858ce8dbe0af570ab942e18fa784b868b | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from wagtail.core import blocks
class TitleBlock(blocks.StructBlock):
text = blocks.CharBlock(
        required=True,
        help_text='Text to display',
)
class Meta:
template = 'streams/title_block.html'
        icon = 'edit'
        label = 'Title'
        help_text = 'Centered text to display on the page.'
class CardsBlock(blocks.StructBlock):
    pass
| [
"[email protected]"
] | |
b2792f1e2803d83a7bd248ee8ea357b94ed0badb | 8c6816435093cb8e9e45593d3ffdd67028a011b6 | /Tree/is_valid_bst.py | 7a38d7d17976d7e01ce6e8b7a4be9e91ad0e5208 | [] | no_license | Keeady/daily-coding-challenge | 6ee74a5fe639a1f5b4753dd4848d0696bef15c28 | 31eebbf4c1d0eb88a00f71bd5741adf5e07d0e94 | refs/heads/master | 2020-03-27T07:58:05.713290 | 2019-03-08T15:03:05 | 2019-03-08T15:03:05 | 146,210,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | import sys
class Solution:
def isValidBST(self, root):
if not root:
return True
return self.isValidSubTree(root, -sys.maxsize -1, sys.maxsize)
    def isValidSubTree(self, root, min_val, max_val):
        if root is None:
            return True
        if min_val < root.val < max_val:
            return self.isValidSubTree(root.left, min_val, root.val) and self.isValidSubTree(root.right, root.val, max_val)
        else:
            return False
# left child > parent's min and < parent.val
# right child > parent.val and < max from parent
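# Minimal sketch for exercising the solution above. TreeNode is an assumed
# LeetCode-style node exposing `val`, `left` and `right`.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
if __name__ == '__main__':
    good = TreeNode(2, TreeNode(1), TreeNode(3))
    bad = TreeNode(5, TreeNode(1), TreeNode(4, TreeNode(3), TreeNode(6)))
    print(Solution().isValidBST(good))  # True
    print(Solution().isValidBST(bad))   # False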
# | [
"[email protected]"
] | |
c6dfc35a86ffd65768a932003f5e30de191624e6 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /YDgtdP69Mn9pC73xN_7.py | 204a19d7df1cb840639dc9daeacb8f81fa7f26df | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | """
This challenge is based on the game Minesweeper.
Create a function that takes a grid of `#` and `-`, where each hash (#)
represents a mine and each dash (-) represents a mine-free spot. Return a list
where each dash is replaced by a digit indicating the number of mines
immediately adjacent to the spot (horizontally, vertically, and diagonally).
### Examples
num_grid([
["-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-"],
["-", "-", "#", "-", "-"],
["-", "-", "-", "-", "-"],
["-", "-", "-", "-", "-"]
]) ➞ [
["0", "0", "0", "0", "0"],
["0", "1", "1", "1", "0"],
["0", "1", "#", "1", "0"],
["0", "1", "1", "1", "0"],
["0", "0", "0", "0", "0"],
]
num_grid([
["-", "-", "-", "-", "#"],
["-", "-", "-", "-", "-"],
["-", "-", "#", "-", "-"],
["-", "-", "-", "-", "-"],
["#", "-", "-", "-", "-"]
]) ➞ [
["0", "0", "0", "1", "#"],
["0", "1", "1", "2", "1"],
["0", "1", "#", "1", "0"],
["1", "2", "1", "1", "0"],
["#", "1", "0", "0", "0"]
]
num_grid([
["-", "-", "-", "#", "#"],
["-", "#", "-", "-", "-"],
["-", "-", "#", "-", "-"],
["-", "#", "#", "-", "-"],
["-", "-", "-", "-", "-"]
]) ➞ [
["1", "1", "2", "#", "#"],
["1", "#", "3", "3", "2"],
["2", "4", "#", "2", "0"],
["1", "#", "#", "2", "0"],
["1", "2", "2", "1", "0"],
]
### Notes
N/A
"""
def num_grid(lst):
for i in range(len(lst)):
for j in range(len(lst[i])):
if lst[i][j] == '#':
continue
            lst[i][j] = str(sum(lst[k][l] == '#' for k in range(len(lst)) for l in range(len(lst[0]))
                                if abs(i - k) < 2 and abs(j - l) < 2))
return lst
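# Quick check against the first example in the docstring above.
if __name__ == '__main__':
    demo = [
        ["-", "-", "-", "-", "-"],
        ["-", "-", "-", "-", "-"],
        ["-", "-", "#", "-", "-"],
        ["-", "-", "-", "-", "-"],
        ["-", "-", "-", "-", "-"],
    ]
    for row in num_grid(demo):
        print(row)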
| [
"[email protected]"
] | |
ede9675c9b6afbc4a54e4081f519e4ef3376ae81 | e65d16ea1e8d412bac75a809be6d390126bdf528 | /tests/components/remote/test_device_action.py | d652f4d869d060039e222b8a6d3d595b5bec83bd | [
"Apache-2.0"
] | permissive | syssi/home-assistant | 6347d57866cb16ab9d4499ad38e2be6f0399077f | fd43687833741b21221769d46b4d1ecef8a94711 | refs/heads/dev | 2023-08-17T09:31:52.680518 | 2023-06-11T14:22:12 | 2023-06-11T14:22:12 | 97,874,495 | 6 | 16 | Apache-2.0 | 2023-09-13T06:31:21 | 2017-07-20T20:12:37 | Python | UTF-8 | Python | false | false | 6,168 | py | """The test for remote device automation."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.remote import DOMAIN
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON, EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity_registry import RegistryEntryHider
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
)
@pytest.fixture(autouse=True, name="stub_blueprint_populate")
def stub_blueprint_populate_autouse(stub_blueprint_populate: None) -> None:
"""Stub copying the blueprints to the config folder."""
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_actions(
hass: HomeAssistant,
device_registry: dr.DeviceRegistry,
entity_registry: er.EntityRegistry,
) -> None:
"""Test we get the expected actions from a remote."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_registry.async_get_or_create(
DOMAIN, "test", "5678", device_id=device_entry.id
)
expected_actions = [
{
"domain": DOMAIN,
"type": action,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": False},
}
for action in ["turn_off", "turn_on", "toggle"]
]
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert_lists_same(actions, expected_actions)
@pytest.mark.parametrize(
("hidden_by", "entity_category"),
(
(RegistryEntryHider.INTEGRATION, None),
(RegistryEntryHider.USER, None),
(None, EntityCategory.CONFIG),
(None, EntityCategory.DIAGNOSTIC),
),
)
async def test_get_actions_hidden_auxiliary(
hass: HomeAssistant,
device_registry: dr.DeviceRegistry,
entity_registry: er.EntityRegistry,
hidden_by,
entity_category,
) -> None:
"""Test we get the expected actions from a hidden or auxiliary entity."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_registry.async_get_or_create(
DOMAIN,
"test",
"5678",
device_id=device_entry.id,
entity_category=entity_category,
hidden_by=hidden_by,
)
expected_actions = []
expected_actions += [
{
"domain": DOMAIN,
"type": action,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": True},
}
for action in ["turn_off", "turn_on", "toggle"]
]
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert_lists_same(actions, expected_actions)
async def test_action(
hass: HomeAssistant, calls, enable_custom_integrations: None
) -> None:
"""Test for turn_on and turn_off actions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"action": {
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turn_off",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"action": {
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turn_on",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event3"},
"action": {
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "toggle",
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_OFF
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_OFF
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
hass.bus.async_fire("test_event3")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_OFF
hass.bus.async_fire("test_event3")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
| [
"[email protected]"
] | |
b0e061e86c4f7c24b1e4804f54d62f09523bc0d8 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/tflearn_tflearn/tflearn-master/tflearn/layers/recurrent.py | 0613c164951d1ce45a6a8108e4fd449726fa5949 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 32,464 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import logging
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.contrib.rnn.python.ops.core_rnn import static_rnn as _rnn, \
static_bidirectional_rnn as _brnn
from tensorflow.python.ops.rnn import rnn_cell_impl as _rnn_cell, \
dynamic_rnn as _drnn
from tensorflow.python.util.nest import is_sequence
from tensorflow.contrib.framework.python.ops.variables import model_variable
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from .. import config
from .. import utils
from .. import activations
from .. import initializations
from .. import variables as va
from .normalization import batch_normalization
# --------------------------
# RNN Layers
# --------------------------
def _rnn_template(incoming, cell, dropout=None, return_seq=False,
return_state=False, initial_state=None, dynamic=False,
scope=None, reuse=False, name="LSTM"):
""" RNN Layer Template. """
sequence_length = None
if dynamic:
sequence_length = retrieve_seq_length_op(
incoming if isinstance(incoming, tf.Tensor) else tf.stack(incoming))
input_shape = utils.get_incoming_shape(incoming)
with tf.variable_scope(scope, default_name=name, values=[incoming],
reuse=reuse) as scope:
name = scope.name
_cell = cell
# Apply dropout
if dropout:
if type(dropout) in [tuple, list]:
in_keep_prob = dropout[0]
out_keep_prob = dropout[1]
elif isinstance(dropout, float):
in_keep_prob, out_keep_prob = dropout, dropout
else:
raise Exception("Invalid dropout type (must be a 2-D tuple of "
"float)")
cell = DropoutWrapper(cell, in_keep_prob, out_keep_prob)
inference = incoming
# If a tensor given, convert it to a per timestep list
if type(inference) not in [list, np.array]:
ndim = len(input_shape)
assert ndim >= 3, "Input dim should be at least 3."
axes = [1, 0] + list(range(2, ndim))
inference = tf.transpose(inference, (axes))
inference = tf.unstack(inference)
outputs, state = _rnn(cell, inference, dtype=tf.float32,
initial_state=initial_state, scope=name,
sequence_length=sequence_length)
# Retrieve RNN Variables
c = tf.GraphKeys.LAYER_VARIABLES + '/' + scope.name
for v in [_cell.W, _cell.b]:
if hasattr(v, "__len__"):
for var in v: tf.add_to_collection(c, var)
else:
tf.add_to_collection(c, v)
# Track activations.
tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, outputs[-1])
if dynamic:
if return_seq:
o = outputs
else:
outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
o = advanced_indexing_op(outputs, sequence_length)
else:
o = outputs if return_seq else outputs[-1]
# Track output tensor.
tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, o)
return (o, state) if return_state else o
def simple_rnn(incoming, n_units, activation='sigmoid', dropout=None,
bias=True, weights_init=None, return_seq=False,
return_state=False, initial_state=None, dynamic=False,
trainable=True, restore=True, reuse=False, scope=None,
name="SimpleRNN"):
""" Simple RNN.
Simple Recurrent Layer.
Input:
3-D Tensor [samples, timesteps, input dim].
Output:
if `return_seq`: 3-D Tensor [samples, timesteps, output dim].
else: 2-D Tensor [samples, output dim].
Arguments:
incoming: `Tensor`. Incoming 3-D Tensor.
n_units: `int`, number of units for this layer.
activation: `str` (name) or `function` (returning a `Tensor`).
Activation applied to this layer (see tflearn.activations).
Default: 'sigmoid'.
dropout: `tuple` of `float`: (input_keep_prob, output_keep_prob). The
input and output keep probability.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
(See tflearn.initializations)
return_seq: `bool`. If True, returns the full sequence instead of
last sequence output only.
return_state: `bool`. If True, returns a tuple with output and
states: (output, states).
initial_state: `Tensor`. An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dynamic: `bool`. If True, dynamic computation is performed. It will not
compute RNN steps above the sequence length. Note that because TF
requires to feed sequences of same length, 0 is used as a mask.
So a sequence padded with 0 at the end must be provided. When
computation is performed, it will stop when it meets a step with
a value of 0.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model.
reuse: `bool`. If True and 'scope' is provided, this layer variables
will be reused (shared).
scope: `str`. Define this layer scope (optional). A scope can be
used to share variables between layers. Note that scope will
override name.
name: `str`. A name for this layer (optional).
"""
cell = BasicRNNCell(n_units, activation=activation, bias=bias,
weights_init=weights_init, trainable=trainable,
restore=restore, reuse=reuse)
x = _rnn_template(incoming, cell=cell, dropout=dropout,
return_seq=return_seq, return_state=return_state,
initial_state=initial_state, dynamic=dynamic,
scope=scope, name=name)
return x
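def _simple_rnn_usage_sketch():
    """ Usage sketch only (never called): embeds integer sequences into a
    3-D [samples, timesteps, dim] tensor and feeds it to simple_rnn. The
    vocabulary and sequence sizes are illustrative assumptions. """
    import tflearn
    net = tflearn.input_data(shape=[None, 100])
    net = tflearn.embedding(net, input_dim=10000, output_dim=128)
    net = simple_rnn(net, n_units=128, dropout=(0.8, 0.8))
    return net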
def lstm(incoming, n_units, activation='tanh', inner_activation='sigmoid',
dropout=None, bias=True, weights_init=None, forget_bias=1.0,
return_seq=False, return_state=False, initial_state=None,
dynamic=False, trainable=True, restore=True, reuse=False,
scope=None, name="LSTM"):
""" LSTM.
Long Short Term Memory Recurrent Layer.
Input:
3-D Tensor [samples, timesteps, input dim].
Output:
if `return_seq`: 3-D Tensor [samples, timesteps, output dim].
else: 2-D Tensor [samples, output dim].
Arguments:
incoming: `Tensor`. Incoming 3-D Tensor.
n_units: `int`, number of units for this layer.
activation: `str` (name) or `function` (returning a `Tensor`).
Activation applied to this layer (see tflearn.activations).
Default: 'tanh'.
inner_activation: `str` (name) or `function` (returning a `Tensor`).
LSTM inner activation. Default: 'sigmoid'.
dropout: `tuple` of `float`: (input_keep_prob, output_keep_prob). The
input and output keep probability.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
(See tflearn.initializations).
forget_bias: `float`. Bias of the forget gate. Default: 1.0.
return_seq: `bool`. If True, returns the full sequence instead of
last sequence output only.
return_state: `bool`. If True, returns a tuple with output and
states: (output, states).
initial_state: `Tensor`. An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dynamic: `bool`. If True, dynamic computation is performed. It will not
compute RNN steps above the sequence length. Note that because TF
requires to feed sequences of same length, 0 is used as a mask.
So a sequence padded with 0 at the end must be provided. When
computation is performed, it will stop when it meets a step with
a value of 0.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model.
reuse: `bool`. If True and 'scope' is provided, this layer variables
will be reused (shared).
scope: `str`. Define this layer scope (optional). A scope can be
used to share variables between layers. Note that scope will
override name.
name: `str`. A name for this layer (optional).
References:
Long Short Term Memory, Sepp Hochreiter & Jurgen Schmidhuber,
Neural Computation 9(8): 1735-1780, 1997.
Links:
[http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf]
(http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)
"""
cell = BasicLSTMCell(n_units, activation=activation,
inner_activation=inner_activation,
forget_bias=forget_bias, bias=bias,
weights_init=weights_init, trainable=trainable,
restore=restore, reuse=reuse)
x = _rnn_template(incoming, cell=cell, dropout=dropout,
return_seq=return_seq, return_state=return_state,
initial_state=initial_state, dynamic=dynamic,
scope=scope, name=name)
return x
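
# Minimal usage sketch (illustrative values; `net` is a hypothetical 3-D input):
#
#   net = lstm(net, n_units=256, dropout=(1.0, 0.8), return_seq=True)
#   net = lstm(net, n_units=256)  # stack a second LSTM, keep the last output only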
def gru(incoming, n_units, activation='tanh', inner_activation='sigmoid',
dropout=None, bias=True, weights_init=None, return_seq=False,
return_state=False, initial_state=None, dynamic=False,
trainable=True, restore=True, reuse=False, scope=None, name="GRU"):
""" GRU.
Gated Recurrent Unit Layer.
Input:
3-D Tensor Layer [samples, timesteps, input dim].
Output:
if `return_seq`: 3-D Tensor [samples, timesteps, output dim].
else: 2-D Tensor [samples, output dim].
Arguments:
incoming: `Tensor`. Incoming 3-D Tensor.
n_units: `int`, number of units for this layer.
activation: `str` (name) or `function` (returning a `Tensor`).
Activation applied to this layer (see tflearn.activations).
Default: 'tanh'.
inner_activation: `str` (name) or `function` (returning a `Tensor`).
GRU inner activation. Default: 'sigmoid'.
dropout: `tuple` of `float`: (input_keep_prob, output_keep_prob). The
input and output keep probability.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
(See tflearn.initializations).
return_seq: `bool`. If True, returns the full sequence instead of
last sequence output only.
return_state: `bool`. If True, returns a tuple with output and
states: (output, states).
initial_state: `Tensor`. An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dynamic: `bool`. If True, dynamic computation is performed. It will not
compute RNN steps above the sequence length. Note that because TF
requires to feed sequences of same length, 0 is used as a mask.
So a sequence padded with 0 at the end must be provided. When
computation is performed, it will stop when it meets a step with
a value of 0.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model.
reuse: `bool`. If True and 'scope' is provided, this layer variables
will be reused (shared).
scope: `str`. Define this layer scope (optional). A scope can be
used to share variables between layers. Note that scope will
override name.
name: `str`. A name for this layer (optional).
References:
Learning Phrase Representations using RNN Encoder–Decoder for
Statistical Machine Translation, K. Cho et al., 2014.
Links:
[http://arxiv.org/abs/1406.1078](http://arxiv.org/abs/1406.1078)
"""
cell = GRUCell(n_units, activation=activation,
inner_activation=inner_activation, bias=bias,
weights_init=weights_init, trainable=trainable,
restore=restore, reuse=reuse)
x = _rnn_template(incoming, cell=cell, dropout=dropout,
return_seq=return_seq, return_state=return_state,
initial_state=initial_state, dynamic=dynamic,
scope=scope, name=name)
return x
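
# Minimal usage sketch (illustrative):
#
#   net = gru(net, n_units=128, dynamic=True)  # skips 0-padded timesteps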
def bidirectional_rnn(incoming, rnncell_fw, rnncell_bw, return_seq=False,
return_states=False, initial_state_fw=None,
initial_state_bw=None, dynamic=False, scope=None,
name="BiRNN"):
""" Bidirectional RNN.
Build a bidirectional recurrent neural network, it requires 2 RNN Cells
to process sequence in forward and backward order. Any RNN Cell can be
used i.e. SimpleRNN, LSTM, GRU... with its own parameters. But the two
cells number of units must match.
Input:
3-D Tensor Layer [samples, timesteps, input dim].
Output:
if `return_seq`: 3-D Tensor [samples, timesteps, output dim].
else: 2-D Tensor Layer [samples, output dim].
Arguments:
incoming: `Tensor`. The incoming Tensor.
rnncell_fw: `RNNCell`. The RNN Cell to use for foward computation.
rnncell_bw: `RNNCell`. The RNN Cell to use for backward computation.
return_seq: `bool`. If True, returns the full sequence instead of
last sequence output only.
return_states: `bool`. If True, returns a tuple with output and
states: (output, states).
initial_state_fw: `Tensor`. An initial state for the forward RNN.
This must be a tensor of appropriate type and shape [batch_size
x cell.state_size].
initial_state_bw: `Tensor`. An initial state for the backward RNN.
This must be a tensor of appropriate type and shape [batch_size
x cell.state_size].
dynamic: `bool`. If True, dynamic computation is performed. It will not
compute RNN steps above the sequence length. Note that because TF
requires to feed sequences of same length, 0 is used as a mask.
So a sequence padded with 0 at the end must be provided. When
computation is performed, it will stop when it meets a step with
a value of 0.
scope: `str`. Define this layer scope (optional). A scope can be
used to share variables between layers. Note that scope will
override name.
name: `str`. A name for this layer (optional).
"""
assert (rnncell_fw._num_units == rnncell_bw._num_units), \
"RNN Cells number of units must match!"
sequence_length = None
if dynamic:
sequence_length = retrieve_seq_length_op(
incoming if isinstance(incoming, tf.Tensor) else tf.stack(incoming))
input_shape = utils.get_incoming_shape(incoming)
with tf.variable_scope(scope, default_name=name, values=[incoming]) as scope:
name = scope.name
# TODO: DropoutWrapper
inference = incoming
        # If a tensor (or array) is given, convert it to a per-timestep list
        if not isinstance(inference, list):
ndim = len(input_shape)
assert ndim >= 3, "Input dim should be at least 3."
axes = [1, 0] + list(range(2, ndim))
            inference = tf.transpose(inference, axes)
inference = tf.unstack(inference)
outputs, states_fw, states_bw = _brnn(
rnncell_fw, rnncell_bw, inference,
initial_state_fw=initial_state_fw,
initial_state_bw=initial_state_bw,
sequence_length=sequence_length,
dtype=tf.float32)
c = tf.GraphKeys.LAYER_VARIABLES + '/' + scope.name
for v in [rnncell_fw.W, rnncell_fw.b, rnncell_bw.W, rnncell_bw.b]:
if hasattr(v, "__len__"):
for var in v: tf.add_to_collection(c, var)
else:
tf.add_to_collection(c, v)
# Track activations.
tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, outputs[-1])
if dynamic:
if return_seq:
o = outputs
else:
outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
o = advanced_indexing_op(outputs, sequence_length)
else:
o = outputs if return_seq else outputs[-1]
sfw = states_fw
sbw = states_bw
# Track output tensor.
tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, o)
return (o, sfw, sbw) if return_states else o
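
# Minimal usage sketch (illustrative; both cells must use the same n_units):
#
#   fw_cell = BasicLSTMCell(128)
#   bw_cell = BasicLSTMCell(128)
#   net = bidirectional_rnn(net, fw_cell, bw_cell, dynamic=True)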
# --------------------------
# RNN Cells
# --------------------------
class BasicRNNCell(core_rnn_cell.RNNCell):
""" TF basic RNN cell with extra customization params. """
def __init__(self, num_units, input_size=None, activation=tf.nn.tanh,
bias=True, weights_init=None, trainable=True, restore=True,
reuse=False):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated." % self)
self._num_units = num_units
if isinstance(activation, str):
self._activation = activations.get(activation)
elif hasattr(activation, '__call__'):
self._activation = activation
else:
raise ValueError("Invalid Activation.")
self.bias = bias
self.weights_init = weights_init
if isinstance(weights_init, str):
self.weights_init = initializations.get(weights_init)()
self.trainable = trainable
self.restore = restore
self.reuse = reuse
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
with tf.variable_scope(scope or type(self).__name__):
# "BasicRNNCell"
output = self._activation(
_linear([inputs, state], self._num_units, True, 0.,
self.weights_init, self.trainable, self.restore,
self.reuse))
# Retrieve RNN Variables
with tf.variable_scope('Linear', reuse=True):
self.W = tf.get_variable('Matrix')
self.b = tf.get_variable('Bias')
return output, output
class BasicLSTMCell(core_rnn_cell.RNNCell):
""" TF Basic LSTM recurrent network cell with extra customization params.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full LSTMCell that follows.
"""
def __init__(self, num_units, forget_bias=1.0, input_size=None,
state_is_tuple=True, activation=tf.tanh,
inner_activation=tf.sigmoid, bias=True, weights_init=None,
                 trainable=True, restore=True, reuse=False, batch_norm=False):
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True." % self)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated." % self)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self.batch_norm = batch_norm
if isinstance(activation, str):
self._activation = activations.get(activation)
elif hasattr(activation, '__call__'):
self._activation = activation
else:
raise ValueError("Invalid Activation.")
if isinstance(inner_activation, str):
self._inner_activation = activations.get(inner_activation)
elif hasattr(inner_activation, '__call__'):
self._inner_activation = inner_activation
else:
raise ValueError("Invalid Activation.")
self.bias = bias
self.weights_init = weights_init
if isinstance(weights_init, str):
self.weights_init = initializations.get(weights_init)()
self.trainable = trainable
self.restore = restore
self.reuse = reuse
@property
def state_size(self):
return (core_rnn_cell.LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM)."""
with tf.variable_scope(scope or type(self).__name__): # "BasicLSTMCell"
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
                c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
concat = _linear([inputs, h], 4 * self._num_units, True, 0.,
self.weights_init, self.trainable, self.restore,
self.reuse)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4,
axis=1)
# apply batch normalization to inner state and gates
            if self.batch_norm:
i = batch_normalization(i, gamma=0.1, trainable=self.trainable, restore=self.restore, reuse=self.reuse)
j = batch_normalization(j, gamma=0.1, trainable=self.trainable, restore=self.restore, reuse=self.reuse)
f = batch_normalization(f, gamma=0.1, trainable=self.trainable, restore=self.restore, reuse=self.reuse)
o = batch_normalization(o, gamma=0.1, trainable=self.trainable, restore=self.restore, reuse=self.reuse)
new_c = (c * self._inner_activation(f + self._forget_bias) +
self._inner_activation(i) *
self._activation(j))
            # hidden-to-hidden batch normalization
            if self.batch_norm:
batch_norm_new_c = batch_normalization(new_c, gamma=0.1, trainable=self.trainable, restore=self.restore, reuse=self.reuse)
new_h = self._activation(batch_norm_new_c) * self._inner_activation(o)
else:
new_h = self._activation(new_c) * self._inner_activation(o)
if self._state_is_tuple:
new_state = core_rnn_cell.LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat([new_c, new_h], 1)
# Retrieve RNN Variables
with tf.variable_scope('Linear', reuse=True):
self.W = tf.get_variable('Matrix')
self.b = tf.get_variable('Bias')
return new_h, new_state
class GRUCell(core_rnn_cell.RNNCell):
""" TF GRU Cell with extra customization params. """
def __init__(self, num_units, input_size=None, activation=tf.tanh,
inner_activation=tf.sigmoid, bias=True, weights_init=None,
trainable=True, restore=True, reuse=False):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated." % self)
self._num_units = num_units
if isinstance(activation, str):
self._activation = activations.get(activation)
elif hasattr(activation, '__call__'):
self._activation = activation
else:
raise ValueError("Invalid Activation.")
if isinstance(inner_activation, str):
self._inner_activation = activations.get(inner_activation)
elif hasattr(inner_activation, '__call__'):
self._inner_activation = inner_activation
else:
raise ValueError("Invalid Activation.")
self.bias = bias
self.weights_init = weights_init
if isinstance(weights_init, str):
self.weights_init = initializations.get(weights_init)()
self.trainable = trainable
self.restore = restore
self.reuse = reuse
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Gated recurrent unit (GRU) with nunits cells."""
with tf.variable_scope(scope or type(self).__name__): # "GRUCell"
with tf.variable_scope("Gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
r, u = array_ops.split(value=_linear([inputs, state],
2 * self._num_units, True, 1.0, self.weights_init,
self.trainable, self.restore, self.reuse),
num_or_size_splits=2, axis=1)
r, u = self._inner_activation(r), self._inner_activation(u)
with tf.variable_scope("Candidate"):
c = self._activation(
_linear([inputs, r * state], self._num_units, True, 0.,
self.weights_init, self.trainable, self.restore,
self.reuse))
new_h = u * state + (1 - u) * c
self.W, self.b = list(), list()
# Retrieve RNN Variables
with tf.variable_scope('Gates/Linear', reuse=True):
self.W.append(tf.get_variable('Matrix'))
self.b.append(tf.get_variable('Bias'))
with tf.variable_scope('Candidate/Linear', reuse=True):
self.W.append(tf.get_variable('Matrix'))
self.b.append(tf.get_variable('Bias'))
return new_h, new_h
class DropoutWrapper(core_rnn_cell.RNNCell):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
seed=None):
"""Create a cell with added input and/or output dropout.
Dropout is never used on the state.
Arguments:
cell: an RNNCell, a projection to output_size is added to it.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is float and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is float and 1, no output dropout will be added.
seed: (optional) integer, the randomness seed.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if keep_prob is not between 0 and 1.
"""
if not isinstance(cell, core_rnn_cell.RNNCell):
raise TypeError("The parameter cell is not a RNNCell.")
if (isinstance(input_keep_prob, float) and
not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
raise ValueError(
"Parameter input_keep_prob must be between 0 and 1: %d"
% input_keep_prob)
if (isinstance(output_keep_prob, float) and
not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
raise ValueError(
"Parameter output_keep_prob must be between 0 and 1: %d"
% output_keep_prob)
self._cell = cell
self._input_keep_prob = input_keep_prob
self._output_keep_prob = output_keep_prob
self._seed = seed
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell with the declared dropouts."""
is_training = config.get_training_mode()
if (not isinstance(self._input_keep_prob, float) or
self._input_keep_prob < 1):
inputs = tf.cond(is_training,
lambda: tf.nn.dropout(inputs,
self._input_keep_prob,
seed=self._seed),
lambda: inputs)
output, new_state = self._cell(inputs, state)
if (not isinstance(self._output_keep_prob, float) or
self._output_keep_prob < 1):
output = tf.cond(is_training,
lambda: tf.nn.dropout(output,
self._output_keep_prob,
seed=self._seed),
lambda: output)
return output, new_state
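
# Minimal usage sketch (illustrative): wrap any cell to add input/output dropout.
#
#   cell = GRUCell(128)
#   cell = DropoutWrapper(cell, input_keep_prob=0.8, output_keep_prob=0.8)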
# --------------------
# TensorFlow Utils
# --------------------
def _linear(args, output_size, bias, bias_start=0.0, weights_init=None,
trainable=True, restore=True, reuse=False, scope=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Arguments:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError(
"Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError(
"Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
# Now the computation.
with tf.variable_scope(scope or "Linear", reuse=reuse):
matrix = va.variable("Matrix", [total_arg_size, output_size],
initializer=weights_init, trainable=trainable,
restore=restore)
if len(args) == 1:
res = tf.matmul(args[0], matrix)
else:
res = tf.matmul(array_ops.concat(args, 1), matrix)
if not bias:
return res
bias_term = va.variable(
"Bias", [output_size],
initializer=tf.constant_initializer(bias_start),
trainable=trainable, restore=restore)
return res + bias_term
def retrieve_seq_length_op(data):
""" An op to compute the length of a sequence. 0 are masked. """
with tf.name_scope('GetLength'):
used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2))
length = tf.reduce_sum(used, reduction_indices=1)
length = tf.cast(length, tf.int32)
return length
def advanced_indexing_op(input, index):
""" Advanced Indexing for Sequences. """
batch_size = tf.shape(input)[0]
max_length = int(input.get_shape()[1])
dim_size = int(input.get_shape()[2])
index = tf.range(0, batch_size) * max_length + (index - 1)
flat = tf.reshape(input, [-1, dim_size])
relevant = tf.gather(flat, index)
return relevant
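# Example (illustrative): with `input` of shape [batch, max_length, dim] and
# `index` holding each sequence's true length, this gathers the last valid
# timestep of every sequence, e.g. index=[3, 5] selects input[0, 2, :] and
# input[1, 4, :].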
| [
"[email protected]"
] | |
3a15a5b18a062442591358d0a5eb5d0c26f7290e | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /MojpPTZYQyN5L2i4a_5.py | 82668008b83f18c70018cafee43b7e375d184f1a | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | """
You work in a toy car workshop, and your job is to build toy cars from a
collection of parts. Each toy car needs 4 wheels, 1 car body, and 2 figures of
people to be placed inside. Given the total number of wheels, car bodies and
figures available, how many _complete_ toy cars can you make?
### Examples
cars(2, 48, 76) ➞ 0
# 2 wheels, 48 car bodies, 76 figures
cars(43, 15, 87) ➞ 10
cars(88, 37, 17) ➞ 8
### Notes
N/A
"""
def cars(wheels, bodies, figures):
totalcars = 0
check = True
while check:
if wheels >= 4 and bodies >= 1 and figures >= 2:
wheels -= 4
bodies -= 1
figures -= 2
totalcars += 1
else:
check = False
return totalcars
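# A closed-form alternative (equivalent: each car consumes a fixed bundle of
# parts, so the scarcest part sets the count):
#
#   def cars(wheels, bodies, figures):
#       return min(wheels // 4, bodies, figures // 2)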
| [
"[email protected]"
] | |
97166a441c2e8850d56c5fa2603f79d4a7e7062f | b5ce03fad3c14b07e8ded6258716eb63a8ba1525 | /.history/app_20210909053206.py | e5a44b75c3d4447f9f9c949de72114677c200f2d | [] | no_license | saraalmuraytib/FSND-Capstone-Project | 0d70058a080d3d91004e7d8bfbf38dfd3f9092fc | 4a18217c7aa83899cc3f134c6caa710a2521a8fd | refs/heads/main | 2023-07-28T01:20:03.838641 | 2021-09-10T01:33:26 | 2021-09-10T01:33:26 | 402,197,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,972 | py | '''
* General Specifications *
** Models will include at least…
✓ Two classes with primary keys at at least two attributes each
✓ [Optional but encouraged] One-to-many or many-to-many relationships between classes
** Endpoints will include at least…
✓ Two GET requests
✓ One POST request
✓ One PATCH request
✓ One DELETE request
** Roles will include at least…
✓ Two roles with different permissions
✓ Permissions specified for all endpoints
** Tests will include at least….
* One test for success behavior of each endpoint
* One test for error behavior of each endpoint
* At least two tests of RBAC for each role
'''
import os
from flask import Flask, request, abort, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from datetime import datetime
# ------------------------
from Database.models import *
from auth.auth import AuthError, requires_auth
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__)
setup_db(app)
CORS(app)
Migrate(app, db)
'''
!! NOTE THIS WILL DROP ALL RECORDS AND START YOUR DB FROM SCRATCH
!! NOTE THIS MUST BE UNCOMMENTED ON FIRST RUN
    !! Running this function will add one demo row which helps you test the API
'''
#db_drop_and_create_all()
# ----------------------- ROUTES -----------------------
# -------------------- Get Requests --------------------
@app.route('/', methods=['GET'])
def index():
return '<h1>Welcome to Virtual Tutor</h1>'
# It should be a public endpoint
@app.route('/subjects')
def get_subjects():
subjects = Subject.query.all()
if len(subjects) == 0:
abort(404)
return jsonify({
'success': True,
'Subjects': {subject.id: subject.name for subject in subjects}
})
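    # Example request (hypothetical host; response values illustrative):
    #   curl http://localhost:8080/subjects
    #   -> {"success": true, "Subjects": {"1": "Math", "2": "Physics"}}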
# It should be a public endpoint
@app.route('/subjects/<int:subject_id>/tutors', methods=['GET'])
def get_tutors_based_on_subject(subject_id):
subject = Subject.query.filter(Subject.id == subject_id).one_or_none()
if subject is None:
abort(404)
else:
tutors = Tutor.query.filter(Tutor.subject_id == str(subject_id)).all()
return jsonify({
'success': True,
'Tutors': [tutor.format() for tutor in tutors],
'total_Tutor': len(tutors),
'Subject': subject.name
})
# It should require the 'get:appointments_tutor' permission
@app.route('/tutor/<int:tutor_id>/appointments', methods=['GET'])
@requires_auth('get:appointments_tutor')
def get_appointments_tutor(payload,tutor_id):
tutor = Tutor.query.filter(Tutor.id == tutor_id).one_or_none()
if tutor is None:
abort(404)
else:
appointments = Appointments.query.filter(
Appointments.tutor_id == str(tutor_id)).all()
if len(appointments) == 0:
return jsonify({
'success': True,
'Total Appointments': len(appointments)
})
else:
upcoming_appointments = []
for appointment in tutor.upcoming_appointments:
student = Student.query.get(appointment.student_id)
upcoming_appointments.append({
'Appointment ID': appointment.id,
"Student ID": appointment.student_id,
"Student name": student.name,
'Start Time': appointment.start_time,
'Duration in minutes': appointment.duration,
'confirmation': "Confirmed" if appointment.confirmation in (True, 't', 'True') else "Not Confirmed"
})
return jsonify({
'success': True,
'Total Appointments': len(appointments),
'Total of Upcoming Appointments': tutor.num_upcoming_appointments,
'Upcoming Appointments': upcoming_appointments
})
# It should require the 'get:appointments_student' permission
@app.route('/student/<int:student_id>/appointments', methods=['GET'])
@requires_auth('get:appointments_student')
def get_appointments_student(payload,student_id):
student = Student.query.filter(Student.id == student_id).one_or_none()
if student is None:
abort(404)
else:
appointments = Appointments.query.filter(
Appointments.student_id == str(student_id)).all()
if len(appointments) == 0:
return jsonify({
'success': True,
'Total Appointments': len(appointments)
})
else:
upcoming_appointments = []
for appointment in student.upcoming_appointments:
tutor = Tutor.query.get(appointment.tutor_id)
upcoming_appointments.append({
'Appointment ID': appointment.id,
"Tutor ID": appointment.student_id,
"Tutor name": tutor.name,
'Start Time': appointment.start_time,
'Duration in minutes': appointment.duration,
'confirmation': "Confirmed" if appointment.confirmation in (True, 't', 'True') else "Not Confirmed"
})
return jsonify({
'success': True,
'Total Appointments': len(appointments),
'Total of Upcoming Appointments': student.num_upcoming_appointments,
'Upcoming Appointments': upcoming_appointments
})
# -------------------- POST Requests ---------------------
# It should require the 'post:create_tutor' permission
@app.route('/tutor', methods=['POST'])
@requires_auth('post:create_tutor')
def create_tutor(payload):
body = request.get_json()
name = body.get('name')
intro = body.get('intro')
subject_id = body.get('subject_id')
availableTime=body.get('availableTime')
# Check if the subject exist or not
subject = Subject.query.filter(Subject.id == subject_id).one_or_none()
if subject is None:
abort(404)
else:
try:
new_tutor = Tutor(name=name,
intro=intro,subject_id=subject_id,availableTime=availableTime)
new_tutor.insert()
return jsonify({
'success': True,
'Tutor': new_tutor.format()
})
            except Exception:
abort(422)
@app.route('/student', methods=['POST'])
def create_student():
body = request.get_json()
name = body.get('name')
email = body.get('email')
age = body.get('age')
grade=body.get('grade')
try:
new_student = Student(name=name,
email=email,age=age,grade=grade)
new_student.insert()
return jsonify({
'success': True,
'Student': new_student.format()
})
        except Exception:
abort(422)
# It should require the 'post:create_appointment' permission
@app.route("/student/<int:student_id>/appointments/create", methods=['POST'])
@requires_auth('post:create_appointment')
def create_appointment(payload,student_id):
student = Student.query.filter(Student.id == student_id).one_or_none()
if student is None:
abort(404)
else:
# Fetch the request body
body = request.get_json()
# Get start_time, duration, and tutor_id to create the appointment
start_time = body.get('start_time')
duration = body.get('duration')
tutor_id = body.get('tutor_id')
# Check if the tutor exist or not
tutor = Tutor.query.filter(Tutor.id == tutor_id).one_or_none()
if tutor is None:
abort(404)
else:
try:
new_appointment = Appointments(
start_time=(datetime.strptime(
start_time, '%d/%m/%y %H:%M:%S')),
duration=duration, tutor_id=tutor_id, student_id=student_id)
new_appointment.insert()
return jsonify({
'success': True,
'Appointment': new_appointment.format()
})
                except Exception:
abort(422)
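    # Example request body (illustrative; start_time must match the
    # '%d/%m/%y %H:%M:%S' format parsed above):
    #   POST /student/1/appointments/create
    #   {"start_time": "01/10/21 14:00:00", "duration": 60, "tutor_id": 2}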
# -------------------- PATCH Requests --------------------
# It should require the 'patch:update_appointment' permission
@app.route("/appointments/edit/<int:appointment_id>", methods=['PATCH'])
@requires_auth('patch:update_appointment')
def update_appointment(payload,appointment_id):
appointment = Appointments.query.filter(
Appointments.id == appointment_id).one_or_none()
if appointment is None:
abort(404)
else:
try:
body = request.get_json()
confirmation = body.get('confirmation')
appointment.confirmation = confirmation
appointment.update()
return jsonify({
'success': True,
'Appointment Confirmation': "Confirmed" if appointment.confirmation in (True, 't', 'True') else "Not Confirmed"
})
            except Exception:
abort(422)
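    # Example request body (illustrative):
    #   PATCH /appointments/edit/5
    #   {"confirmation": true}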
# -------------------- DELETE Requests --------------------
# It should require the 'delete:delete_appointment' permission
@app.route("/appointments/delete/<int:appointment_id>", methods=['DELETE'])
@requires_auth('delete:delete_appointment')
def delete_appointment(payload,appointment_id):
appointment = Appointments.query.filter(Appointments.id == appointment_id).one_or_none()
if appointment is None:
abort(404)
else:
try:
appointment.delete()
return jsonify({
'success': True,
'deleted id': appointment_id
})
            except Exception:
abort(422)
# -------------------- Error Handling --------------------
'''
Error handling for unprocessable entity
'''
@app.errorhandler(422)
def unprocessable(error):
return jsonify({
"success": False,
"error": 422,
"message": "unprocessable"
}), 422
'''
Error handler for 404
error handler should conform to general task above
'''
@app.errorhandler(404)
def not_found(error):
return jsonify({
"success": False,
"error": 404,
"message": "resource not found"
}), 404
'''
Error handler for AuthError
error handler should conform to general task above
'''
@app.errorhandler(AuthError)
def handle_auth_error(error):
return jsonify({
"success": False,
"error": error.status_code,
'message': error.error
}), 401
return app
if __name__ == '__main__':
create_app().run(host='0.0.0.0', port=8080, debug=True)
| [
"[email protected]"
] | |
cdd263ea94ba391933c5aa44be7a4aad74ca9bdb | f854751c12afc48401ddcf0590ea70c72a2b7c58 | /Canny_findContours.py | a1faaabc01f51f5dea466c49023c70c3d4fcf9d0 | [] | no_license | mu-777/image_processing_tests | 4326ebe3b321f9b900a02b63d0a3189659439244 | 2b6ee72e10065fd2dfb1b7e430bf5ccfc26a0c95 | refs/heads/master | 2021-01-17T10:21:09.473142 | 2016-03-18T13:00:41 | 2016-03-18T13:00:41 | 42,449,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,260 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# anime face classifier (cascade file):
# https://github.com/nagadomi/lbpcascade_animeface
# sample of running detection on video:
# http://www.takunoko.com/blog/python%E3%81%A7%E9%81%8A%E3%82%93%E3%81%A7%E3%81%BF%E3%82%8B-part1-opencv%E3%81%A7%E9%A1%94%E8%AA%8D%E8%AD%98/
import cv2
import numpy as np
import time
# load the feature data for the cascade classifier
CASCADE_PATH = "./cascade/lbpcascade_animeface.xml"
IN_IMG_PATHS = ["./test_imgs/face_detecting" + str(i + 1) + ".png" for i in range(9)]
OVERLAY_IMG_PATH = "./test_imgs/face_up5.jpg"
OUT_IMG_PATH = "./test_imgs/face_detecting_out.png"
overlay_color = (0, 187, 254)
rect_color = (0, 0, 0)
def check_img(img):
cv2.imshow('a', img)
cv2.waitKey(0)
def cutoff_hsv(src_img, diff_threshold=6):
(h, w) = src_img.shape[:2]
hsv_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2HSV)
ret_img = np.zeros((h, w, 3), np.uint8)
    (c_h, c_s, c_v) = hsv_img[h // 2, w // 2]
for i, j in [(i, j) for i in range(h) for j in range(w)]:
(h, s, v) = hsv_img[i, j]
if abs(c_h - h) < diff_threshold:
ret_img[i, j] = src_img[i, j]
return ret_img
def cutoff_rgb(src_img, diff_threshold=20):
(h, w) = src_img.shape[:2]
ret_img = np.zeros((h, w, 3), np.uint8)
    center_color = src_img[h // 2, w // 2]
for i, j in [(i, j) for i in range(h) for j in range(w)]:
color = src_img[i, j]
if all([abs(diff) < diff_threshold for diff in center_color - color]):
ret_img[i, j] = src_img[i, j]
return ret_img
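# Both cutoff_* helpers above loop per pixel, which is slow in Python. A
# vectorized sketch of cutoff_rgb using numpy broadcasting (illustrative,
# same threshold logic):
#
#   def cutoff_rgb_fast(src_img, diff_threshold=20):
#       h, w = src_img.shape[:2]
#       center = src_img[h // 2, w // 2].astype(np.int16)
#       mask = (np.abs(src_img.astype(np.int16) - center) < diff_threshold).all(axis=2)
#       return np.where(mask[..., None], src_img, 0).astype(np.uint8)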
def main(in_img_path):
rgb_img = cv2.imread(in_img_path)
cascade = cv2.CascadeClassifier(CASCADE_PATH)
faces = cascade.detectMultiScale(cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY),
scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))
if len(faces) > 0:
        # draw a rectangle around each detected face
for (x, y, w, h) in faces:
print(w, h)
over_img_temp = rgb_img[y:y + h, x:x + w]
gray = cv2.cvtColor(over_img_temp, cv2.COLOR_BGR2GRAY)
gray_smooth = cv2.GaussianBlur(gray, (5, 5), 0)
# edge_img = cv2.Canny(gray_smooth, 1000, 1500, apertureSize=5)
edge_img = cv2.Canny(gray_smooth, 1600, 1600, apertureSize=5)
check_img(edge_img)
dilated_img = cv2.dilate(edge_img, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2)), iterations=3)
check_img(dilated_img)
# cv2.imwrite('./'+str(x)+'dilated_img.jpg', dilated_img)
contours, hierarchy = cv2.findContours(dilated_img, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
c_len = len(contours)
contours.reverse()
for i, contour in enumerate(contours):
cv2.drawContours(over_img_temp, [contour], -1, (0, 255 * float(i) / c_len, 0), thickness=-1)
check_img(over_img_temp)
# cv2.imwrite('./'+str(x)+'over_img.jpg', over_img_temp)
# contour_img = over_img_temp.copy()
# for i, contour in enumerate(contours):
# arclen = cv2.arcLength(contour, True)
# approx = cv2.approxPolyDP(contour, 0.02 * arclen, True)
# cv2.drawContours(contour_img, [approx], -1,
# (0, 0, 255 * (1 - float(i) / len(contours))), 2)
# check_img(contour_img)
# contour = reduce(lambda c1, c2: np.r_[c1, c2], contours)
# cv2.fillConvexPoly(over_img_temp, contour, (255, 0, 0))
# for contour in contours:
# if len(contour) > 10:
# box = cv2.fitEllipse(contour)
# cv2.ellipse(over_img_temp, box, (255, 255, 0), 2)
# check_img(over_img_temp)
# over_img_temp = cutoff_rgb(x, y, w, h)
# over_img_temp = cutoff_hsv(x, y, w, h)
# kernel_l = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25, 25))
# kernel_m = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
# kernel_s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
# ret, binary_img = cv2.threshold(over_img_temp, 130, 255, cv2.THRESH_BINARY)
# first = cv2.dilate(binary_img, kernel_l)
# second = cv2.erode(first, kernel_s, iterations=5)
# first = cv2.dilate(binary_img, kernel_l)
# second = cv2.erode(first, kernel_s, iterations=5)
# check_img(binary_img)
# check_img(first)
# check_img(second)
# gray = cv2.cvtColor(over_img_temp, cv2.COLOR_BGR2GRAY)
# gray_smooth = cv2.GaussianBlur(gray, (31, 31), 0)
# ret, th1 = cv2.threshold(gray_smooth, 130, 255, cv2.THRESH_BINARY)
# contours, hierarchy = cv2.findContours(th1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# cv2.drawContours(over_img_temp, contours, 0, overlay_color, thickness=5)
cv2.rectangle(rgb_img, (x, y), (x + w, y + h), (0, 187, 254), thickness=7)
# cv2.imwrite(out_img_path, rgb_img)
# --------------------------------------------
if __name__ == '__main__':
for img_path in IN_IMG_PATHS:
main(img_path)
| [
"[email protected]"
] | |
ddfb237f379f7f85a5e2b0103a72f256ceb013c0 | ac4b9385b7ad2063ea51237fbd8d1b74baffd016 | /.history/s5_getparser_20210210014110.py | 37d12714c7c733a9146fea52dd6026540c7acd3a | [] | no_license | preethanpa/ssoemprep | 76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f | ce37127845253c768d01aeae85e5d0d1ade64516 | refs/heads/main | 2023-03-09T00:15:55.130818 | 2021-02-20T06:54:58 | 2021-02-20T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,801 | py | import shutil
from fonduer.parser.preprocessors import html_doc_preprocessor
from sqlalchemy import exc
import pdftotree
import re
from sen_parser_usable import *
from config import config
import json
import os
import posixpath
import http.server
import urllib.request, urllib.parse, urllib.error
import cgi
import mimetypes
from io import BytesIO
import uuid
import sys
import logging
import errno
from fonduer.parser.models import Document, Sentence, Table
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.parser import Parser
from pprint import pprint
from fonduer import Meta, init_logging
from fonduer.candidates import CandidateExtractor
from fonduer.candidates import MentionNgrams
from fonduer.candidates import MentionExtractor
from fonduer.candidates.models import Mention
from fonduer.candidates.models import mention_subclass
from fonduer.candidates.models import candidate_subclass
from fonduer.candidates.matchers import RegexMatchSpan, DictionaryMatch, LambdaFunctionMatcher, Intersect, Union
from fonduer.features import Featurizer
import inspect
import matchers as matchers
from extract_html import *
PII_KEYLIST = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/model/pii-keylist.json'
PARALLEL = 4 # assuming a quad-core machine
# ATTRIBUTE = "ns8s_invoice_poc_stage"
# check that the databases mentioned below already exist
getdbref = __import__('s1_2_getdbref')
# Will return <module '1_2_getdbref' from '/home/dsie/Developer/sandbox/3ray/server/backend/python/kbc_process/1_2_getdbref.py'>
pdf_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf/'
docs_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/html/'
# Configure logging for Fonduer
init_logging(log_dir="logs", level=logging.ERROR)
max_docs = 1000
PARALLEL = 4
doc_preprocessor = None
execution_stack = ["1. Get session object..."]
# exc_context = 'passport_number'
# doc_context = 'Test-Document'
exc_context = 'email_id' if json.loads(sys.argv[1])['context'] is None else json.loads(sys.argv[1])['context']
doc_context = 'Test-Document' if json.loads(sys.argv[1])['doc_name'] is None else json.loads(sys.argv[1])['doc_name']
# exc_context = 'phone_number'
try:
session = getdbref.get_session()
sessType = type(session) # Will return <class 'sqlalchemy.orm.session.Session'>
execution_stack.append("Done.")
execution_stack.append("2. Processing layout...")
except exc.SQLAlchemyError as sql_exception:
    logging.error(f'{execution_stack}, session = getdbref.get_session(), {sql_exception}')
except Exception as session_exception:
    logging.error(f'{execution_stack}, session = getdbref.get_session(), {session_exception}')
def do_prepare_mentions_batch(candidate_mentions, config):
for index, data in enumerate(config):
mention_subclass_list = list()
max_ngrams = None
for key in data.keys():
if key == 'Candidates':
for c in data.get(key):
# if c in candidate_mentions.keys(): #TODO verify this condition
candidate_mentions[c]['mention_names'].append(data['MentionName'])
candidate_mentions[c]['mention_ngrams'].append(data['MentionNGrams'])
candidate_mentions[c]['mention_matchers'].append(matchers.matcher[data.get('Context')])
if 'mention_subclass' in candidate_mentions[c].keys():
candidate_mentions[c]['mention_subclass'].append(mention_subclass(data['MentionName']))
else:
candidate_mentions[c]['mention_subclass'] = [mention_subclass(data['MentionName'])]
if 'max_ngrams' in candidate_mentions[c].keys():
candidate_mentions[c]['max_ngrams'].append(MentionNgrams(n_max=candidate_mentions[c].get('mention_ngrams')))
else:
candidate_mentions[c]['max_ngrams'] = [MentionNgrams(n_max=candidate_mentions[c].get('mention_ngrams'))]
candidate_mentions[c]['throttler_function'] = data.get('ThrottlerFunctions')[0].get('tf')
return candidate_mentions
def do_prepare_mentions(candidate_mentions, config, context):
mention_subclass_list = list()
max_ngrams = None
ctx = {
"mention_names": [],
"mention_ngrams": [],
"mention_matchers": [],
"mention_subclass": [],
"max_ngrams": [],
"throttler_function": None
}
ctx['mention_names'].append(config[context].get('MentionName'))
ctx['mention_ngrams'].append(config[context]['MentionNGrams'])
ctx['mention_matchers'].append(matchers.matcher[config[context].get('Context')])
ctx['mention_subclass'].append(mention_subclass(config[context]['MentionName']))
ctx['max_ngrams'].append(MentionNgrams(n_max=config[context].get('MaxNGrams')))
ctx['throttler_function'] = config[context].get('ThrottlerFunctions')[0].get('tf')
candidate_mentions[context] = ctx
return candidate_mentions
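# For reference, a single `config[context]` entry is assumed to look roughly
# like this (keys inferred from the accesses above; values illustrative):
#
#   {"MentionName": "email_id_label", "MentionNGrams": 3, "MaxNGrams": 3,
#    "Context": "email_id", "ThrottlerFunctions": [{"tf": some_throttler}]}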
def do_train(candidate_mentions):
from sqlalchemy import desc
docs = session.query(Document).order_by(Document.name).all()
# docs = session.query(Document).order_by(desc(Document.id)).limit(1)
total_mentions = session.query(Mention).count()
splits = (1, 0.0, 0.0)
train_cands = []
for candidate_key in candidate_mentions.keys():
train_docs = set()
dev_docs = set()
test_docs = set()
'''print('Mention Subclass {}, Ngrams {} and Matchers {}'
.format(candidate_mentions[candidate_key]['mention_subclass'],
candidate_mentions[candidate_key]['max_ngrams'],
candidate_mentions[candidate_key]['mention_matchers']))
'''
mention_extractor = MentionExtractor(session, candidate_mentions[candidate_key]['mention_subclass'], candidate_mentions[candidate_key]['max_ngrams'], candidate_mentions[candidate_key]['mention_matchers'])
mention_extractor.apply(docs, parallelism=PARALLEL)
candidate_mentions[candidate_key]['candidate_subclass'] = candidate_subclass(candidate_key, candidate_mentions[candidate_key].get('mention_subclass'), table_name=candidate_mentions[candidate_key]['mention_names'][0])
candidate_extractor = CandidateExtractor(session, [candidate_mentions[candidate_key]['candidate_subclass']], throttlers=[candidate_mentions[candidate_key]['throttler_function']])
data = [(doc.name, doc) for doc in docs]
data.sort(key=lambda x: x[0])
for i, (doc_name, doc) in enumerate(data):
train_docs.add(doc)
for i, docs in enumerate([train_docs, dev_docs, test_docs]):
candidate_extractor.apply(docs, split=i, parallelism=PARALLEL)
# train_cands = candidate_extractor.get_candidates(split = 0)
# train_cands.append(candidate_extractor.get_candidates(split = 0))
candidate_mentions[candidate_key]['train_cands'] = candidate_extractor.get_candidates(split = 0)
for index, item in enumerate(candidate_mentions[candidate_key]['train_cands']):
if len(item) > 0:
featurizer = Featurizer(session, [candidate_mentions[candidate_key]['candidate_subclass']])
featurizer.apply(split=0, train=True, parallelism=PARALLEL)
# %time featurizer.apply(split=0, train=True, parallelism=PARALLEL)
# %time F_train = featurizer.get_feature_matrices(candidate_mentions[candidate_key]['train_cands'])
else:
candidate_mentions[candidate_key]['train_cands'].pop(index)
# candidate[candidate_key]['train_cands'] = train_cands
return candidate_mentions
def do_process_get_candidates(candidate_mentions=None):
train_cands = do_train(candidate_mentions)
return train_cands
def handle_return(generator, func):
contextInfoDict = yield from generator
func(contextInfoDict)
def get_context_async(sm, document_context='', search_context=''):
pass
# star_char_index = sm.char_start
# end_char_index = sm.char_end
# star_char_index = sm['applicant_name_context'].char_start
# end_char_index = sm['applicant_name_context'].char_end
# contextInfoDictionary = {
# 'label': {
# # 'spanMention': sm['applicant_name_context'],
# 'document': sm[search_context].sentence.document.name,
# 'documentId': sm[search_context].sentence.document.id,
# 'sentence': sm[search_context].sentence.text,
# 'contextValue': sm[search_context].sentence.text[star_char_index:end_char_index+1],
# 'startChar': star_char_index,
# 'endChar': end_char_index
# },
# 'value': {
# # 'spanMention': sm['applicant_name_context'],
# 'document': sm[search_context].sentence.document.name,
# 'documentId': sm[search_context].sentence.document.id,
# 'sentence': sm[search_context].sentence.text,
# 'contextValue': sm[search_context].sentence.text[star_char_index:end_char_index+1],
# 'startChar': star_char_index,
# 'endChar': end_char_index
# }
# }
# yield contextInfoDictionary
def print_values(value):
print('returned: {}'.format(json.dumps(value)))
def do_get_docs_values(candidates=None, document_context=None, search_context=None):
'''
"<class 'fonduer.parser.models.document.Document'>"
"<class 'fonduer.parser.models.section.Section'>"
"<class 'fonduer.parser.models.sentence.Sentence'>"
"<class 'fonduer.candidates.models.span_mention.SpanMention'>"
"<class 'fonduer.candidates.models.mention.ApplicationNameLabel'>"
'''
train_cands = None
docs_and_values = []
all_docs_and_values = []
    search_types = ['all_docs_and_pii', 'all_doc_and_' + str(search_context),
                    'all_pii_for_' + str(document_context),
                    str(search_context) + '_for_' + str(document_context)]
search_type = ''
if document_context == None and search_context == None:
'''Entire KB'''
search_type = search_types[0]
elif document_context == None and search_context is not None:
''' Send entire KB '''
search_type = search_types[1]
elif document_context is not None and search_context == None:
''' Send KB for document'''
search_type = search_types[2]
else:
''' Send KB for match in Doc'''
        search_type = search_types[3]
for index, item in enumerate(candidates):
train_cands = candidates.get(item).get('train_cands')
if train_cands is not None:
for instances in train_cands:
for candidate in instances:
for key, value in enumerate(candidate):
all_docs_and_values.append({
"documentName": value.context.sentence.document.name,
"page": value.context.sentence.page,
"piiFound": value.context.sentence.text
})
for item in all_docs_and_values:
        if search_type == search_types[0]:
            docs_and_values.append(item)
        # NOTE: filtering for the narrower search types was never finished here;
        # the document-level filtering below covers the per-document cases.
if document_context is None:
print([])
else:
if document_context is None and search_context is None:
pass
elif document_context is None:
for item in docs_and_values:
if item.get('applicant_name_context.sentence.section.document.name') == document_context:
docs_and_values = [item]
return docs_and_values
elif search_context is None:
for item in docs_and_values:
if item.get('applicant_name_context.sentence.section.document.name') == document_context:
docs_and_values = [item]
return docs_and_values
else:
for item in docs_and_values:
# if item.get('applicant_name_context.sentence.section.document.name') == document_context:
if item.get('name') == document_context:
docs_and_values = [item]
# logging.info(f'docs_and_values: {docs_and_values}')
return docs_and_values
def train_and_test_experiment(document_context=None, context_label='', user=0, pdf_path=''):
'''
context_value:
context_label:
user:
pdf_path:
'''
candidate_mentions = do_prepare_mentions({}, config, context_label)
candidates = do_process_get_candidates(candidate_mentions)
results = []
if candidates is not None:
span_mention = None
span_mention_list = do_get_docs_values(candidates, document_context, context_label)
if len(span_mention_list) > 0:
span_mention = span_mention_list[0]
returned_contexts = handle_return(get_context_async(span_mention, document_context, context_label), print_values)
for x in returned_contexts:
results.append(x)
else:
# TODO
pass
return results
def train_and_test(document_context=None, context_label='', user=0, pdf_path=''):
'''
context_value:
context_label:
user:
pdf_path:
'''
candidate_mentions = do_prepare_mentions({}, config, context_label)
candidates = do_process_get_candidates(candidate_mentions)
results = []
if candidates is not None:
results = do_get_docs_values(candidates, document_context, context_label)
return results
print(json.dumps(train_and_test(document_context=doc_context, context_label=exc_context)))
| [
"{[email protected]}"
] | |
38345091f0f4fc6d048048415eef8af7eff32537 | 914ca4921c114c917267214e0987ebecf30b3510 | /Programming_Practice/Python/Python_Scraping/Scraping_004/open_api_2.py | 0729766be8fd65dbcdf4e6bca7e239c092a0c883 | [] | no_license | BurnFaithful/KW | 52535030ea57f1489a0d108d599b66ffee50a1f4 | 15deb50449b8f902f623f20b97448c0f473a9342 | refs/heads/master | 2022-12-20T16:06:01.827398 | 2020-09-12T08:51:23 | 2020-09-12T08:51:23 | 294,897,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | from urllib.parse import quote
import requests
import bs4
endpoint = "http://apis.data.go.kr/B552657/ErmctInsttInfoInqireService/getParmacyListInfoInqire?"
serviceKey = "xqhT19uqLKmUuUxiUk6By%2FkUkZHlqQfalicqhc3oYnPy4KoA%2FK%2BM8EQVYGOaBBtfRMfqs6SQ1ei%2F8VPZgE6VlA%3D%3D"
Q0 = quote("서울특별시")  # percent-encode the Hangul query value ("Seoul Metropolitan City")
# Q1 = quote("강남구")
# QT = "1"
# QN = quote("삼성약국")
ORD = "NAME"
pageNo = "1"
startPage = "1"
numOfRows = "5000"
pageSize = "10"
paramset = "serviceKey=" + serviceKey \
+ "&numOfRows=" + numOfRows \
+ "&pageSize=" + pageSize \
+ "&pageNo=" + pageNo \
+ "&startPage=" + startPage \
+ "&Q0=" + Q0 \
+ "&ORD=" + ORD #\
# + "&Q1=" + Q1 \
# + "&QT=" + QT \
# + "&QN=" + QN \
# + "&_type=json"
url = endpoint + paramset
print(url)
result = requests.get(url)
bs_obj = bs4.BeautifulSoup(result.content, "html.parser")
# print(bs_obj)
items = bs_obj.findAll("item")
count = 0
for item in items:
tagged_item = item.find("dutytime1c")
if tagged_item != None:
close_time = int(tagged_item.text)
if close_time > 2100:
count += 1
print(item.find("dutyname").text)
# print(tagged_item)
# print("서울특별시 내 월요일 9시 이후까지 하는 약국의 수 : " + str(count)) | [
"[email protected]"
] | |
b78217f75d6e278638d0e9c281e604eab9e625fd | a8750439f200e4efc11715df797489f30e9828c6 | /LeetCodeContests/93/871_minimum_refill_required.py | 8573ce433e9d77f48451ccfa85747b65c8a1c2b3 | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,408 | py | '''
871. Minimum Number of Refueling Stops
User Accepted: 234
User Tried: 563
Total Accepted: 247
Total Submissions: 1546
Difficulty: Hard
A car travels from a starting position to a destination which is target miles east of the starting position.
Along the way, there are gas stations. Each station[i] represents a gas station that is station[i][0] miles east
of the starting position, and has station[i][1] liters of gas.
The car starts with an infinite tank of gas, which initially has startFuel liters of fuel in it.
It uses 1 liter of gas per 1 mile that it drives.
When the car reaches a gas station, it may stop and refuel, transferring all the gas from the station into the car.
What is the least number of refueling stops the car must make in order to reach its destination?
If it cannot reach the destination, return -1.
Note that if the car reaches a gas station with 0 fuel left, the car can still refuel there.
If the car reaches the destination with 0 fuel left, it is still considered to have arrived.
Example 1:
Input: target = 1, startFuel = 1, stations = []
Output: 0
Explanation: We can reach the target without refueling.
Example 2:
Input: target = 100, startFuel = 1, stations = [[10,100]]
Output: -1
Explanation: We can't reach the target (or even the first gas station).
Example 3:
Input: target = 100, startFuel = 10, stations = [[10,60],[20,30],[30,30],[60,40]]
Output: 2
Explanation:
We start with 10 liters of fuel.
We drive to position 10, expending 10 liters of fuel. We refuel from 0 liters to 60 liters of gas.
Then, we drive from position 10 to position 60 (expending 50 liters of fuel),
and refuel from 10 liters to 50 liters of gas. We then drive to and reach the target.
We made 2 refueling stops along the way, so we return 2.
Note:
1 <= target, startFuel, stations[i][1] <= 10^9
0 <= stations.length <= 500
0 < stations[0][0] < stations[1][0] < ... < stations[stations.length-1][0] < target
#solution by https://leetcode.com/yangzhenjian
class Solution:
def minRefuelStops(self, target, startFuel, stations):
"""
:type target: int
:type startFuel: int
:type stations: List[List[int]]
:rtype: int
"""
stations.append([target, 0])
n = len(stations)
MINF = - 10 ** 15
dp = [startFuel] + [MINF] * n
px = 0
for i, (x, f) in enumerate(stations, 1):
dp_next = [MINF] * (n + 1)
for j in range(i + 1):
if dp[j] >= x - px:
dp_next[j] = dp[j] - (x - px)
if j > 0 and dp[j-1] >= x - px:
dp_next[j] = max(dp_next[j], dp[j-1] - (x - px) + f)
px = x
dp = dp_next
for j in range(n):
if dp[j] >= 0:
return j
return -1
# cpp solution by https://leetcode.com/shdut
#include <iostream>
#include <string>
#include <stdio.h>
#include <string.h>
#include <vector>
#include <ctime>
#include <set>
#include <map>
#include <unordered_map>
#include <queue>
#include <algorithm>
#include <cmath>
#include <assert.h>
using namespace std;
#define vi vector<int>
#define pii pair<int,int>
#define x first
#define y second
#define all(x) x.begin(),x.end()
#define pb push_back
#define mp make_pair
#define SZ(x) int(x.size())
#define rep(i,a,b) for(int i=a;i<b;i++)
#define per(i,a,b) for(int i=b-1;i>=a;i--)
#define pi acos(-1)
#define mod 998244353 //1000000007
#define inf 1000000007
#define ll long long
#define DBG(x) cerr<<(#x)<<"="<<x<<"\n";
#define N 100010
template <class U,class T> void Max(U &x, T y){if(x<y)x=y;}
template <class U,class T> void Min(U &x, T y){if(x>y)x=y;}
template <class T> void add(int &a,T b){a=(a+b)%mod;}
int pow(int a,int b){
int ans=1;
while(b){
if(b&1)ans=1LL*ans*a%mod;
a=1LL*a*a%mod;b>>=1;
}
return ans;
}
pii a[510];
ll dp[510][510];
class Solution {
public:
int minRefuelStops(int target, int startFuel, vector<vector<int>>& stations) {
int sz=0;
a[sz++]={0,startFuel};
for(auto &o:stations)a[sz++]={o[0],o[1]};
a[sz++]={target,0};
rep(i,0,sz)rep(j,0,i+1)dp[i][j]=-1;
dp[0][0]=0;
rep(i,0,sz-1){
rep(j,0,i+1){
if(dp[i][j]>=0){
ll w=dp[i][j]-(a[i+1].x-a[i].x);
if(w>=0)Max(dp[i+1][j],w);
w+=a[i].y;
if(w>=0)Max(dp[i+1][j+1],w);
}
}
}
rep(i,0,sz)if(dp[sz-1][i]>=0)return i-1;
return -1;
}
};
'''
# my version of https://leetcode.com/shdut solution in cpp
# TLE solution
class Solution(object):
def minRefuelStops(self, target, startFuel, stations):
"""
:type target: int
:type startFuel: int
:type stations: List[List[int]]
:rtype: int
"""
size = 0
a = []
a.append((0, startFuel))
for x in stations:a.append((x[0],x[1]))
a.append((target, 0))
size = len(a)
dp = [[-1 for x in range(size+1)] for y in range(size+1)]
dp[0][0] = 0
for i in range(size-1):
for j in range(i+1):
if dp[i][j] >= 0:
w = dp[i][j] - (a[i+1][0] - a[i][0])
if w >= 0:dp[i+1][j] = max(dp[i+1][j], w)
w += a[i][1]
if w >= 0:dp[i+1][j+1] = max(dp[i+1][j+1],w)
for i in range(size):
if dp[size-1][i] >=0:return i-1
return -1
#awice solution
#
class Solution(object):
def minRefuelStops(self, target, startFuel, stations):
dp = [startFuel] + [0] * len(stations)
for i, (location, capacity) in enumerate(stations):
            for t in range(i, -1, -1):
if dp[t] >= location:
dp[t+1] = max(dp[t+1], dp[t] + capacity)
for i, d in enumerate(dp):
if d >= target: return i
return -1
import heapq

class Solution(object):
    def minRefuelStops(self, target, tank, stations):
pq = [] # A maxheap is simulated using negative values
stations.append((target, float('inf')))
ans = prev = 0
for location, capacity in stations:
tank -= location - prev
while pq and tank < 0: # must refuel in past
tank += -heapq.heappop(pq)
ans += 1
if tank < 0: return -1
heapq.heappush(pq, -capacity)
prev = location
return ans
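# Greedy idea of the heap version above: drive as far as the current tank
# allows, recording the fuel of every station passed; whenever the tank would
# go negative, retroactively refuel at the largest station seen so far.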
print(Solution().minRefuelStops(1, 1, []))
| [
"[email protected]"
] | |
95223a9cb75f866ad335207924223e191993dc69 | 71e18daf9e567792a6ce1ae243ba793d1c3527f0 | /ApplicationsAlgo/maze.py | 67d5c62ac216e8e824085d0bc616c55b195917da | [] | no_license | ohjooyeong/python_algorithm | 67b18d92deba3abd94f9e239227acd40788140aa | d63d7087988e61bc72900014b0e72603d0150600 | refs/heads/master | 2020-06-22T18:10:41.613155 | 2020-05-27T13:12:03 | 2020-05-27T13:12:03 | 197,767,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | def solve_maze(g, start, end):
qu = [] # 기억 장소 1: 앞으로 처리해야할 이동 경로를 큐에 저장
done = set() # 기억 장소 2: 이미 큐에 추가한 꼭짓점들을 집합에 기록
qu.append(start)
done.add(start)
while qu: #큐에 처리할 경로가 남아있으면
p = qu.pop(0) # 큐에서 처리 대상을 꺼냄
v = p[-1] # 큐에 저장된 이동 경로의 마지막 문자가 현재 처리해야할 꼭짓점
print('---')
print('p :', p)
print('v :', v)
if v == end: # 처리해야할 꼭짓점이 도착점이면 종료
return p #지금까지의 전체 이동 경로를 돌려주고 종료
for x in g[v]: # 대상 꼭짓점에 연결된 꼭짓점들 중에
if x not in done: # 아직 큐에 추가된 적이 없는 꼭짓점을
qu.append(p + x) # 이동 경로에 새 꼭짓점으로 추가하여 큐에 저장하고
done.add(x) #집합에도 추가
print('qu: ', qu)
return "?"
maze = {
'a': ['e'],
'b': ['c', 'f'],
'c': ['b', 'd'],
'd': ['c'],
'e': ['a', 'i'],
'f': ['b', 'g', 'j'],
'g': ['f', 'h'],
'h': ['g', 'l'],
'i': ['e', 'm'],
'j': ['f', 'k', 'n'],
'k': ['j', 'o'],
'l': ['h', 'p'],
'm': ['i', 'n'],
'n': ['m', 'j'],
'o': ['k'],
'p': ['l']
}
print(solve_maze(maze, 'a', 'p')) | [
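# Prints one shortest path from 'a' to 'p' as a string of vertex labels;
# BFS expands paths in length order, so the first path reaching 'p' is minimal.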
"[email protected]"
] | |
e507584e2e9212c610c211af0fbe2ff4b93f932e | 5173c3e3956387a3f2ae8fcf4aed7c7a600dac78 | /Programmers/Programmers_카펫.py | 3db67116d2c77a7a87b7fe9e13797f0a6d27976f | [] | no_license | ma0723/Min_Algorithm | df75f53f6e89b7817d4b52d686effb8236a4ddac | b02d1043008cb32e22daa9d4207b9a45f111d66f | refs/heads/master | 2023-07-25T11:00:15.397093 | 2021-08-30T02:08:05 | 2021-08-30T02:08:05 | 375,613,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | def solution(brown, yellow):
answer = []
# 노란색과 갈색으로 색칠된 격자의 개수
y_lst = []
for i in range(1, yellow+1):
# 1부터 yellow 개수까지
if yellow%i==0:
# 노란색 직사각형 경우의 약수들의 집합 (나누어 떨어지는 경우)
# 24 (1, 24) (2, 12) (3, 8) (4, 6) 등 중복 제거
row = yellow//i
col = i
if row >= col:
# 가로 길이는 세로 길이와 같거나, 세로 길이보다 깁니다
y_lst.append([row, col])
for i in y_lst:
b_row = (i[0] + 2)*2
# 가로 양쪽 가로+2만큼 2번씩
b_col = i[1]*2
# 세로 양쪽 노란색 세로만큼 2번씩
if b_row + b_col == brown:
# 테두리 1줄은 갈색으로 칠해져 있는 격자 모양 카펫
answer = [b_row//2, b_col//2+2]
# 카펫의 가로(2로 나누기), 세로 크기(2로 나누고 위아래 +1씩 총 +2) | [
"[email protected]"
] | |
ecdd5dd43ced15b6ba50c76d6d12a296b7c3b2dc | 529e713a78e82de2ae5d44cfb8ef209e0894d72a | /arcade-platformer/arcade_platformer/01_game_skeleton.py | 862af40f1113621b43a6afd046e81429f8a5f7f8 | [
"MIT"
] | permissive | realpython/materials | cd2f548276be2c82f134ca03eadb1cd279e0f26e | d2d62756d3854f54a12a767f2bf9470486c0ceef | refs/heads/master | 2023-09-05T22:12:29.806738 | 2023-08-31T20:56:28 | 2023-08-31T20:56:28 | 132,374,697 | 4,678 | 6,482 | MIT | 2023-09-12T22:22:06 | 2018-05-06T20:46:18 | HTML | UTF-8 | Python | false | false | 1,217 | py | """
Arcade Platformer
Demonstrating the capabilities of arcade in a platformer game
Supporting the Arcade Platformer article on https://realpython.com
All game artwork from www.kenney.nl
Game sounds and tile maps by author
"""
import arcade
class Platformer(arcade.Window):
def __init__(self):
pass
def setup(self):
"""Sets up the game for the current level"""
pass
def on_key_press(self, key: int, modifiers: int):
"""Processes key presses
Arguments:
key {int} -- Which key was pressed
modifiers {int} -- Which modifiers were down at the time
"""
def on_key_release(self, key: int, modifiers: int):
"""Processes key releases
Arguments:
key {int} -- Which key was released
modifiers {int} -- Which modifiers were down at the time
"""
def on_update(self, delta_time: float):
"""Updates the position of all game objects
Arguments:
delta_time {float} -- How much time since the last call
"""
pass
def on_draw(self):
pass
if __name__ == "__main__":
window = Platformer()
window.setup()
arcade.run()
| [
"[email protected]"
] | |
10460745daf408d4f3cb18983ec6bad8fdd4a296 | 88bf6991bc8f291e16b792df729d58d7eeee1b2b | proteome_tools_data/prep.py | ff3521cb31d81fe8771efab91ef33a15cf6b2e0c | [] | no_license | MatteoLacki/proteome_tools_data | 7ac3c335831911adab116cf6ce0bb08e785e733c | 84101923f95787d1ac48e47101b94b22b3301667 | refs/heads/master | 2020-08-05T02:21:06.805640 | 2019-10-29T12:16:01 | 2019-10-29T12:16:01 | 212,360,930 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,719 | py | """Preparation of files done on Linux. Execution on Windows."""
from pathlib import Path, PureWindowsPath as PWP
import pandas as pd
import json
import re
from collections import Counter
pd.set_option('display.max_rows', 4)
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_colwidth', -1)#display whole column without truncation
net = Path("/mnt/ms/restoredData/proteome_tools/net/")
ms_win = Path("//MSSERVER")
def iter_raw_folders(net):
for comp in ("idefix", "synapt"):
yield from net.glob('{}/WIRD_GESICHERT/*/*.raw'.format(comp))
res = pd.DataFrame({'Raw_File': p.stem,
'path': str(ms_win/"/".join(p.parts[3:]))
} for p in iter_raw_folders(net))
# project description
data_on_project = Path('/home/matteo/Projects/proteome_tools')
plates = pd.read_excel(data_on_project/"Sample_RAWFile_List.xlsx")
plates.columns = [p.replace(' ','_') for p in plates.columns]
plates = plates.iloc[:,0:4]
DDA = plates[plates.MS_Methode.str.contains('DDA')]
plates = plates[~plates.MS_Methode.str.contains('DDA')].copy()
def pad(s, k, v='0'):
"""Pad stringv s to the left with v to match length k."""
return v*(k-len(s)) + s
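# e.g. pad('7', 3) -> '007'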
def get_fasta_file(s):
"""Get the name of the fasta file from the sample name."""
f = s.split('-')[-1]
return pad(f, 3) + '.fasta'
fastas_pool_1 = ms_win/"restoredData/proteome_tools/automation/db_jorg_pool1"
fastas_pool_2 = ms_win/"restoredData/proteome_tools/automation/db_jorg_pool2"
fasta_paths = {'Pools Plate 1': fastas_pool_1,
'Pools Plate 2': fastas_pool_2,
'missing first Plate 2': Path(''),
'Second Pool Plate 1': ms_win/"restoredData/proteome_tools/automation/db_jorg_pool2",
'Second Pool Plate 2': ms_win/"restoredData/proteome_tools/automation/db_jorg_pool2",
'Third Pool Plate 2': Path('')}
plates['parsed_name'] = [re.sub(' \d\d\d\d-\d\d\d-\d+','', sn).replace('TUM ','') for sn in plates.Sample_Name]
counts = Counter(plates.parsed_name)
plates['fasta_file'] = plates.Sample_Name.apply(get_fasta_file)
plates['fasta_fold'] = plates.parsed_name.map(fasta_paths)
plates['fasta_file'] = [ff/f for ff, f in zip(plates.fasta_fold, plates.fasta_file)]
plates = plates.merge(res, 'left', validate='one_to_one')
plates['top_fold'] = [Path(p).parent.name + '/' + Path(p).name for p in plates.path]
plates = plates.set_index('Raw_File')
pool1_bothplates = plates[plates.Sample_Name.str.contains('-054-')]
pool2_bothplates = plates[plates.Sample_Name.str.contains('-086-')]
db2 = set(p.name for p in Path("/mnt/ms/restoredData/proteome_tools/automation/db_jorg_pool2").glob("*.fasta"))
assert all(p.name in db2 for p in pool2_bothplates.fasta_file), "Some fasta files are missing."
# COMPARING WITH THE OLD LIST
# with (data_on_project/'plate1.json').open('r', encoding ="utf-8") as f:
# plate1 = json.load(f)
# analysed = {Path(p).stem for p,f in plate1}
# A = plates.loc[analysed]
# A_ok = A[A.Sample_Name.str.contains('-054-')]
# '127' in {Path(f).stem for f in A_ok.fasta_file}
# with (data_on_project/'good_files.json').open('w', encoding ="utf-8") as f:
# json.dump(list(A_ok.top_fold), f, indent=2)
pool1 = list(zip(pool1_bothplates.path, (str(f) for f in pool1_bothplates.fasta_file)))
pool2 = list(zip(pool2_bothplates.path, (str(f) for f in pool2_bothplates.fasta_file)))
with (data_on_project/'pool1.json').open('w', encoding ="utf-8") as f:
json.dump(pool1, f, indent=4)
with (data_on_project/'pool2.json').open('w', encoding ="utf-8") as f:
json.dump(pool2, f, indent=4)
net_folder = Path('/mnt/ms/users/Matteo/poligono')
# {Path(p).stem for p,f in pool2 if Path(p).stem[0] == 'S'}
# copy fasta files to the existing folders
| [
"[email protected]"
] | |
824d7fa924c868bae983b6fa5132365e22b602c5 | 1ecde4178548f331f15717f245e3f657b58b9993 | /yyx_crawler/scrapySchool_England/scrapySchool_England/spiders/UniversityOfSurrey_R.py | c9aa92d5ecee52a7474f655775a5c8c3f24da68f | [] | no_license | gasbarroni8/python_spider | 296dcb7c3fd9dd028423fe5ec0a321d994478b15 | 7935fa462926bc8ea9bf9883bd15265dd0d3e6df | refs/heads/master | 2023-03-26T05:22:59.858422 | 2019-04-15T07:17:56 | 2019-04-15T07:17:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,014 | py | import scrapy
import re
import json
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapySchool_England.clearSpace import clear_space, clear_lianxu_space, clear_space_str
from scrapySchool_England.items import ScrapyschoolEnglandItem1
from scrapySchool_England.getItem import get_item1
from scrapySchool_England.getTuition_fee import getTuition_fee
from scrapySchool_England.getIELTS import get_ielts
from scrapySchool_England.getStartDate import getStartDate
from scrapySchool_England.remove_tags import remove_class
from scrapySchool_England.getDuration import getIntDuration, getTeachTime
import requests
from lxml import etree
class UniversityOfSurrey_RSpider(scrapy.Spider):
name = "UniversityOfSurrey_R"
start_urls = ["https://www.surrey.ac.uk/postgraduate/research"]
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3472.3 Safari/537.36"}
def parse(self, response):
links = response.xpath("//div[@class='view-content']/div//a/@href").extract()
# print(len(links))
links = list(set(links))
# print(len(links))
for link in links:
url = "https://www.surrey.ac.uk" + link
# print(url)
yield scrapy.Request(url, callback=self.parse_data)
def parse_data(self, response):
item = get_item1(ScrapyschoolEnglandItem1)
item['university'] = "University of Surrey"
item['url'] = response.url
# 授课方式
item['teach_type'] = 'phd'
# 学位类型
item['degree_type'] = 3
print("===============================")
print(response.url)
try:
# 专业、学位类型
programme_en = response.xpath("//h1[@class='text-center my-0']//text()").extract()
programme_en_list = ''.join(programme_en).split("\n")
# print(programme_en_list)
if len(programme_en_list) > 1:
item['programme_en'] = programme_en_list[0].strip()
item['degree_name'] = ''.join(programme_en_list[1:]).strip()
# print("item['programme_en'] = ", item['programme_en'])
# print("item['degree_name'] = ", item['degree_name'])
overview = response.xpath(
"//*[contains(text(),'Course facts')]/../preceding-sibling::*").extract()
item['overview_en'] = remove_class(clear_lianxu_space(overview))
# print("item['overview_en'] = ", item['overview_en'])
teach_time = response.xpath("//td[@headers='view-field-study-mode-table-column'][contains(text(),'Full-time')]//text()").extract()
item['teach_time'] = getTeachTime(''.join(teach_time))
# print("item['teach_time'] = ", item['teach_time'])
duration = response.xpath("//td[@headers='view-field-study-mode-table-column'][contains(text(),'Full-time')]/following-sibling::*[1]//text()").extract()
clear_space(duration)
# print(duration)
if len(duration) != 0:
duration_list = getIntDuration(''.join(duration))
# print("duration_list: ", duration_list)
if len(duration_list) == 2:
item['duration'] = duration_list[0]
item['duration_per'] = duration_list[-1]
# print("item['duration'] = ", item['duration'])
# print("item['duration_per'] = ", item['duration_per'])
start_date = response.xpath(
"//td[@headers='view-field-study-mode-table-column'][contains(text(),'Full-time')]/following-sibling::*[last()]//text()").extract()
# print("start_date: ", start_date)
item['start_date'] = getStartDate(''.join(start_date))
# print("item['start_date'] = ", item['start_date'])
item['location'] ='01SE01, Senate House, University of Surrey, Guildford, Surrey GU2 7XH'
# print("item['location'] = ", item['location'])
career = response.xpath("//h2[contains(text(),'Professional development')]/preceding-sibling::*[1]/following-sibling::*[position()<last()-1]").extract()
if len(career) == 0:
career = response.xpath("//h2[contains(text(),'Career prospects')]/preceding-sibling::*[1]/following-sibling::*[position()<last()-1]").extract()
if len(career) == 0:
career = response.xpath(
"//h2[contains(text(),'Graduate prospects')]/preceding-sibling::*[1]/following-sibling::*[position()<last()-1]").extract()
# print(career)
item['career_en'] = remove_class(clear_lianxu_space(career))
print("item['career_en'] = ", item['career_en'])
modules = "<h2>Research themes</h2>"
modules1 = response.xpath("//h2[contains(text(),'Research themes')]/following-sibling::*[1]").extract()
item['modules_en'] = modules + remove_class(clear_lianxu_space(modules1))
print("item['modules_en'] = ", item['modules_en'])
entry_requirements = response.xpath("//div[@id='entry-collapse']/*//text()").extract()
item['rntry_requirements'] = clear_lianxu_space(entry_requirements)
# print("item['rntry_requirements'] = ", item['rntry_requirements'])
ielts_str = response.xpath("//h2[contains(text(),'English language requirements')]/following-sibling::p[position()<4]//text()").extract()
ielts_re = re.findall(r"^IELTS.{1,80}", ''.join(ielts_str))
# print(ielts_re)
item['ielts_desc'] = ''.join(ielts_re)
print("item['ielts_desc'] = ", item['ielts_desc'])
ieltsDict = get_ielts(item['ielts_desc'])
item['ielts'] = ieltsDict.get("IELTS")
item['ielts_l'] = ieltsDict.get("IELTS_L")
item['ielts_s'] = ieltsDict.get("IELTS_S")
item['ielts_r'] = ieltsDict.get("IELTS_R")
item['ielts_w'] = ieltsDict.get("IELTS_W")
print("item['IELTS'] = %sitem['IELTS_L'] = %sitem['IELTS_S'] = %sitem['IELTS_R'] = %sitem['IELTS_W'] = %s==" % (
item['ielts'], item['ielts_l'], item['ielts_s'], item['ielts_r'], item['ielts_w']))
tuition_fee = response.xpath("//div[@id='fees-collapse']//td[@headers='view-field-study-mode-table-column--2'][contains(text(),'Full-time')]/following-sibling::*[last()]//text()").extract()
# print(tuition_fee)
if len(tuition_fee) > 0:
item['tuition_fee'] = int(''.join(tuition_fee[0]).replace("£", "").replace(",", "").strip())
item['tuition_fee_pre'] = "£"
print("item['tuition_fee'] = ", item['tuition_fee'])
print("item['tuition_fee_pre'] = ", item['tuition_fee_pre'])
how_to_apply_url = response.xpath(
"//span[@class='studymode'][contains(text(), 'Full-time')]/following-sibling::span[@class='applink']/a/@href").extract()
if len(how_to_apply_url) > 0:
how_to_apply_url = ''.join(how_to_apply_url[0])
# print(how_to_apply_url)
item['apply_proces_en'] = self.parse_apply_proces_en(how_to_apply_url)
print("item['apply_proces_en'] = ", item['apply_proces_en'])
# https://www.surrey.ac.uk/china/entry-requirements
item['require_chinese_en'] = """"""
department_dict = {}
department1_list = ["Criminology", "Criminology and Sociology", "Law with Criminology", "Media, Culture and Society", "Media Studies with Film Studies", "Politics and Sociology", "Sociology", "Criminology and Social Research", "Criminology and Social Research (Corporate Crime and Corporate Responsibility)", "Criminology and Social Research (Cybercrime and Cybersecurity)", "Social Research Methods", "Sociology", "Economics", "Business Economics", "Economics and Finance", "Economics and thetics", "Economics", "Business Economics and Finance", "Economics", "Economics and Finance", "International Economics, Finance and Development", "Economics (Four Year)", "Law", "Law with Criminology", "Law with International Relations", "International Commercial Law", "Law", "Accounting and Finance", "Business and Retail nagement", "Business nagement", "Business nagement (Entrepreneurship)", "Business nagement (HRM)", "Business nagement (rketing)", "International Business nagement", "Accounting and Finance", "Business Administration", "Business Analytics", "Corporate Finance", "Entrepreneurship", "Hun Resources nagement", "International Business nagement", "International Financial nagement", "International rketing nagement", "International Retail rketing in the Digital Environment", "Investment nagement", "nagement Education", "rketing nagement", "Occupational and Organizational Psychology", "Operations and Supply Chain in the Digital Era", "nagement and Business", "Creative Music Technology", "Digital Media Arts", "Film and Video Production Technology", "Music", "Music and Sound Recording (Tonmeister)", "Music (Composition)", "Music (Conducting)", "Music (Creative Practice)", "Music (Musicology)", "Music (Perfornce)", "Digital Media Arts", "Music", "Sound Recording", "English Literature with Politics", "International Relations", "Politics", "Politics and Economics", "Politics and Sociology", "Public Affairs", "International Relations", "Public Affairs", "International Event nagement", "International Hospitality and Tourism nagement", "International Hospitality nagement", "International Tourism nagement", "Air Transport nagement", "International Events nagement", "International Events nagement (Eurosters)", "Eurosters", "International Hospitality nagement (Eurosters)", "International Hotel nagement", "International Tourism nagement", "International Tourism nagement (Eurosters)", "Eurosters", "Strategic Hotel nagement", "Strategic Tourism nagement and rketing", "Hospitality and Tourism nagement", "English Literature", "English Literature and French", "English Literature and Gern", "English Literature and Spanish", "English Literature with Creative Writing", "English Literature with Film Studies", "English Literature with Politics", "English Literature with Sociology", "Creative Writing", "Creative Writing", "English Literature", "Creative Writing", "English Literature", "Business nagement and French", "Business nagement and Gern", "Business nagement and Spanish", "English Literature and French", "English Literature and Gern", "English Literature and Spanish", "Modern Languages (French and Gern)", "Modern Languages (French and Spanish)", "Modern Languages (Gern and Spanish)", "Communication and International rketing", "Intercultural Communication with International Business", "Interpreting", "Interpreting (Chinese Pathway)", "Teaching English to Speakers of Other Languages (TESOL)", "Translation", "Translation and Interpreting", "Translation and Interpreting Studies", "Film Studies", "Linguistics", "Literary 
and Cultural Studies", "Translation and Interpreting", "Acting", "Actor-Musician", "Dance", "Musical Theatre", "Theatre", "Theatre and Perfornce", "Theatre Production", "Acting", "Musical Theatre", "Stage and Production nagement", "Theatre", "Acting", "Musical Theatre", "Dance", "Theatre",]
department1_list = list(set(department1_list))
department1_value = "Faculty of Arts and Social Sciences"
for d in department1_list:
department_dict[d.lower()] = department1_value
department2_list = ["Practitioner Doctorate in Sustainability", "Environment and Sustainability", "Corporate Environmental Management", "Environmental Strategy", "Sustainable Development", "Chemistry", "Chemistry", "Chemistry", "Chemistry with Forensic Investigation", "Medicinal Chemistry", "Mathematics", "Mathematics with Statistics", "Mathematics with Music", "Financial Mathematics", "Mathematics and Physics", "Economics and Mathematics", "Mathematics", "Mathematics and Physics", "Physics", "Physics with Astronomy", "Physics with Nuclear Astrophysics", "Physics with Quantum Technologies", "Medical Physics", "Nuclear Science and Applications", "Physics", "Radiation and Environmental Protection", "Physics", "Information Systems", "Information Security", "Advanced Materials", "Biomedical Engineering",]
department2_list = list(set(department2_list))
department2_value = "Faculty of Engineering and Physical Sciences"
for d in department2_list:
department_dict[d.lower()] = department2_value
department3_list = ["Nutrition", "Nutrition and Dietetics", "Nutrition and Food Science", "Human Nutrition", "Nutritional Medicine", "International English Language Testing System (IELTS)", "Developmental Psychology in Research and Practice", "Health Psychology", "Psychology (Conversion)", "Primary and Community Care (SPA Community Children's Nursing)", "Primary and Community Care (SPA District Nursing)", "Primary and Community Care (SPA General Practice Nursing)", "Public Health Practice (SCPHN Health Visiting)", "Public Health Practice (SCPHN School Nursing)", "Advanced Clinical Practice", "Advanced Practitioner (Primary and Community Care)", "Advanced Practitioner (Public Health Practice)", "Education for Health Professionals", "Education for Professional Practice", "Healthcare Practice", "Leadership and Healthcare", "Physician Associate", "Primary and Community Care (SPA Community Children's Nursing)", "Primary and Community Care (SPA District Nursing)", "Primary and Community Care (SPA General Practice Nursing)", "Public Health Practice (SCPHN Health Visiting)", "Public Health Practice (SCPHN School Nursing)",]
department3_list = list(set(department3_list))
department3_value = "Faculty of Health and Medical Sciences"
for d in department3_list:
department_dict[d.lower()] = department3_value
item['department'] = department_dict.get(item['programme_en'].lower())
print("item['department: ", item['department'])
yield item
except Exception as e:
with open(item['university'] + str(item['degree_type']) + ".txt", 'a+', encoding="utf-8") as f:
f.write(str(e) + "\n" + response.url + "\n========================")
print("异常:", str(e))
print("报错url:", response.url)
def parse_apply_proces_en(self, how_to_apply_url):
data = requests.get(how_to_apply_url, headers=self.headers)
response = etree.HTML(data.text)
# print(response)
apply_proces_en = response.xpath("//div[@class='layout-row intro summary']")
# 将Element转换成HTML格式
apply = etree.tostring(apply_proces_en[0], encoding='unicode', pretty_print=False, method='html')
apply = remove_class(clear_space_str(apply))
return apply
| [
"[email protected]"
] | |
36452b274c24a8d85cfad2937c0a5943990eea13 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/surface/access_context_manager/levels/update.py | f22234640a48085e7e67ec5bc155d8fda74563b6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 2,453 | py | # -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud access-context-manager levels update` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.accesscontextmanager import levels as levels_api
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.accesscontextmanager import levels
from googlecloudsdk.command_lib.accesscontextmanager import policies
@base.ReleaseTracks(base.ReleaseTrack.GA)
class UpdateLevelsGA(base.UpdateCommand):
"""Update an existing access level."""
_API_VERSION = 'v1'
@staticmethod
def Args(parser):
UpdateLevelsGA.ArgsVersioned(parser, version='v1')
@staticmethod
def ArgsVersioned(parser, version='v1'):
levels.AddResourceArg(parser, 'to update')
levels.AddLevelArgs(parser, version=version)
levels.AddLevelSpecArgs(parser, version=version)
def Run(self, args):
client = levels_api.Client(version=self._API_VERSION)
level_ref = args.CONCEPTS.level.Parse()
policies.ValidateAccessPolicyArg(level_ref, args)
mapper = levels.GetCombineFunctionEnumMapper(version=self._API_VERSION)
combine_function = mapper.GetEnumForChoice(args.combine_function)
return client.Patch(
level_ref,
description=args.description,
title=args.title,
combine_function=combine_function,
basic_level_conditions=args.basic_level_spec)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class UpdateLevelsBeta(UpdateLevelsGA):
_API_VERSION = 'v1beta'
@staticmethod
def Args(parser):
UpdateLevelsGA.ArgsVersioned(parser, version='v1beta')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateLevelsAlpha(UpdateLevelsGA):
_API_VERSION = 'v1alpha'
@staticmethod
def Args(parser):
UpdateLevelsGA.ArgsVersioned(parser, version='v1alpha')
| [
"[email protected]"
] | |
ed024b233a743c82860ea0b7ae492792813afcad | 3b5fcac24cff965124c18f3327a4b105bfaeac1c | /forms/forms/formsApp/urls.py | bd5071c1cd1244e7b79377906135872de6144685 | [] | no_license | Sanil2108/django | ef75e06970982da02d4e65d7e0f689be90d5ec03 | 21e546f6f7d63cc859ea23cec8fb4a15a82541f9 | refs/heads/master | 2021-06-26T01:09:03.533520 | 2020-12-05T19:22:17 | 2020-12-05T19:22:17 | 176,133,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from django.urls import path
from . import views
urlpatterns = [
path('form1/', views.form1, name = 'form1'),
] | [
"[email protected]"
] | |
228600e7a43d9390eb83a4bc4a96de585a9e6f59 | a7596165a29e5186bc6c4718e3b6e835939b105d | /desktop/libs/libsolr/src/libsolr/conf.py | dbc65d6d5d35fa9bcc38744608dcae449b9ad21d | [
"Apache-2.0"
] | permissive | lockhart39/HueQualityAndIngestionApp | f0c778665f0fbe699ec30e0df5e9f3ed8a9c3384 | c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c | refs/heads/master | 2021-08-20T00:31:29.481333 | 2017-11-27T19:22:16 | 2017-11-27T19:22:16 | 112,237,923 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _t
from desktop.lib.conf import Config, coerce_bool
from desktop.conf import default_ssl_validate
SSL_CERT_CA_VERIFY = Config(
key="ssl_cert_ca_verify",
help=_t("In secure mode (HTTPS), if Solr SSL certificates have to be verified against certificate authority"),
dynamic_default=default_ssl_validate,
type=coerce_bool
)
SOLR_ZK_PATH = Config(
key="solr_zk_path",
help=_t("Default path to Solr in ZooKeeper"),
default='/solr',
type=str
)
| [
"[email protected]"
] | |
c39fd1ca1f556f4ee4250827d5a78b0030d68abf | 6da0547afcecb3444d0b429161e46bc5a38e14ab | /demo_project/urls.py | 5e53e90908206cd1f6045e53f5f990be0f0c1e38 | [] | no_license | rashidhamid139/DjangoCustomAdmin | 53e440071a31d3ab6cff308c404ba89cc40cd6a0 | 3b01f38201754ae1370684cafb58f4005e5c8b35 | refs/heads/master | 2022-06-17T10:15:34.213179 | 2020-05-11T15:12:09 | 2020-05-11T15:12:09 | 261,787,315 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('email/', include('sendemail.urls')),
path('posts/', include('posts.urls')),
path('', include('pages.urls')),
path('users/', include('users.urls')),
path('accounts/', include('allauth.urls')),
path('payment/', include('payment.urls'))
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"[email protected]"
] | |
171cc337c57713480ffad953a757cb65ff9424ef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03646/s301674634.py | c91b7b40947c4be6233524eaca5442719d6c84a0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | import sys
def main():
input = sys.stdin.readline
K=int(input())
N=max(2,min(50,K))
ans=[i+K//N for i in range(N)]
m=K%N
for i in range(m):
for j in range(N):
if i==j: ans[j]+=N
else: ans[j]-=1
print(N)
print(*ans)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
e89f1bc3975c4e316d484945057a258863a59f6e | dd35833bead7de2f2ca7affd985ac0d345b2ab6e | /apps/useroperation/views.py | 57349580d24bee20652e0332cd757dbe46a503ec | [] | no_license | lhsheild/MxShop | df14c11aa7457f304194ff099a35869d83f0d9a7 | 811be4dad55284e737c80ebd4d00c079837393f2 | refs/heads/master | 2020-05-27T16:23:00.578686 | 2019-09-10T08:09:01 | 2019-09-10T08:09:01 | 188,130,934 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,877 | py | from rest_framework.authentication import SessionAuthentication
from rest_framework.mixins import CreateModelMixin, DestroyModelMixin, ListModelMixin, RetrieveModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import GenericViewSet, ModelViewSet
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from utils.permissions import IsOwnerOrReadOnly
from .models import UserFav, UserLeavingMessage, UserAddress
from .serializers import UserFavSerializer, UserFavDetailSerializer, UserLeavingMessageSerializer, UserAddressSerializer
# Create your views here.
class UserFavViewset(CreateModelMixin, DestroyModelMixin, ListModelMixin, RetrieveModelMixin, GenericViewSet):
"""
    list: return the current user's list of favorited goods
    retrieve: check whether a given product has already been favorited
    create: favorite a product
"""
# queryset = UserFav.objects.all()
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
# serializer_class = UserFavSerializer
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
lookup_field = 'goods_id'
    def perform_create(self, serializer):  # update the goods' favorite count; could also be implemented with signals
instance = serializer.save()
goods = instance.goods
goods.fav_num += 1
goods.save()
    def perform_destroy(self, instance):  # decrement the goods' favorite count; could also be implemented with signals
goods = instance.goods
goods.fav_num -= 1
goods.save()
instance.delete()
def get_queryset(self):
return UserFav.objects.filter(user=self.request.user)
def get_serializer_class(self):
if self.action == 'list':
return UserFavDetailSerializer
elif self.action == 'create':
return UserFavSerializer
return UserFavSerializer
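# A minimal sketch of the signal-based alternative mentioned in the comments
# above (hypothetical signals.py; the receiver would still need to be wired up
# in the app's AppConfig.ready()):
#
#   from django.db.models.signals import post_save
#   from django.dispatch import receiver
#
#   @receiver(post_save, sender=UserFav)
#   def on_fav_created(sender, instance, created, **kwargs):
#       if created:
#           instance.goods.fav_num += 1
#           instance.goods.save()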
class LeavingMessageViewset(ListModelMixin, CreateModelMixin, RetrieveModelMixin, DestroyModelMixin, GenericViewSet):
"""
    list: return the user's messages
    create: add a message
    destroy: delete a message
"""
serializer_class = UserLeavingMessageSerializer
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
def get_queryset(self):
return UserLeavingMessage.objects.filter(user=self.request.user)
class AddressViewset(ModelViewSet):
"""
    Shipping address management
    list: return the list of shipping addresses
    create: add a new shipping address
    destroy: delete a shipping address
    update: update a shipping address
    retrieve: return the details of a shipping address
"""
serializer_class = UserAddressSerializer
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
def get_queryset(self):
return UserAddress.objects.filter(user=self.request.user)
| [
"[email protected]"
] | |
56885001d9dcf999407005b8ba85693c10b09566 | bd0dc9a8d24863f7353c4124ce7e3c6b25e94910 | /test/test_sampling.py | e91fe37a33293f41c5d7049f2544d29df6d104b3 | [
"BSD-3-Clause"
] | permissive | qbektrix/profiling | 6cdc7a07a10955e993988217a720509bd4b961c4 | 89d1bc572c2502e02aeb822134453fd8d228e526 | refs/heads/master | 2021-01-21T00:53:25.880636 | 2015-08-26T15:00:39 | 2015-08-26T15:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,110 | py | # -*- coding: utf-8 -*-
from __future__ import division
import sys
import pytest
from profiling.sampling import SamplingProfiler
from profiling.sampling.samplers import ItimerSampler, TracingSampler
from utils import find_stats, spin
def spin_100ms():
spin(0.1)
def spin_500ms():
spin(0.5)
def _test_sampling_profiler(sampler):
profiler = SamplingProfiler(top_frame=sys._getframe(), sampler=sampler)
with profiler:
spin_100ms()
spin_500ms()
stat1 = find_stats(profiler.stats, 'spin_100ms')
stat2 = find_stats(profiler.stats, 'spin_500ms')
ratio = stat1.deep_hits / stat2.deep_hits
    # 1:5 expected, but tolerate (0.8~1.2):5
assert 0.8 <= ratio * 5 <= 1.2
@pytest.mark.flaky(reruns=10)
def test_itimer_sampler():
_test_sampling_profiler(ItimerSampler(0.0001))
@pytest.mark.flaky(reruns=10)
def test_tracing_sampler():
_test_sampling_profiler(TracingSampler(0.0001))
@pytest.mark.flaky(reruns=10)
def test_tracing_sampler_does_not_sample_too_often():
# pytest-cov cannot detect a callback function registered by
# :func:`sys.setprofile`.
class fake_profiler(object):
samples = []
@classmethod
def sample(cls, frame):
cls.samples.append(frame)
@classmethod
def count_and_clear_samples(cls):
count = len(cls.samples)
del cls.samples[:]
return count
sampler = TracingSampler(0.1)
sampler._profile(fake_profiler, None, None, None)
assert fake_profiler.count_and_clear_samples() == 1
sampler._profile(fake_profiler, None, None, None)
assert fake_profiler.count_and_clear_samples() == 0
spin(0.5)
sampler._profile(fake_profiler, None, None, None)
assert fake_profiler.count_and_clear_samples() == 1
def test_not_sampler():
with pytest.raises(TypeError):
SamplingProfiler(sampler=123)
def test_sample_1_depth():
frame = sys._getframe()
while frame.f_back is not None:
frame = frame.f_back
assert frame.f_back is None
profiler = SamplingProfiler()
profiler.sample(frame)
| [
"[email protected]"
] | |
0b7bab2ef4cad9a4121b6af03e00f73434566b2c | 69be26f4fd44ed3bac1c9dd0941e435b2b2728af | /backend/products/urls.py | b93ad9e8922f0977eae8878c66380ab40a41d404 | [] | no_license | mahidulmoon/djreact-e-commerce | 0078598ab2327e4ef5b992c1fd8f202aca4c705e | 0fb7d50b408710d90af43db9326d9fff6b03804f | refs/heads/master | 2023-02-15T10:55:14.830589 | 2020-05-20T16:45:27 | 2020-05-20T16:45:27 | 265,608,114 | 1 | 0 | null | 2021-01-06T02:55:27 | 2020-05-20T15:28:50 | JavaScript | UTF-8 | Python | false | false | 293 | py | from django.urls import path,include
from rest_framework import routers
from .views import ShirtViewset,MobileViewset
router = routers.DefaultRouter()
router.register('shirtlist',ShirtViewset)
router.register('mobilelist',MobileViewset)
urlpatterns = [
path('',include(router.urls)),
]
| [
"[email protected]"
] | |
300d36bfc3dda2289fcb3e130725c170a3571471 | babe7369133362ba58d0fa12e2b5b7e5535cc78a | /venv/bin/django-admin.py | a189e715b9c5bed3d2c43dc06505ebd3b9d33938 | [] | no_license | bradykim7/blog | 4391c7762997f41e750aa6929fb55cf232be1f3e | 585ce3af28b5afdc5b137b956c4143e523ce36bd | refs/heads/main | 2023-04-08T05:18:59.683323 | 2021-04-12T07:31:11 | 2021-04-12T07:31:11 | 296,575,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #!/Users/mskim/study/pythonWork/config/venv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
2c6e7507e4d055e36773baca642ec3c3dab8cc84 | 5a4d5ee624b375ece06fda1467afe18beb69c14b | /Algorithm/BOJ/12865.knapsack.py | 2825339abbe9002cecb181cc88098d9982880547 | [] | no_license | Knightofcydonia51/TIL | cd10dab949659bc827118ee42b25d926336dce23 | 78d7e8617f4abed9932a557c12e68bd950f8230d | refs/heads/master | 2022-12-26T00:10:06.262200 | 2022-05-26T01:12:32 | 2022-05-26T01:12:32 | 195,938,010 | 0 | 0 | null | 2022-12-16T01:03:09 | 2019-07-09T05:22:49 | Python | UTF-8 | Python | false | false | 549 | py | import sys
sys.stdin=open('12865.knapsack.txt')
N,K=map(int,input().split())
stuffs=[list(map(int,input().split())) for x in range(N)]
dp=[[0 for x in range(K+1)]for y in range(N+1)]
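# dp[k][j]: best total value achievable using only the first k items within capacity j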
# N: number of items, K: knapsack capacity
for k in range(1,N+1):
for j in range(K+1):
        # applies from the capacity j where the k-th item first fits, onward
weight,value=stuffs[k-1]
if j>=stuffs[k-1][0]:
dp[k][j]=max(dp[k-1][j-weight]+value,dp[k-1][j])
else:
dp[k][j]=dp[k-1][j]
print(max(dp[-1]))
| [
"[email protected]"
] | |
f8191ca72ddf845194adef5e0ffa2088accb0580 | a0784b1a66a6c1a89ee8a75e32cd48d2c168931b | /extras/tools/rst_tokens.py | 9c3614127a376786a554d7277a8ff3cbca89df9c | [
"MIT"
] | permissive | cltrudeau/purdy | ebe5d8b556dadc0a4eb04018826c066b83617f71 | 4ff2d5b33771266d46260ee9ba6503bb4895ab2f | refs/heads/master | 2023-07-08T08:23:08.409053 | 2023-06-29T21:37:29 | 2023-06-29T21:37:29 | 210,162,520 | 10 | 3 | MIT | 2021-03-10T21:55:26 | 2019-09-22T14:40:17 | Python | UTF-8 | Python | false | false | 645 | py | #!/usr/bin/env python
import argparse
from pygments.lexers.markup import RstLexer
# =============================================================================
parser = argparse.ArgumentParser(description=('Prints out the tokens '
'generated by pygments.lexers.markup.RstLexer'))
parser.add_argument('files', type=str, nargs='+',
help='One or more file names to lex and parse')
args = parser.parse_args()
# --- Do the parsing
lexer = RstLexer()
with open(args.files[0]) as f:
contents = f.read()
for token, text in lexer.get_tokens(contents):
if text == '\n':
text = '\\n'
print('%s: %s' % (token, text))
| [
"[email protected]"
] | |
a30101b7f38ba1fddd661a493cdcfae8287a25d6 | b6bcaae5169cf20a84edafae98ba649dab6fc67c | /crowdsourcing/migrations/0034_auto_20150817_2049.py | b473f2c71a406fed6d27fb1b2fe3e7e97180cfcc | [
"MIT"
] | permissive | shriyanka/daemo-forum | b7eb84a46799d8c6bcb29a4f5c9996a3d2f40351 | 58c555f69208beedbb0c09f7b7d1e32ab741b2c5 | refs/heads/master | 2023-01-12T05:13:48.804930 | 2015-09-20T01:52:29 | 2015-09-20T01:52:29 | 40,193,653 | 1 | 0 | MIT | 2022-12-26T19:49:24 | 2015-08-04T15:42:57 | Python | UTF-8 | Python | false | false | 409 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0033_templateitem_position'),
]
operations = [
migrations.AlterField(
model_name='templateitem',
name='position',
field=models.IntegerField(),
),
]
| [
"[email protected]"
] | |
0e39f0a5b1327cea53f1645eb0d644c1a1759991 | 64eb1cfb19e01c629c3ef7fc40fe4dd4dda078ff | /pyunitgrading/testrunner.py | 3da1d1b15800f8a8b6123cde32e0ee8209a01f7d | [
"BSD-3-Clause"
] | permissive | stevecassidy/pyunitgrading | 2406942d6fb851a5576da0155cac7410687f5ff4 | 183677f89f385201dd54959b4bc84a6236cee59e | refs/heads/master | 2023-03-15T17:21:13.564061 | 2022-03-03T23:38:03 | 2022-03-03T23:38:03 | 32,105,841 | 0 | 1 | BSD-3-Clause | 2023-03-14T01:00:56 | 2015-03-12T22:16:10 | Python | UTF-8 | Python | false | false | 7,487 | py | """
run unit tests for a set of student submissions downloaded from iLearn (moodle)
"""
import unittest
import os
import traceback
import sys
import importlib
import imp
import multiprocessing
import re
import shutil
import csv
if sys.version_info < (3,0):
from ConfigParser import ConfigParser
else:
from configparser import ConfigParser
import subprocess
import datetime
from pyunitgrading.filehandling import scan_or_unpack_submissions
def report_error(pid, message):
"""Report an error for this student"""
out = os.path.join(pid, "test_errors.txt")
ostream = open(out,"a")
print("\tError running tests:", message)
ostream.write("Error running tests\n")
ostream.write(message + "\n")
ostream.close()
def find_sourcedir(basedir, modulename):
"""Locate the sourcedir by looking for the modulename
to be tested inside a subfolder of basedir"""
for dirpath, dirnames, filenames in os.walk(basedir):
if modulename in filenames:
return dirpath
# if we don't find it, raise an error
report_error(basedir, "Can't locate module %s" % modulename)
return None
class TestRunner(multiprocessing.Process):
"""Class to run a set of unit tests in a separate process"""
def __init__(self, basedir, sid, testmodulename, targetname, modules, outputname, queue):
"""Initialise a test runner
basedir - directory in which student submissions are unpacked
sid - student id
testmodulename - name of the test module to run
targetname - name of a source module in the submission
modules - list of python modules to be copied into project, including test module
outputname - name for file to write test output to
queue - message queue to send result back to parent process
"""
multiprocessing.Process.__init__(self)
self.sid = sid
self.modules = modules
self.testmodulename = testmodulename
self.targetname = targetname
self.queue = queue
self.result = (self.sid, 0, 0, 0, 0)
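        # result tuple layout: (student id, tests run, failures, errors, total mark)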
self.rootdir = os.getcwd()
out = os.path.join(basedir, sid, outputname)
self.ostream = open(out,"w")
self.sourcedir = find_sourcedir(os.path.join(basedir, sid), self.targetname)
def __report_error(self, message=""):
"""Report an error, either an explicit message
or just dump out crash info"""
print("\tError running test: ",)
self.ostream.write("Error running tests\n")
if message != "":
self.ostream.write(message + "\n")
print(message)
else:
info = sys.exc_info()
self.ostream.write(str(info))
traceback.print_exc(None, self.ostream)
def run(self):
# if there is no source to load, we quit now
if self.sourcedir == None:
self.__report_error("Source file not found in submission")
self.ostream.close()
return
print("DIR", self.sourcedir)
# get the python script to test from the given directory: add it to the path
sys.path.insert(0, '.')
# any modules already in this dir should be reloaded
reloadmods = []
for modfile in os.listdir(self.sourcedir):
if modfile.endswith('.py'):
modname, ext = os.path.splitext(modfile)
#print("add to reload queue: ", modname)
reloadmods.append(modname)
# copy the test module file into the target dir
for m in self.modules:
#print("COPYING: ", m, " to ", self.sourcedir)
shutil.copy(m, self.sourcedir)
try:
os.chdir(self.sourcedir)
# reload any user modules
for modname in reloadmods:
if modname in sys.modules:
#print('\treloading', sys.modules[modname])
target = imp.reload(sys.modules[modname])
testmodule = importlib.import_module(self.testmodulename)
# load all tests in the module
suite = unittest.defaultTestLoader.loadTestsFromModule(testmodule)
# run the tests
result = unittest.TextTestRunner(stream=self.ostream, verbosity=2).run(suite)
totalmark = result.testsRun-len(result.errors)-len(result.failures)
self.result = (self.sid, result.testsRun, len(result.failures), len(result.errors), totalmark)
# put the result onto the queue to send back to the caller
self.queue.put(self.result)
except Exception:
self.__report_error()
self.queue.put((self.sid, 0, 0, 0, 0, "Error running tests"))
finally:
# ensure we reset the path
sys.path.pop(0)
os.chdir(self.rootdir)
self.ostream.close()
def read_config(configfile):
"""Read config file and set up defaults,
return a dictionary of config values"""
r = dict()
config = ConfigParser()
config.read(configfile)
# paths are resolved relative to the config file directory
configdir = os.path.dirname(configfile)
r['basedir'] = config.get('default', 'basedir')
r['targetname'] = config.get('default', 'targetname', fallback=None)
r['testmodule'] = config.get('default', 'testmodule')
r['outputname'] = config.get('default', 'outputname', fallback='test_output.txt')
expectzip = config.get('default', 'expectzip', fallback='no')
r['expectzip'] = expectzip == 'yes'
modules = config.get('default', 'modules')
# we split modules on whitespace
r['modules'] = [os.path.join(configdir, m) for m in modules.split()]
r['csvname'] = config.get('default', 'csvname', fallback="results.csv")
return r
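# Illustrative config file consumed by read_config (section and key names as
# read above; the file/module names here are made-up examples):
#
#   [default]
#   basedir = unpacked
#   targetname = assignment.py
#   testmodule = test_assignment
#   modules = test_assignment.py helpers.py
#   expectzip = yes
#   outputname = test_output.txt
#   csvname = results.csv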
def run_tests_on_collection(dirlist, basedir, testmodule, targetname, modules, outputname):
"""Run unit tests for each student directory in an unpacked directory
dirlist is a list of student submissions directories"""
# 60 second timeout threshold
threshold = datetime.timedelta(0, 60, 0)
result = []
queue = multiprocessing.Queue()
for sid in dirlist:
thr = TestRunner(basedir, sid, testmodule, targetname, modules, outputname, queue)
thr.start()
start = datetime.datetime.now()
timeout = False
while not timeout and thr.is_alive():
if datetime.datetime.now() - start > threshold:
timeout = True
if not queue.empty():
testresult = queue.get()
else:
testresult = (sid,0,0,0,0)
print("RESULT: ", sid, testresult)
result.append(testresult)
return result
def process(zfile, configfile):
"""Unpack submissions and run the unit tests for each
student"""
c = read_config(configfile)
h = open(c['csvname'], 'w')
results = csv.writer(h)
results.writerow(('SID', 'Tests', 'Failed', 'Errors', 'Total'))
unpacked, problems = scan_or_unpack_submissions(zfile, c['basedir'], c['targetname'], c['expectzip'])
result = run_tests_on_collection(unpacked, c['basedir'], c['testmodule'], c['targetname'], c['modules'], c['outputname'])
for row in result:
results.writerow(row)
print("Problem cases:\n")
for sid in problems:
results.writerow((sid,))
print(sid)
h.close()
print("Grading complete")
print("Results in ", c['csvname'])
| [
"[email protected]"
] | |
f7729591d5635a1da2b4fe884e44539a6aa15cd9 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /FEK7892zgj4nPJvkE_4.py | 6ec575bba1998ca7103a7d9ed80a1478a9fb8052 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py |
def primes2(n):
""" Input n>=6, Returns a list of primes, 2 <= p < n """
n, correction = n-n%6+6, 2-(n%6>1)
sieve = [True] * (n//3)
for i in range(1,int(n**0.5)//3+1):
if sieve[i]:
k=3*i+1|1
sieve[ k*k//3 ::2*k] = [False] * ((n//6-k*k//6-1)//k+1)
sieve[k*(k-2*(i&1)+4)//3::2*k] = [False] * ((n//6-k*(k-2*(i&1)+4)//6-1)//k+1)
return [2,3] + [3*i+1|1 for i in range(1,n//3-correction) if sieve[i]]
primes = primes2(10**6)
def prime_gaps(g, a, b):
for i in range(len(primes) - 1):
p1, p2 = primes[i:i+2]
if p1 >= a and p2 <= b and p2 - p1 == g:
return [p1, p2]
if p1 + g > b:
break
return None
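# e.g. prime_gaps(2, 3, 100) -> [3, 5]: the first consecutive-prime pair with gap 2 in [3, 100]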
| [
"[email protected]"
] | |
276f391bee58142fcfb697ca4d76631818bcd5f4 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/list_topics_response.py | 8df5bc3f28136681613fcee4eb2ec78edaee20f2 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,583 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListTopicsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'total': 'int',
'size': 'int',
'items': 'list[Topic]'
}
attribute_map = {
'total': 'total',
'size': 'size',
'items': 'items'
}
def __init__(self, total=None, size=None, items=None):
"""ListTopicsResponse
The model defined in huaweicloud sdk
:param total: 总数
:type total: int
:param size: 本次返回数量
:type size: int
:param items: 主题列表
:type items: list[:class:`huaweicloudsdkroma.v2.Topic`]
"""
super(ListTopicsResponse, self).__init__()
self._total = None
self._size = None
self._items = None
self.discriminator = None
if total is not None:
self.total = total
if size is not None:
self.size = size
if items is not None:
self.items = items
@property
def total(self):
"""Gets the total of this ListTopicsResponse.
总数
:return: The total of this ListTopicsResponse.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ListTopicsResponse.
总数
:param total: The total of this ListTopicsResponse.
:type total: int
"""
self._total = total
@property
def size(self):
"""Gets the size of this ListTopicsResponse.
本次返回数量
:return: The size of this ListTopicsResponse.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ListTopicsResponse.
本次返回数量
:param size: The size of this ListTopicsResponse.
:type size: int
"""
self._size = size
@property
def items(self):
"""Gets the items of this ListTopicsResponse.
主题列表
:return: The items of this ListTopicsResponse.
:rtype: list[:class:`huaweicloudsdkroma.v2.Topic`]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this ListTopicsResponse.
主题列表
:param items: The items of this ListTopicsResponse.
:type items: list[:class:`huaweicloudsdkroma.v2.Topic`]
"""
self._items = items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListTopicsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
27356cd1fbc4682db48ec7a2a95c16ce4ee088bd | b9a6440766ac6d09cbe5bcb0dd9ec035e79b68de | /0x05-python-exceptions/5-raise_exception.py | ca2fd4d2af3f9a6ae2a66b6c2664afe383b8d389 | [] | no_license | zulsb/holbertonschool-higher_level_programming | aa684ce2bad9f583dd54224e7cb1d60d2189b229 | 0a23d2ffc4ec5810213b6fcd82732f221c97a553 | refs/heads/master | 2021-06-25T15:16:48.849508 | 2021-05-23T00:07:13 | 2021-05-23T00:07:13 | 226,905,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | #!/usr/bin/python3
def raise_exception():
r = "lo" + 26
return r
| [
"[email protected]"
] | |
2146eb5e6ceca20a4c1d95187a11b0f63525e355 | 9e7ecc7fa214ad154a17fad249a47b52b26b65e3 | /docs/conf.py | 7013d49eb0156cd3195ccb76dbc27aa263895163 | [
"BSD-3-Clause"
] | permissive | PythonicNinja/downloader-amazon-cloud-drive | ca869d3e548113e27d77f909c6a78b3e13250ad0 | 0dbe967e61ddf52f324242bf79c4e3d54cbdc3a2 | refs/heads/master | 2020-05-17T18:02:04.845974 | 2015-08-08T16:25:51 | 2015-08-08T16:25:51 | 39,800,748 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,681 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# downloader-amazon-cloud-drive documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
# NOTE: hyphens are not valid in Python module names, so the package is
# assumed to be importable under the underscored name.
import downloader_amazon_cloud_drive
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Downloader amazon cloud drive'
copyright = u'2015, Wojciech Nowak'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = downloader_amazon_cloud_drive.__version__
# The full version, including alpha/beta/rc tags.
release = downloader_amazon_cloud_drive.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'downloader-amazon-cloud-drivedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'downloader-amazon-cloud-drive.tex',
u'Downloader amazon cloud drive Documentation',
u'Wojciech Nowak', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'downloader-amazon-cloud-drive',
u'Downloader amazon cloud drive Documentation',
[u'Wojciech Nowak'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'downloader-amazon-cloud-drive',
u'Downloader amazon cloud drive Documentation',
u'Wojciech Nowak',
'downloader-amazon-cloud-drive',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
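# For reference, the sections above are consumed by builder runs such as the
# following (illustrative invocations; the source/output paths are assumptions,
# not fixed by this file):
#   sphinx-build -b html . _build/html        # html_* options
#   sphinx-build -b latex . _build/latex      # latex_elements / latex_documents
#   sphinx-build -b man . _build/man          # man_pages
#   sphinx-build -b texinfo . _build/texinfo  # texinfo_documents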
| [
"[email protected]"
] | |
a2d04d53f2ed1214f12315065171a637a70f1949 | 841c0df958129bef4ec456630203992a143c7dc7 | /src/15/15726.py | ea66bee86e5bafd68fe9975425375b2326cc0ab2 | [
"MIT"
] | permissive | xCrypt0r/Baekjoon | da404d3e2385c3278a1acd33ae175c2c1eb82e5e | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | refs/heads/master | 2022-12-25T18:36:35.344896 | 2021-11-22T20:01:41 | 2021-11-22T20:01:41 | 287,291,199 | 16 | 25 | MIT | 2022-12-13T05:03:49 | 2020-08-13T13:42:32 | C++ | UTF-8 | Python | false | false | 291 | py | """
15726. Two Arithmetic Operations (이칙연산)
Author: xCrypt0r
Language: Python 3
Memory used: 29,380 KB
Running time: 60 ms
Solved: September 19, 2020
"""
def main():
A, B, C = map(int, input().split())
print(int(A * B / C if B > C else A / B * C))
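    # e.g. input "6 3 2": B > C, so A * B / C = 6 * 3 / 2 = 9 is printed;
    #      input "6 2 3": B <= C, so A / B * C = 6 / 2 * 3 = 9.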
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
b4eedfabafacd95da1f202e2227f0a1c1511d5b0 | d8e662acf8aa90b5a76351712dfcb405bfebc01a | /webtelnet/django_webtelnet/tools/telnet.py | d45fb8934cda5ac1bca1438f8426ee2b5d2edc79 | [
"MIT"
] | permissive | crazyinstall/django-webtelnet | c86f35d6f22c7f06c2fad378907da0444b3f9fb6 | 840572c14792109025bf2a17bc481ae58b06b29e | refs/heads/master | 2023-05-26T06:20:11.033401 | 2019-07-30T08:40:42 | 2019-07-30T08:40:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,198 | py | import telnetlib
from threading import Thread
import json
import time
import traceback
class Telnet:
"""
    Because of limitations in the telnetlib library, the terminal cannot display colors or set the terminal size.
"""
def __init__(self, websocker, message):
self.websocker = websocker
self.message = message
self.cmd = ''
self.res = ''
self.tn = telnetlib.Telnet()
def connect(self, host, user, password, port=23, timeout=30):
try:
self.tn.open(host=host, port=port, timeout=timeout)
self.tn.read_until(b'login: ', timeout=10)
user = '{0}\n'.format(user).encode('utf-8')
self.tn.write(user)
self.tn.read_until(b'Password: ', timeout=10)
password = '{0}\n'.format(password).encode('utf-8')
self.tn.write(password)
            time.sleep(0.5) # wait a little longer here if the server responds slowly
command_result = self.tn.read_very_eager().decode('utf-8')
self.message['status'] = 0
self.message['message'] = command_result
message = json.dumps(self.message)
self.websocker.send(message)
self.res += command_result
if 'Login incorrect' in command_result:
self.message['status'] = 2
                self.message['message'] = 'connection login failed...'
message = json.dumps(self.message)
self.websocker.send(message)
self.websocker.close(3001)
self.tn.write(b'export TERM=ansi\n')
time.sleep(0.2)
self.tn.read_very_eager().decode('utf-8')
            # Start a single thread to forward server output to the Django websocket; more than one thread easily garbles what the frontend displays
Thread(target=self.websocket_to_django).start()
except:
print(traceback.format_exc())
self.message['status'] = 2
            self.message['message'] = 'connection failed...'
message = json.dumps(self.message)
self.websocker.send(message)
self.websocker.close(3001)
def django_to_ssh(self, data):
try:
self.tn.write(data.encode('utf-8'))
if data == '\r':
data = '\n'
self.cmd += data
except:
self.close()
def websocket_to_django(self):
try:
while True:
data = self.tn.read_very_eager().decode('utf-8')
                if not len(data):
                    time.sleep(0.01)  # avoid spinning at 100% CPU while the server is quiet
                    continue
self.message['status'] = 0
self.message['message'] = data
self.res += data
message = json.dumps(self.message)
self.websocker.send(message)
except:
self.close()
def close(self):
try:
self.message['status'] = 1
self.message['message'] = 'connection closed...'
message = json.dumps(self.message)
self.websocker.send(message)
self.websocker.close()
self.tn.close()
except:
pass
def shell(self, data):
self.django_to_ssh(data)
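# A minimal sketch of how this class might be driven from a websocket consumer
# (the consumer object and host/credential values below are illustrative, not
# part of this module):
#
#   telnet = Telnet(websocker=consumer, message={'status': 0, 'message': ''})
#   telnet.connect('10.0.0.5', 'admin', 'secret', port=23)  # spawns the reader thread
#   telnet.shell('ls\r')   # forward keystrokes arriving from the browser
#   telnet.close()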
| [
"[email protected]"
] | |
6d8fc1aa3874d6519fdfe7a3ce8bc07ba45332d9 | 7ee4e7e48da5390839fd91f561637267bc65c731 | /examples/jupyter/merge.py | 764fa19fd8b7942c1b4f21f310bb6d0a9ac975ce | [
"Apache-2.0"
] | permissive | OliverEvans96/python-pachyderm | f82239d230e2346a677841c7e94079c7b4dabcbd | 8a3755402b0e32048c89315c3e7754cf9836d310 | refs/heads/master | 2020-07-19T04:10:47.168478 | 2019-08-08T21:23:27 | 2019-08-08T21:23:27 | 206,371,596 | 0 | 0 | Apache-2.0 | 2019-09-04T17:09:57 | 2019-09-04T17:09:57 | null | UTF-8 | Python | false | false | 1,148 | py | import os
import csv
import json
import datetime
PRICE = 5
def main():
try:
weather_filenames = os.listdir("/pfs/weather")
except:
weather_filenames = []
with open("/pfs/out/data.csv", "w") as out_file:
writer = csv.writer(out_file)
for weather_filename in weather_filenames:
dt = datetime.datetime.strptime(weather_filename, "%Y-%m-%d")
trip_filepath = "/pfs/trips/{}-{}".format(dt.month, dt.strftime("%d-%y"))
if os.path.exists(trip_filepath):
with open("/pfs/weather/{}".format(weather_filename), "r") as weather_file:
with open(trip_filepath, "r") as trip_file:
weather_json = json.load(weather_file)
precip = weather_json["daily"]["data"][0]["precipProbability"]
trip_csv = csv.reader(trip_file)
next(trip_csv) # skip the header row
trips = int(next(trip_csv)[1])
writer.writerow([dt.strftime("%Y-%m-%d"), precip, trips, trips * PRICE])
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
69d7f955c39a3df010c6b0722c30cd0b852e3a78 | 230633f33aaf722b1ece605a81ee566e1060fa3a | /textattack/models/wrappers/pytorch_model_wrapper.py | e3d9bcd7e5b5ce7880a2f46e8bd9ef8d633497c5 | [
"MIT"
] | permissive | ashwani-bhat/TextAttack | bc9a428a68f0894db7d6404e91adf8e2891055c0 | 9f5c0794b95779f11bf2a120642db00da2bc4928 | refs/heads/master | 2022-12-08T17:09:31.272779 | 2020-08-04T16:35:57 | 2020-08-04T16:35:57 | 284,734,102 | 0 | 0 | MIT | 2020-08-03T15:17:18 | 2020-08-03T15:17:17 | null | UTF-8 | Python | false | false | 1,111 | py | import torch
import textattack
from .model_wrapper import ModelWrapper
class PyTorchModelWrapper(ModelWrapper):
"""Loads a PyTorch model (`nn.Module`) and tokenizer."""
def __init__(self, model, tokenizer, batch_size=32):
if not isinstance(model, torch.nn.Module):
raise TypeError(
f"PyTorch model must be torch.nn.Module, got type {type(model)}"
)
self.model = model.to(textattack.shared.utils.device)
self.tokenizer = tokenizer
self.batch_size = batch_size
def tokenize(self, inputs):
if hasattr(self.tokenizer, "batch_encode"):
return self.tokenizer.batch_encode(inputs)
else:
return [self.tokenizer.encode(x) for x in inputs]
def __call__(self, text_input_list):
ids = self.tokenize(text_input_list)
ids = torch.tensor(ids).to(textattack.shared.utils.device)
with torch.no_grad():
outputs = textattack.shared.utils.batch_model_predict(
self.model, ids, batch_size=self.batch_size
)
return outputs
| [
"[email protected]"
] | |
d02dee9370662c42f3808914bfe276ed8b71b720 | 733f1b8e8069ee11e4f4d56e57c8fdc4c901d080 | /python/qa_pdu_round_robin.py | 1541e08ed6ff97e53c5bf1abf1a9727c589646f0 | [] | no_license | arirubinstein/gr-iridium | 3bbe8858968a4fb872a7da8abf621ce72a595fd1 | 3f7079bdf688f70acb43d12f049a405262982d78 | refs/heads/master | 2021-01-21T01:43:16.111168 | 2016-09-14T06:16:32 | 2016-09-14T06:16:32 | 68,176,992 | 2 | 0 | null | 2016-09-14T05:45:37 | 2016-09-14T05:45:37 | null | UTF-8 | Python | false | false | 1,241 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Free Software Foundation, Inc.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import iridium_swig as iridium
class qa_pdu_round_robin (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_pdu_round_robin, "qa_pdu_round_robin.xml")
| [
"[email protected]"
] | |
7ec6ea6355a4691c164a132310317a1a87d8f9a3 | b2158bfa536ff4f0dd1359be383fe2331f3e9707 | /1_Web_Server/Skeleton WebServer 2.py | 1a21b0b12ec4e228bca8f52436c167fac03e0746 | [] | no_license | kristogj/TTM4100_KTN | 63fc6f1840927b392fc2d140be73cd4a6ccfb6ec | a202b9d8cc6db7ea5936d550671e4076fc09dc89 | refs/heads/master | 2021-05-11T02:02:16.929177 | 2018-05-03T09:19:30 | 2018-05-03T09:19:30 | 118,347,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,705 | py | # This skeleton is valid for both Python 2.7 and Python 3.
# You should be aware of your additional code for compatibility of the Python version of your choice.
# Import socket module
from socket import *
# Create a TCP server socket
#(AF_INET is used for IPv4 protocols)
#(SOCK_STREAM is used for TCP)
serverSocket = socket(AF_INET, SOCK_STREAM)
# Prepare a server socket
# FILL IN START
# Assign a port number
serverPort = 56400##
# Bind the socket to server address and server port
serverSocket.bind(('',serverPort)) ##
# Listen to at most 1 connection at a time
serverSocket.listen(1) ##
# FILL IN END
# Server should be up and running and listening to the incoming connections
while True:
print('Ready to serve...')
# Set up a new connection from the client
connectionSocket, addr = serverSocket.accept()# FILL IN START # FILL IN END
# If an exception occurs during the execution of try clause
# the rest of the clause is skipped
# If the exception type matches the word after except
# the except clause is executed
try:
# Receives the request message from the client
message = connectionSocket.recv(1024) # FILL IN START # FILL IN END
# Extract the path of the requested object from the message
# The path is the second part of HTTP header, identified by [1]
filepath = message.split()[1]
        # Because the extracted path of the HTTP request starts with
        # a leading '/' character, we read the path from the second character
f = open(filepath[1:],'r')
# Read the file "f" and store the entire content of the requested file in a temporary buffer
outputdata = f.readlines()# FILL IN START # FILL IN END
print(outputdata)
# Send the HTTP response header line to the connection socket
# Format: "HTTP/1.1 *code-for-successful-request*\r\n\r\n"
# FILL IN START
connectionSocket.send(b"HTTP/1.1 200 OK\r\n\r\n")##
# FILL IN END
# Send the content of the requested file to the connection socket
for i in range(0, len(outputdata)):
connectionSocket.send(outputdata[i].encode())
connectionSocket.send(b"\r\n")
# Close the client connection socket
connectionSocket.close()
except IOError:
# Send HTTP response message for file not found
# Same format as above, but with code for "Not Found" (see outputdata variable)
# FILL IN START
connectionSocket.send(b"HTTP/1.1 404 NOT FOUND\r\n\r\n")##
# FILL IN END
connectionSocket.send(b"<html><head></head><body><h1>404 Not Found</h1></body></html>\r\n")
# Close the client connection socket
# FILL IN START
connectionSocket.close()##
# FILL IN END
serverSocket.close()
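# A quick manual check from another shell (the file name is illustrative):
#   curl -v http://localhost:56400/HelloWorld.html
# Requesting a file that does not exist should exercise the 404 branch above.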
| [
"[email protected]"
] | |
afd65f32fc819927ccab7f35007e2a0607ad0d78 | e701f53f8770795cd805f5ce9e83042edb1f4e5f | /tests/model/test_function_policies.py | 371ea70b183e65eb7c82e0623e1a4728661df037 | [
"Apache-2.0"
] | permissive | praneetap/serverless-application-model | f306384ad0bf2f54573617485268f5654683a589 | d874b75a117880f390ce6d6637963e05030af650 | refs/heads/master | 2022-07-27T00:56:15.716035 | 2019-07-29T22:14:51 | 2019-07-29T22:14:51 | 184,330,486 | 2 | 0 | Apache-2.0 | 2019-08-31T22:09:14 | 2019-04-30T20:56:40 | Python | UTF-8 | Python | false | false | 12,977 | py | from mock import Mock, patch
from unittest import TestCase
from samtranslator.model.function_policies import FunctionPolicies, PolicyTypes, PolicyEntry
class TestFunctionPolicies(TestCase):
def setUp(self):
self.policy_template_processor_mock = Mock()
self.is_policy_template_mock = Mock()
self.function_policies = FunctionPolicies({}, self.policy_template_processor_mock)
self.function_policies._is_policy_template = self.is_policy_template_mock
@patch.object(FunctionPolicies, "_get_policies")
def test_initialization_must_ingest_policies_from_resource_properties(self, get_policies_mock):
resource_properties = {}
dummy_policy_results = ["some", "policy", "statements"]
expected_length = 3
get_policies_mock.return_value = dummy_policy_results
function_policies = FunctionPolicies(resource_properties, self.policy_template_processor_mock)
get_policies_mock.assert_called_once_with(resource_properties)
self.assertEqual(expected_length, len(function_policies))
@patch.object(FunctionPolicies, "_get_policies")
def test_get_must_yield_results_on_every_call(self, get_policies_mock):
resource_properties = {} # Just some input
dummy_policy_results = ["some", "policy", "statements"]
expected_results = ["some", "policy", "statements"]
# Setup _get_policies to return these dummy values for testing
get_policies_mock.return_value = dummy_policy_results
function_policies = FunctionPolicies(resource_properties, self.policy_template_processor_mock)
# `list()` will implicitly call the `get()` repeatedly because it is a generator
self.assertEqual(list(function_policies.get()), expected_results)
@patch.object(FunctionPolicies, "_get_policies")
def test_get_must_yield_no_results_with_no_policies(self, get_policies_mock):
resource_properties = {} # Just some input
dummy_policy_results = []
expected_result = []
# Setup _get_policies to return these dummy values for testing
get_policies_mock.return_value = dummy_policy_results
function_policies = FunctionPolicies(resource_properties, self.policy_template_processor_mock)
# `list()` will implicitly call the `get()` repeatedly because it is a generator
self.assertEqual(list(function_policies.get()), expected_result)
def test_contains_policies_must_work_for_valid_input(self):
resource_properties = {
"Policies": "some managed policy"
}
self.assertTrue(self.function_policies._contains_policies(resource_properties))
def test_contains_policies_must_ignore_resources_without_policies(self):
resource_properties = {
"some key": "value"
}
self.assertFalse(self.function_policies._contains_policies(resource_properties))
def test_contains_policies_must_ignore_non_dict_resources(self):
resource_properties = "some value"
self.assertFalse(self.function_policies._contains_policies(resource_properties))
def test_contains_policies_must_ignore_none_resources(self):
resource_properties = None
self.assertFalse(self.function_policies._contains_policies(resource_properties))
def test_contains_policies_must_ignore_lowercase_property_name(self):
# Property names are case sensitive
resource_properties = {
"policies": "some managed policy"
}
self.assertFalse(self.function_policies._contains_policies(resource_properties))
def test_get_type_must_work_for_managed_policy(self):
policy = "managed policy is a string"
expected = PolicyTypes.MANAGED_POLICY
result = self.function_policies._get_type(policy)
self.assertEqual(result, expected)
@patch("samtranslator.model.function_policies.is_instrinsic")
def test_get_type_must_work_for_managed_policy_with_intrinsics(self, is_intrinsic_mock):
policy = {
"Ref": "somevalue"
}
expected = PolicyTypes.MANAGED_POLICY
is_intrinsic_mock.return_value = True
result = self.function_policies._get_type(policy)
self.assertEqual(result, expected)
def test_get_type_must_work_for_policy_statements(self):
policy = {
"Statement": "policy statements have a 'Statement' key"
}
expected = PolicyTypes.POLICY_STATEMENT
result = self.function_policies._get_type(policy)
self.assertEqual(result, expected)
def test_get_type_must_work_for_policy_templates(self):
policy = {
"PolicyTemplate": "some template"
}
self.is_policy_template_mock.return_value = True
expected = PolicyTypes.POLICY_TEMPLATE
result = self.function_policies._get_type(policy)
self.assertEqual(result, expected)
def test_get_type_must_ignore_invalid_policy(self):
policy = {
"not-sure-what-this-is": "value"
}
# This is also not a policy template
self.is_policy_template_mock.return_value = False
expected = PolicyTypes.UNKNOWN
result = self.function_policies._get_type(policy)
self.assertEqual(result, expected)
def test_get_type_must_ignore_invalid_policy_value_list(self):
policy = ["invalid", "policy"]
expected = PolicyTypes.UNKNOWN
self.is_policy_template_mock.return_value = False
result = self.function_policies._get_type(policy)
self.assertEqual(result, expected)
self.is_policy_template_mock.assert_called_once_with(policy)
def test_get_policies_must_return_all_policies(self):
policies = [
"managed policy 1",
{"Ref": "some managed policy"},
{"Statement": "policy statement"},
{"PolicyTemplate": "some value"},
["unknown", "policy"]
]
resource_properties = {
"Policies": policies
}
self.is_policy_template_mock.side_effect = [True, False] # Return True for policy template, False for the list
expected = [
PolicyEntry(data="managed policy 1", type=PolicyTypes.MANAGED_POLICY),
PolicyEntry(data={"Ref": "some managed policy"}, type=PolicyTypes.MANAGED_POLICY),
PolicyEntry(data={"Statement": "policy statement"}, type=PolicyTypes.POLICY_STATEMENT),
PolicyEntry(data={"PolicyTemplate": "some value"}, type=PolicyTypes.POLICY_TEMPLATE),
PolicyEntry(data=["unknown", "policy"], type=PolicyTypes.UNKNOWN),
]
result = self.function_policies._get_policies(resource_properties)
self.assertEqual(result, expected)
def test_get_policies_must_ignore_if_resource_does_not_contain_policy(self):
resource_properties = {
}
expected = []
result = self.function_policies._get_policies(resource_properties)
self.assertEqual(result, expected)
def test_get_policies_must_ignore_if_policies_is_empty(self):
resource_properties = {
"Policies": []
}
expected = []
result = self.function_policies._get_policies(resource_properties)
self.assertEqual(result, expected)
def test_get_policies_must_work_for_single_policy_string(self):
resource_properties = {
"Policies": "single managed policy"
}
expected = [
PolicyEntry(data="single managed policy", type=PolicyTypes.MANAGED_POLICY)
]
result = self.function_policies._get_policies(resource_properties)
self.assertEqual(result, expected)
def test_get_policies_must_work_for_single_dict_with_managed_policy_intrinsic(self):
resource_properties = {
"Policies": {
"Ref": "some managed policy"
}
}
expected = [
PolicyEntry(data={"Ref": "some managed policy"}, type=PolicyTypes.MANAGED_POLICY)
]
result = self.function_policies._get_policies(resource_properties)
self.assertEqual(result, expected)
def test_get_policies_must_work_for_single_dict_with_policy_statement(self):
resource_properties = {
"Policies": {
"Statement": "some policy statement"
}
}
expected = [
PolicyEntry(data={"Statement": "some policy statement"}, type=PolicyTypes.POLICY_STATEMENT)
]
result = self.function_policies._get_policies(resource_properties)
self.assertEqual(result, expected)
def test_get_policies_must_work_for_single_dict_of_policy_template(self):
resource_properties = {
"Policies": {
"PolicyTemplate": "some template"
}
}
self.is_policy_template_mock.return_value = True
expected = [
PolicyEntry(data={"PolicyTemplate": "some template"}, type=PolicyTypes.POLICY_TEMPLATE)
]
result = self.function_policies._get_policies(resource_properties)
self.assertEqual(result, expected)
self.is_policy_template_mock.assert_called_once_with(resource_properties["Policies"])
def test_get_policies_must_work_for_single_dict_of_invalid_policy_template(self):
resource_properties = {
"Policies": {
"InvalidPolicyTemplate": "some template"
}
}
self.is_policy_template_mock.return_value = False # Invalid policy template
expected = [
PolicyEntry(data={"InvalidPolicyTemplate": "some template"}, type=PolicyTypes.UNKNOWN)
]
result = self.function_policies._get_policies(resource_properties)
self.assertEqual(result, expected)
self.is_policy_template_mock.assert_called_once_with({"InvalidPolicyTemplate": "some template"})
def test_get_policies_must_work_for_unknown_policy_types(self):
resource_properties = {
"Policies": [
1, 2, 3
]
}
expected = [
PolicyEntry(data=1, type=PolicyTypes.UNKNOWN),
PolicyEntry(data=2, type=PolicyTypes.UNKNOWN),
PolicyEntry(data=3, type=PolicyTypes.UNKNOWN),
]
self.is_policy_template_mock.return_value = False
result = self.function_policies._get_policies(resource_properties)
self.assertEqual(result, expected)
def test_is_policy_template_must_detect_valid_policy_templates(self):
template_name = "template_name"
policy = {
template_name: {
"Param1": "foo"
}
}
self.policy_template_processor_mock.has.return_value = True
function_policies = FunctionPolicies({}, self.policy_template_processor_mock)
self.assertTrue(function_policies._is_policy_template(policy))
self.policy_template_processor_mock.has.assert_called_once_with(template_name)
def test_is_policy_template_must_ignore_non_dict_policies(self):
policy = [1,2,3]
self.policy_template_processor_mock.has.return_value = True
function_policies = FunctionPolicies({}, self.policy_template_processor_mock)
self.assertFalse(function_policies._is_policy_template(policy))
self.policy_template_processor_mock.has.assert_not_called()
def test_is_policy_template_must_ignore_none_policies(self):
policy = None
function_policies = FunctionPolicies({}, self.policy_template_processor_mock)
self.assertFalse(function_policies._is_policy_template(policy))
def test_is_policy_template_must_ignore_dict_with_two_keys(self):
template_name = "template_name"
policy = {
template_name: {"param1": "foo"},
"A": "B"
}
self.policy_template_processor_mock.has.return_value = True
function_policies = FunctionPolicies({}, self.policy_template_processor_mock)
self.assertFalse(function_policies._is_policy_template(policy))
def test_is_policy_template_must_ignore_non_policy_templates(self):
template_name = "template_name"
policy = {
template_name: {"param1": "foo"}
}
self.policy_template_processor_mock.has.return_value = False
function_policies = FunctionPolicies({}, self.policy_template_processor_mock)
self.assertFalse(function_policies._is_policy_template(policy))
self.policy_template_processor_mock.has.assert_called_once_with(template_name)
def test_is_policy_template_must_return_false_without_the_processor(self):
policy = {
"template_name": {"param1": "foo"}
}
function_policies_obj = FunctionPolicies({}, None) # No policy template processor
self.assertFalse(function_policies_obj._is_policy_template(policy))
self.policy_template_processor_mock.has.assert_not_called()
| [
"[email protected]"
] | |
5fc6b4960040d94fb1e4a0cbd82e50746474d47a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02821/s590731514.py | 827448373b6a7d5f2313e8622bb2f870d0d97350 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | import cmath
pi = cmath.pi
exp = cmath.exp
N=2**18
def make_exp_t(N, base):
exp_t = {0: 1}
temp = N
while temp:
exp_t[temp] = exp(base / temp)
temp >>= 1
return exp_t
fft_exp_t = make_exp_t(N, -2j*pi)
ifft_exp_t = make_exp_t(N, 2j*pi)
def fft_dfs(f, s, N, st, exp_t):
if N==2:
a = f[s]; b = f[s+st]
return [a+b, a-b]
N2 = N//2; st2 = st*2
F0 = fft_dfs(f, s , N2, st2, exp_t)
F1 = fft_dfs(f, s+st, N2, st2, exp_t)
w = exp_t[N]; wk = 1.0
for k in range(N2):
U = F0[k]; V = wk * F1[k]
F0[k] = U + V
F1[k] = U - V
wk *= w
F0.extend(F1)
return F0
def fft(f, N):
if N==1:
return f
return fft_dfs(f, 0, N, 1, fft_exp_t)
def ifft(F, N):
if N==1:
return F
f = fft_dfs(F, 0, N, 1, ifft_exp_t)
for i in range(N):
f[i] /= N
return f
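# The counting step below convolves the histogram b with itself: inverting the
# pointwise square of fft(b) gives, for every total s, the number of ordered
# pairs (x, y) with x + y == s.  Scanning totals from high to low and taking at
# most m pairs per total greedily keeps the m largest sums.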
n,m,*a=map(int,open(0).read().split())
b=[0]*N
for i in a:b[i]+=1
i=N
c=0
for a in ifft([t*t for t in fft(b,N)],N)[::-1]:
a=int(a.real+.5)
i-=1
if a:
t=min(m,a)
c+=i*t
m-=t
if not m:break
print(c) | [
"[email protected]"
] | |
d0d2fec898dd15fa114393b866c165481d23c57f | 0547d1826e99eedb959a3463520d73985a3b844e | /Data Science for Everyone Track/19-Introduction to Shell/01- Manipulating files and directories/01-How does the shell compare to a desktop interface.py | 5e8bc41d249261420a721998a9502cdcd75ba95c | [] | no_license | abhaysinh/Data-Camp | 18031f8fd4ee199c2eff54a408c52da7bdd7ec0f | 782c712975e14e88da4f27505adf4e5f4b457cb1 | refs/heads/master | 2022-11-27T10:44:11.743038 | 2020-07-25T16:15:03 | 2020-07-25T16:15:03 | 282,444,344 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | '''
How does the shell compare to a desktop interface?
An operating system like Windows, Linux, or Mac OS is a special kind of program. It controls the computer's processor, hard drive, and network connection, but its most important job is to run other programs.
Since human beings aren't digital, they need an interface to interact with the operating system. The most common one these days is a graphical file explorer, which translates clicks and double-clicks into commands to open files and run programs. Before computers had graphical displays, though, people typed instructions into a program called a command-line shell. Each time a command is entered, the shell runs some other programs, prints their output in human-readable form, and then displays a prompt to signal that it's ready to accept the next command. (Its name comes from the notion that it's the "outer shell" of the computer.)
Typing commands instead of clicking and dragging may seem clumsy at first, but as you will see, once you start spelling out what you want the computer to do, you can combine old commands to create new ones and automate repetitive operations with just a few keystrokes.
What is the relationship between the graphical file explorer that most people use and the command-line shell?
Answer the question
50 XP
Possible Answers
The file explorer lets you view and edit files, while the shell lets you run programs.
The file explorer is built on top of the shell.
The shell is part of the operating system, while the file explorer is separate.
They are both interfaces for issuing commands to the operating system.
Answer : They are both interfaces for issuing commands to the operating system.
''' | [
"[email protected]"
] | |
71b044f34e96ce148cec417fb41d88ef7818d82e | defe77f8cfb333f4c67c0f9cafb290cb337464aa | /sequencing_utilities/gdtools.py | 6f2964959ff2d33c37b40c683d098e7a05003f7b | [
"MIT"
] | permissive | dmccloskey/sequencing_utilities | 8bd5c2c3ffe5d54a3c898db86bb65d6ae2af1394 | 3845cede661bc263a38cf8850148380e08c0e9ea | refs/heads/master | 2020-04-06T07:01:42.013835 | 2016-09-15T04:24:31 | 2016-09-15T04:24:31 | 38,275,400 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | #!/usr/bin/python
# -*- coding: latin-1 -*-
"""
Implements the GDTools class that annotates and applies mutations to .gd and reference .gbk files
based on the gdtools utility program
"""
import os
class GDTools():
def apply(self,gbk_filename_I,gd_filename_I,fastaOrGff3_filename_O,output_O='gff3',
gdtools_I = 'gdtools'):
'''apply mutational changes found in the gd file to the reference genome
e.g. gdtools APPLY [ -o output.gff3 -f GFF3 ] -r reference.gbk input.gd
INPUT:
fastaOrGff3_filename_O = output filename
output_O = 'fasta' or 'gff3' (default output: gff3)
gbk_filename_I = reference genome
gd_filename_I = gd filename
gdtools_I = command for gdtools'''
cmd = ("%s APPLY -o %s -f %s -r %s %s" %(gdtools_I,fastaOrGff3_filename_O,output_O,gbk_filename_I,gd_filename_I));
print(cmd);
os.system(cmd);
def annotate(self,htmlOrGd_filename_O,gbk_filename_I,gd_filenames_I=[],output_O='html',
gdtools_I = 'gdtools'):
'''
e.g. gdtools ANNOTATE [-o annotated.html] -r reference.gbk input.1.gd [input.2.gd ... ]
INPUT:
htmlOrGd_filename_O = filename for the .html or .gd file output
output_O = 'html' or 'gd' (default output: html)
gbk_filename_I = reference genome
gd_filenames_I = list of gd files
gdtools_I = command for gdtools
OUTPUT:
html or gd file based on input
'''
gd_filename_str = ' '.join(gd_filenames_I);
if output_O=='html':
cmd = ("%s ANNOTATE -o %s --html -r %s %s" %(gdtools_I,
htmlOrGd_filename_O,gbk_filename_I,gd_filename_str));
else:
cmd = ("%s ANNOTATE -o %s -r %s %s" %(gdtools_I,
htmlOrGd_filename_O,gbk_filename_I,gd_filename_str));
print(cmd);
os.system(cmd); | [
"[email protected]"
] | |
56198034f91bc59cb3faaffacf9f3a6f362d3f7a | 05215b1f0f07eeb7266996c4d9a3f4cff78be7e1 | /ai.py | 2e5d593ca4af2608d3f1595dfe3b22e1fa50d533 | [] | no_license | ljte/TicTacToe | c54c259dc0106fddf9c814f9efac2285e5a89ae1 | 584d1f8900b9b9ee216587a247af7c97714ad3dd | refs/heads/master | 2022-12-09T13:33:57.400026 | 2020-09-12T13:16:33 | 2020-09-12T13:16:33 | 294,947,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | import sys
from main import check_winner
scores = {
'X': 1,
'O': -1,
'Tie': 0
}
ai_player = 'O'
human_player = 'X'
def minimax(board, depth, is_maximizing):
    # Scores are from X's point of view (an X win is +1), so the 'O' AI below
    # looks for the move that minimises the value of the resulting position.
    result = check_winner(board)
    if result:
        return scores[result]
if is_maximizing:
best_score = -sys.maxsize
for i in range(3):
for j in range(3):
if board[i][j].is_empty:
board[i][j].value = human_player
score = minimax(board, depth + 1, False)
board[i][j].value = None
best_score = max(score, best_score)
return best_score
else:
best_score = sys.maxsize
for i in range(3):
for j in range(3):
if board[i][j].is_empty:
board[i][j].value = ai_player
score = minimax(board, depth + 1, True)
board[i][j].value = None
best_score = min(score, best_score)
return best_score
def make_best_move(board):
best_score = sys.maxsize
best_move = ()
for i in range(3):
for j in range(3):
if board[i][j].is_empty:
board[i][j].value = ai_player
score = minimax(board, 0, True)
if score < best_score:
best_score = score
best_move = (i, j)
board[i][j].value = None
board[best_move[0]][best_move[1]].value = ai_player
def make_simple_turn(grid):
empty_cell = grid.get_empty_cell()
if empty_cell:
empty_cell.value = ai_player
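# A hedged sketch of an alpha-beta variant of the plain minimax above; it is
# not wired into make_best_move and assumes the same board/check_winner contract.
def minimax_ab(board, depth, is_maximizing, alpha=-sys.maxsize, beta=sys.maxsize):
    result = check_winner(board)
    if result:
        return scores[result]
    for i in range(3):
        for j in range(3):
            if board[i][j].is_empty:
                board[i][j].value = human_player if is_maximizing else ai_player
                score = minimax_ab(board, depth + 1, not is_maximizing, alpha, beta)
                board[i][j].value = None
                if is_maximizing:
                    alpha = max(alpha, score)
                else:
                    beta = min(beta, score)
                if beta <= alpha:  # prune: the opponent never lets play reach here
                    return alpha if is_maximizing else beta
    return alpha if is_maximizing else beta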
| [
"="
] | = |
83fa38f3c85d64b10ad5d7b0a64e7056c9159000 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2137/60698/252878.py | 6263880d5378d64f8803902ccd10935998fcc738 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | from math import sqrt
def test():
n = int(input())
if n<=1:
print(False)
return
factor = [1]
for i in range(2, int(sqrt(n)) + 1):
if n % i == 0:
if i not in factor:
factor.append(i)
if int(n / i) not in factor:
factor.append(int(n / i))
sum = 0
for j in range(0, len(factor)):
sum = sum + factor[j]
if sum == n:
print(True)
else:
print(False)
test()
| [
"[email protected]"
] | |
41bc383a1fe09b076d117af164bcb72a3e2dc0ba | c8ecc1dc9b6400ecbc26edae86375a699b1fce80 | /.tox/py37/bin/pip3.7 | bbd15fc26a694ca2db31237503cfc94aeea1c9d5 | [
"MIT"
] | permissive | leefyi/bovine-picker | 16ade5f7cae039f3a92166e3318bffe1ad8ce623 | a6100e54bb02894c2d3734a9500dc77e83405da1 | refs/heads/master | 2020-04-23T16:15:44.806725 | 2019-05-27T08:56:36 | 2019-05-27T08:56:36 | 171,292,539 | 1 | 0 | MIT | 2019-05-27T08:56:37 | 2019-02-18T13:49:23 | Python | UTF-8 | Python | false | false | 277 | 7 | #!/home/SENSETIME/lifangyi/PycharmProjects/bovine-picker/.tox/py37/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"="
] | = |
5ef92ca87e34e9dc5efdc49c10d106b68e01480f | 2c3da6e0bddf55d64d650040bbf286c47b31811a | /learnpython100day/元类.py | 6af39b9d5739481bdc65d335eb5cb68b0e9e6c1e | [
"MIT"
] | permissive | Bngzifei/PythonNotes | 76bd53db3033a9c51ab4bdd727842cd89607b584 | 01590e1b6c1bc0f04aa2d355fa2553c04cce27f2 | refs/heads/master | 2023-02-04T06:49:00.725463 | 2020-12-15T09:26:40 | 2020-12-15T09:26:40 | 155,154,662 | 1 | 2 | MIT | 2020-09-08T01:30:19 | 2018-10-29T05:02:48 | Python | UTF-8 | Python | false | false | 1,108 | py | # -*- coding: utf-8 -*-
# @Author: Marte
# @Date: 2019-05-27 17:46:35
# @Last Modified by: Marte
# @Last Modified time: 2019-05-27 20:20:18
class Foo(object):
def hello(self):
print("hello world!")
return
foo = Foo()
print(type(foo)) # <class '__main__.Foo'>
print(type(foo.hello)) # <class 'method'>
print(type(Foo)) # <class 'type'>
temp = Foo # 赋值给其他变量
Foo.var = 11 # 增加参数
print(Foo) # 作为函数参数
def init(self,name):
self.name = name
return
def hello(self):
print("hello %s"%self.name)
return
Foo = type("Foo", (object,), {"__init__": init, "hello": hello, "cls_var": 10})
foo = Foo("xianhu")
print(foo.hello())
print(Foo.cls_var)
print(foo.__class__)
print(Foo.__class__)
print(type.__class__)
class Author(type):
def __new__(mcs, name, bases, dict):
# 添加作者属性
dict["author"] = "xianhu"
return super(Author, mcs).__new__(mcs, name, bases, dict)
class Foo(object, metaclass=Author):
pass
foo = Foo()
print(foo.author)
| [
"[email protected]"
] | |
4a85a1b72cfea37cab5e95a542ca77b194c0997b | 135cf3b73c4cd01970865b794260e195076875da | /scripts/r&d/testSend.py | 985650bfe80997d6ea6994ed041ff01680387f20 | [] | no_license | njha7/elbalang_orchestration | 1a4b7e6bb49dd7f7f735291949f52fdebed78c51 | 056f6305d43b24fedbf3eb4f6f26deaf5a0f57af | refs/heads/master | 2021-04-28T18:41:43.603542 | 2018-04-19T19:25:59 | 2018-04-19T19:25:59 | 121,879,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | import pika
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
for x in range(1,100):
channel.basic_publish(exchange='', routing_key='hello', body='%d' % x)
connection.close() | [
"[email protected]"
] | |
57995e50c4c2740675bad0ae2baf19562f0f7c26 | eed7b5aa4861086d34e539e7bbfeff4286506692 | /src/Server/Game/games.py | 92eaa0fc62857153b72dd206ce6f503f660e8d55 | [] | no_license | dfwarden/DeckBuilding | 0be2ccb68fc9a69c8eaa1d8acedeaa7cebef1a31 | 0b5a7573a3cf33430fe61e4ff8a8a7a0ae20b258 | refs/heads/master | 2021-01-18T09:52:51.880892 | 2015-02-03T03:21:17 | 2015-02-03T03:21:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from game_wrapper import GameWrapper
id = 1
games = {}
def StartNewGame(game, players):
""" Start a New Game """
global games
global id
currentId = id
games[currentId] = GameWrapper(currentId, game, players)
id += 1
return currentId | [
"[email protected]"
] | |
4f3036721abf197f97623f9c2e9af80af2d85d55 | 7792b03540784a0d28073899dd4ad78689e9a9fb | /VoiceAI/my_slice.py | 1b58955d63d01e8ff062e6c608d1488fcab21885 | [] | no_license | ayiis/coding | 3b1362f813a22a7246af3725162cfb53dea2f175 | c73e4622e1811cc3fd8729a92df6537bd73dc802 | refs/heads/master | 2021-06-02T14:55:38.451288 | 2021-04-26T08:39:16 | 2021-04-26T08:39:16 | 134,660,001 | 0 | 0 | null | 2020-06-05T04:03:58 | 2018-05-24T04:14:14 | CSS | UTF-8 | Python | false | false | 4,958 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'ayiis'
# create on 2018/11/07
"""
    Slice the wav files in all subdirectories of a directory.
    Used to split Ximalaya recordings of children reading English aloud.
"""
import sys
import numpy as np
import re
import ubelt
from pathlib import Path
import wave
reload(sys).setdefaultencoding("utf8")
class Slicer(object):
"""docstring for Slicer"""
def __init__(self, arg):
super(Slicer, self).__init__()
self.arg = arg
self.filename = arg["filename"]
self.save_dir = arg["save_dir"]
        self.num_samples = 2048 # pyaudio's built-in buffer size
        self.sampling_rate = 16000 # sampling rate
        self.level = 1000 # amplitude threshold above which audio is kept
        self.count_num = 20 # keep a chunk if COUNT_NUM of count_num samples exceed LEVEL
        self.save_length = 10 # minimum recording length: save_length * num_samples samples
        self.channels = 1 # mono, a single channel
        self.sampwidth = 2 # sample width of the recording, in bytes
self.save_buffer = []
self.save_2_buffer = ""
self.save_num = 0
self.MAGIC_VALUE = 8
def save_2_file(self, content):
self.save_num += 1
ubelt.ensuredir(self.save_dir)
self.save_wav_path = "%s/%s.wav" % (self.save_dir, str(self.save_num).rjust(3, "0"))
print "save to: %s" % self.save_wav_path
wf = wave.open(self.save_wav_path, "wb")
wf.setnchannels(self.channels)
wf.setsampwidth(self.sampwidth)
wf.setframerate(self.sampling_rate)
wf.writeframes(content)
wf.close()
def do(self):
offset = []
with open(self.filename) as fr:
fr.seek(44)
while True:
string_audio_data = fr.read(self.num_samples)
if not string_audio_data:
break
self.save_2_buffer += string_audio_data
audio_data = np.fromstring(string_audio_data, dtype=np.short)
large_sample_count = np.sum(audio_data > self.level)
if large_sample_count > self.count_num:
offset.append(1)
else:
offset.append(0)
# print offset
# print len([x for x in offset if x == 1]), "/", len(offset)
# c_count = [0] * 24
cut_pos = [0]
c0 = 0
r_start = False
for pos, i in enumerate(offset):
if i == 0:
c0 += 1
else:
# for k in range(c0+1):
# if k >= 24:
# continue
# c_count[k] += 1
if c0 >= self.MAGIC_VALUE and r_start is True:
cut_pos.append(pos - c0 / 2)
c0 = 0
r_start = True
# print "------"
# print cut_pos[-1], len(offset)-1
cut_pos.append(len(offset))
# print "\t".join([str(x+1) for x in range(24)])
# print "\t".join([str(x) for x in c_count])
# print "cut at:"
print cut_pos
# print len(cut_pos)
# print "cut result:"
# print "end_pos:", cut_pos
for i, val in enumerate(cut_pos):
if i == 0:
continue
print offset[cut_pos[i-1]: val]
self.save_2_file(self.save_2_buffer[cut_pos[i-1]*self.num_samples:val*self.num_samples])
source_path = "/home/1109"
target_path = "/home/1109_done/"
def main():
for wav_dir in Path(source_path).glob("*"):
for wav_file in Path(wav_dir).glob("*.wav"):
wav_file_name = wav_file.name.lower().replace(".wav", "")
wav_file_name = re.sub(r"[\d]+[.][\d]+", "", wav_file_name)
wav_file_name = re.sub(r"raz[ -]?[a-z][ ]", "", wav_file_name)
# fixed \W
wav_file_name = re.sub(r"[\W]", "_", "%s" % wav_file_name)
wav_file_name = wav_file_name.strip()
new_file_path = "%s%s___%s" % (
target_path,
wav_dir.name.replace(" ", "").replace("-", "").lower().replace("raz", ""),
wav_file_name
)
# new_file_path = re.sub(r"[\W]", "_", new_file_path)
# print wav_dir, wav_file
# print new_file_path
ubelt.ensuredir(new_file_path + "/wav")
ubelt.ensuredir(new_file_path + "/etc")
# if "Fruit" not in "%s" % wav_file:
# continue
sc = Slicer({
"filename": "%s" % wav_file,
# "filename": "/home/data2/18988369 - Raz d/Raz d maria's halloween.wav",
# "filename": "/home/data/12338138 - RAZ-A/Fruit.wav",
"save_dir": new_file_path + "/wav",
"txt_path": new_file_path + "/etc/prompts-original",
})
sc.do()
# exit(1)
if __name__ == "__main__":
pass
# main()
| [
"[email protected]"
] | |
01921ca9f3fca49a540f52fee546503370155e01 | 2f083c3e5ebaf5803edb1c6329501d09dd7695db | /set 2h.py | 746ef779a9a804d81c050b48c947ec3ab81238a3 | [] | no_license | Anjanaanjujsrr/Anju-code_kata-PLAYER | 321a1097d1afe7da8916f193b35dfbd9ca440ec6 | fadd1a2971843c5cf12cd63bcd062e96e093feb5 | refs/heads/master | 2020-05-26T08:59:53.795026 | 2019-06-09T08:05:27 | 2019-06-09T08:05:27 | 188,175,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | #anju
number=int(input())
r=[]
a=0
count=0
for i in range(number):
c=input()
r.append(c)
for i in r:
for j in i:
a+=ord(j)
if(a==612):
count+=1
a=0
print(count) | [
"[email protected]"
] | |
f90b5b1fcbe9306e479747f6580ea0cbe3a5d1c7 | 35f069aad9f7040e20494dac11f826bba41d029e | /src/main/resources/qtools/lib/webservice/__init__.py | 384fc467ac669b3197a860fb9c1a1dfa9b73fa32 | [] | no_license | v-makarenko/vtoolsmq | 4be3bc3965aaeeee2d64c359a30f6f18617f354d | 8a0dd75b196c0e641bb8b4b20124540aaaa2814b | refs/heads/master | 2021-01-10T02:04:58.893206 | 2015-12-03T16:34:44 | 2015-12-03T16:34:44 | 47,275,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,042 | py | """
This package contains modules and classes used to interact with external web services.
"""
import urllib, urllib2, cookielib, os
class RequestProxy(object):
"""
Returns a proxied response object on request.
Not sure if this is the right thing to do yet.
"""
def __init__(self, proxy_class=None, opener=None):
self.opener = opener or None
if not proxy_class:
self.proxy_class = ResponseProxy
else:
self.proxy_class = proxy_class
def request(self, *args, **kwargs):
"""
Returns a
"""
if not self.opener:
response = urllib2.urlopen(*args, **kwargs)
else:
response = self.opener.open(*args, **kwargs)
return self.proxy_class(self, response)
class ResponseProxy(object):
"""
Proxy object that may edit the proxy object
when the response is read.
"""
def __init__(self, proxy, response):
self._proxy = proxy
self._response = response
@property
def proxy(self):
return self._proxy
def __getattribute__(self, name):
# note: @property decorator seems to muck with this
response = object.__getattribute__(self, '_response')
try:
self_method = object.__getattribute__(self, name)
except AttributeError, e:
self_method = None
try:
handler = object.__getattribute__(self, "_on_%s" % name)
except AttributeError, e:
handler = None
if name in response.__dict__ and not self_method:
if handler:
def func(*args, **kwargs):
retval = response.__dict__[name](*args, **kwargs)
handler(retval)
return retval
return func
else:
def func(*args, **kwargs):
return response.__dict__[name](*args, **kwargs)
return func
return self_method
def make_get_request_url(base_url, uri='/', param_dict=None):
"""
Constructs the get request URL.
(This might not be the right abstraction, let's see what happens with cookies)
"""
if param_dict is None:
param_dict = dict()
if not uri:
uri = '/'
if base_url.endswith('/') and uri.startswith('/'):
base_url = base_url[:-1]
elif (not base_url.endswith('/') and not uri.startswith('/')):
uri = "/%s" % uri
# note, may need to use a MultiDict in source implementation.
# or maybe let's use WebOb.Request.
param_str = urllib.urlencode(param_dict)
if param_str:
full_url = "%s%s?%s" % (base_url, uri, param_str)
else:
full_url = "%s%s" % (base_url, uri)
return full_url
def make_request_params(defaults, *args, **kwargs):
if not defaults:
defaults = dict()
defaults.update(kwargs)
return defaults | [
"[email protected]"
] | |
221a8ffa70126e93462e6b05258ee3f72950aa1f | ce32ff8c0ad1ad9e42e6b59e201c70df754aa51e | /farmmonitor/manage.py | 00e0f54506f9e616394b71d808fb99fe541ced96 | [] | no_license | wangxiaoying/farm-monitor | d7710559194c2771d09012b6dd204bae12669b6e | 413ddaf1a23655f705da8b65978d06b704c81723 | refs/heads/master | 2021-01-10T21:33:35.643549 | 2015-06-16T09:07:14 | 2015-06-16T09:07:14 | 32,635,833 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "farmmonitor.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
f264cc0288deced4fe0c271ca22be87310d2710d | d659fb0db310793b918640fdb673b9bd755578bc | /standard_lib/remove_elements_list.py | 83be482832d01d31f152fe83cb07e5eb97fd4837 | [
"MIT"
] | permissive | astuk/python-snippets | 562bdcdb23c537650a767fb0369388d9530a67ae | 212f63f820b6f5842f74913ed08da18d41dfe7a4 | refs/heads/master | 2023-06-18T04:29:48.111537 | 2021-07-14T10:55:59 | 2021-07-14T10:55:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | data = [1, 2, 3, 4, 1, 2, 3, 4]
target = 1
print(f"Original: {data}")
data[:] = [elem for elem in data if elem != target]
print(f"New: {data}")
| [
"[email protected]"
] | |
9c515388fcd184ea1ae18872a428a79645351f33 | 5b6af599a2afb4db27b588cfc00831446ff8620f | /blog/urls.py | 62c9faaba9b33b021a3ebd8bbd5d111d134fa5a9 | [] | no_license | dlatnrud/myblog | 425acb68be9d3b672e2a84cb84e403cc31c61348 | 8e344e9eadaaf85fed530c25fefc87f916dbee0c | refs/heads/main | 2023-08-26T17:12:52.097986 | 2021-10-22T03:55:18 | 2021-10-22T03:55:18 | 419,949,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
#127.0.0.1:8000/blog
path('', views.index, name='index'),
path('<int:post_id>/', views.detail, name='detail'),
path('post/create/', views.post_create, name='post_create'),
] | [
"[email protected]"
] | |
2a6450417821ac75724ba063f20fd6289c9bb8b0 | 085ce75a507df6e755cabb7a65c4a2a8c98762ba | /dockerfiles/root/.pycharm_helpers/python_stubs/-252567642/_codecs.py | 2232eace436d8b42e47cad46c20bbf725f5f9b60 | [] | no_license | Arhzi/habr-docker-article | d44302db1fe157d81fe0818e762e82218f50e31f | 6fb094860b612e307beadaeb22981aa0ee64e964 | refs/heads/master | 2021-01-23T20:41:47.398025 | 2015-12-10T08:56:33 | 2015-12-10T08:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,216 | py | # encoding: utf-8
# module _codecs
# from (built-in)
# by generator 1.137
# no doc
# no imports
# functions
def ascii_decode(*args, **kwargs): # real signature unknown
pass
def ascii_encode(*args, **kwargs): # real signature unknown
pass
def charbuffer_encode(*args, **kwargs): # real signature unknown
pass
def charmap_build(*args, **kwargs): # real signature unknown
pass
def charmap_decode(*args, **kwargs): # real signature unknown
pass
def charmap_encode(*args, **kwargs): # real signature unknown
pass
def decode(obj, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
decode(obj, [encoding[,errors]]) -> object
Decodes obj using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a ValueError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle ValueErrors.
"""
return object()
def encode(obj, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
encode(obj, [encoding[,errors]]) -> object
Encodes obj using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a ValueError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that can handle ValueErrors.
"""
return object()
def escape_decode(*args, **kwargs): # real signature unknown
pass
def escape_encode(*args, **kwargs): # real signature unknown
pass
def latin_1_decode(*args, **kwargs): # real signature unknown
pass
def latin_1_encode(*args, **kwargs): # real signature unknown
pass
def lookup(encoding): # real signature unknown; restored from __doc__
"""
lookup(encoding) -> CodecInfo
Looks up a codec tuple in the Python codec registry and returns
a CodecInfo object.
"""
pass
def lookup_error(errors): # real signature unknown; restored from __doc__
"""
lookup_error(errors) -> handler
Return the error handler for the specified error handling name
or raise a LookupError, if no handler exists under this name.
"""
pass
def raw_unicode_escape_decode(*args, **kwargs): # real signature unknown
pass
def raw_unicode_escape_encode(*args, **kwargs): # real signature unknown
pass
def readbuffer_encode(*args, **kwargs): # real signature unknown
pass
def register(search_function): # real signature unknown; restored from __doc__
"""
register(search_function)
Register a codec search function. Search functions are expected to take
one argument, the encoding name in all lower case letters, and return
a tuple of functions (encoder, decoder, stream_reader, stream_writer)
(or a CodecInfo object).
"""
pass
def register_error(errors, handler): # real signature unknown; restored from __doc__
"""
register_error(errors, handler)
Register the specified error handler under the name
errors. handler must be a callable object, that
will be called with an exception instance containing
information about the location of the encoding/decoding
error and must return a (replacement, new position) tuple.
"""
pass
def unicode_escape_decode(*args, **kwargs): # real signature unknown
pass
def unicode_escape_encode(*args, **kwargs): # real signature unknown
pass
def unicode_internal_decode(*args, **kwargs): # real signature unknown
pass
def unicode_internal_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_be_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_be_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_encode(*args, **kwargs): # real signature unknown
pass
def utf_16_ex_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_le_decode(*args, **kwargs): # real signature unknown
pass
def utf_16_le_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_be_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_be_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_encode(*args, **kwargs): # real signature unknown
pass
def utf_32_ex_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_le_decode(*args, **kwargs): # real signature unknown
pass
def utf_32_le_encode(*args, **kwargs): # real signature unknown
pass
def utf_7_decode(*args, **kwargs): # real signature unknown
pass
def utf_7_encode(*args, **kwargs): # real signature unknown
pass
def utf_8_decode(*args, **kwargs): # real signature unknown
pass
def utf_8_encode(*args, **kwargs): # real signature unknown
pass
# no classes
| [
"[email protected]"
] | |
0ec3ee8ba21f62bdf895ad95895a6cecee3ea293 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/7edd451a4e81080562d8e2daa1e962bf7fbe4a9b-<apply>-fix.py | 0d2625c133798c97803281d73411bc9ac3823fff | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | @staticmethod
def apply(module, name, n_power_iterations, eps):
fn = SpectralNorm(name, n_power_iterations, eps)
weight = module._parameters[name]
height = weight.size(0)
u = normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
delattr(module, fn.name)
module.register_parameter((fn.name + '_org'), weight)
module.register_buffer(fn.name, weight)
module.register_buffer((fn.name + '_u'), u)
module.register_forward_pre_hook(fn)
return fn | [
"[email protected]"
] | |
061faf3a7b71dc0672633c4edca636e4eae445de | ad9782856ec2f860fccbefa5e75a896691b8e1cc | /MonteCarlo/test/opt6s3l/MinBias_14TeV_pythia8_TuneCUETP8M1_cfi_GEN_SIM_OT613_200_IT4025_opt6s3l.py | cade5d39c8f174af6bc59396e837fa2023545939 | [] | no_license | OSU-CMS/VFPix | 7fe092fc5a973b4f9edc29dbfdf44907664683e5 | 4c9fd903219742a4eba1321dc4181da125616e4c | refs/heads/master | 2020-04-09T05:52:05.644653 | 2019-01-09T13:44:22 | 2019-01-09T13:44:22 | 30,070,948 | 0 | 0 | null | 2018-11-30T13:15:54 | 2015-01-30T12:26:20 | Python | UTF-8 | Python | false | false | 5,781 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: MinBias_14TeV_pythia8_TuneCUETP8M1_cfi --conditions auto:phase2_realistic -n 10 --era Phase2C2 --eventcontent FEVTDEBUG --relval 90000,100 -s GEN,SIM --datatier GEN-SIM --beamspot HLLHC14TeV --geometry Extended2023D17 --fileout file:step1.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('SIM',eras.Phase2C2)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023D17Reco_cff')
process.load('Configuration.Geometry.GeometryExtended2023D17_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedHLLHC14TeV_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('MinBias_14TeV_pythia8_TuneCUETP8M1_cfi nevts:10'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.FEVTDEBUGoutput = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
fileName = cms.untracked.string('file:step1.root'),
outputCommands = process.FEVTDEBUGEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic', '')
process.generator = cms.EDFilter("Pythia8GeneratorFilter",
PythiaParameters = cms.PSet(
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'),
processParameters = cms.vstring('SoftQCD:nonDiffractive = on',
'SoftQCD:singleDiffractive = on',
'SoftQCD:doubleDiffractive = on'),
pythia8CUEP8M1Settings = cms.vstring('Tune:pp 14',
'Tune:ee 7',
'MultipartonInteractions:pT0Ref=2.4024',
'MultipartonInteractions:ecmPow=0.25208',
'MultipartonInteractions:expPow=1.6'),
pythia8CommonSettings = cms.vstring('Tune:preferLHAPDF = 2',
'Main:timesAllowErrors = 10000',
'Check:epTolErr = 0.01',
'Beams:setProductionScalesFromLHEF = off',
'SLHA:keepSM = on',
'SLHA:minMassSM = 1000.',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tau0Max = 10',
'ParticleDecays:allowPhotonRadiation = on')
),
comEnergy = cms.double(14000.0),
crossSection = cms.untracked.double(71390000000.0),
filterEfficiency = cms.untracked.double(1.0),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1)
)
process.ProductionFilterSequence = cms.Sequence(process.generator)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGoutput_step = cms.EndPath(process.FEVTDEBUGoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.FEVTDEBUGoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
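# Swap in the custom tracker geometry: every XML file listed below is replaced by
# its counterpart from the VFPix OT613_200_IT4025_opt6s3l payload directory.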
inputDir = "VFPix/MonteCarlo/data/OT613_200_IT4025_opt6s3l/"
fileNames = ["pixbar.xml", "pixel.xml", "pixelProdCuts.xml", "pixelStructureTopology.xml", "pixelsens.xml", "pixfwd.xml", "tracker.xml", "trackerProdCuts.xml", "trackerRecoMaterial.xml", "trackerStructureTopology.xml", "trackersens.xml"]
for i, xmlFile in enumerate(process.XMLIdealGeometryESSource.geomXMLFiles):
    fileName = xmlFile.split("/")[-1]
    if fileName in fileNames:
        process.XMLIdealGeometryESSource.geomXMLFiles[i] = inputDir + fileName
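# Optional sanity check (a sketch -- assumes the config is run where stdout is
# visible, e.g. under cmsRun): list the geometry files now taken from the
# custom directory.
for xmlFile in process.XMLIdealGeometryESSource.geomXMLFiles:
    if xmlFile.startswith(inputDir):
        print("custom tracker geometry file:", xmlFile)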
| [
"[email protected]"
] | |
bea1166d0ce0c63d8db6eb232de74347ba96c94e | 7d9cf43f7c17809420eae756b808cf4efbaafdb3 | /tests/plugins/test_tk.py | 04ab77d218f1e56938a7c3a03b34cb7944b05236 | [] | no_license | dahool/big-brother-bot | 8e23adf0e791188dc850bc22628234e654dca6b7 | a3818374e5a410e5a54576f91dace069250684a8 | refs/heads/master | 2020-12-24T22:10:02.459371 | 2012-03-03T17:51:34 | 2012-03-03T17:51:34 | 1,544,088 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,661 | py | #
# BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2011 Courgette
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import os
from mock import Mock, patch, sentinel
import unittest
import b3
from b3.plugins.tk import TkPlugin, TkInfo
from b3.config import XmlConfigParser
from tests import B3TestCase
default_plugin_file = os.path.normpath(os.path.join(os.path.dirname(__file__), "../../b3/conf/plugin_tk.xml"))
@patch("threading.Timer")
class Test_Tk_plugin(B3TestCase):
def setUp(self):
super(Test_Tk_plugin, self).setUp()
b3.console.gameName = 'f00'
self.conf = XmlConfigParser()
self.p = TkPlugin(b3.console, self.conf)
def test_onLoadConfig_minimal(self, timer_patcher):
self.conf.setXml(r"""
<configuration plugin="tk">
<settings name="settings">
<set name="max_points">400</set>
<set name="levels">0</set>
<set name="round_grace">7</set>
<set name="issue_warning">sfire</set>
</settings>
<settings name="level_0">
<set name="kill_multiplier">2</set>
<set name="damage_multiplier">1</set>
<set name="ban_length">2</set>
</settings>
</configuration>
""")
self.p = TkPlugin(b3.console, self.conf)
self.p.onLoadConfig()
self.assertEqual(400, self.p._maxPoints)
self.assertEqual(0, self.p._maxLevel)
self.assertEqual(7, self.p._round_grace)
self.assertEqual('sfire', self.p._issue_warning)
self.assertTrue(self.p._private_messages)
def test_onLoadConfig(self, timer_patcher):
self.conf.setXml(r"""
<configuration plugin="tk">
<settings name="settings">
<set name="max_points">350</set>
<set name="levels">0,1,2</set>
<set name="round_grace">3</set>
<set name="issue_warning">foo</set>
<set name="private_messages">off</set>
</settings>
<settings name="level_0">
<set name="kill_multiplier">2</set>
<set name="damage_multiplier">1</set>
<set name="ban_length">2</set>
</settings>
<settings name="level_1">
<set name="kill_multiplier">2</set>
<set name="damage_multiplier">1</set>
<set name="ban_length">2</set>
</settings>
<settings name="level_2">
<set name="kill_multiplier">1</set>
<set name="damage_multiplier">0.5</set>
<set name="ban_length">1</set>
</settings>
</configuration>
""")
self.p = TkPlugin(b3.console, self.conf)
self.p.onLoadConfig()
self.assertEqual(350, self.p._maxPoints)
self.assertEqual(2, self.p._maxLevel)
self.assertEqual(3, self.p._round_grace)
self.assertEqual('foo', self.p._issue_warning)
self.assertFalse(self.p._private_messages)
@unittest.skipUnless(os.path.exists(default_plugin_file), reason="cannot get default plugin config file at %s" % default_plugin_file)
class Test_Tk_default_config(B3TestCase):
def setUp(self):
super(Test_Tk_default_config, self).setUp()
b3.console.gameName = 'f00'
self.conf = XmlConfigParser()
self.conf.load(default_plugin_file)
self.p = TkPlugin(b3.console, self.conf)
self.p.onLoadConfig()
def test(self):
self.assertEqual("sfire", self.p._issue_warning)
self.assertEqual(7, self.p._round_grace)
self.assertEqual(40, self.p._maxLevel)
self.assertEqual(400, self.p._maxPoints)
self.assertEqual({
0: (2.0, 1.0, 2),
1: (2.0, 1.0, 2),
2: (1.0, 0.5, 1),
20: (1.0, 0.5, 0),
40: (0.75, 0.5, 0)
}, self.p._levels)
self.assertTrue(self.p._private_messages)
class Test_TkInfo(unittest.TestCase):
def setUp(self):
self.my_cid = 1
self.mock_plugin = Mock(name="plugin", spec=TkPlugin)
self.info = TkInfo(self.mock_plugin, self.my_cid)
def test_construct(self):
self.assertIsNone(self.info.lastAttacker)
self.assertEqual({}, self.info.attackers)
self.assertEqual({}, self.info.attacked)
self.assertEqual(0, self.info.points)
def test_damage(self):
self.assertNotIn(2, self.info._attacked)
self.info.damage(cid=2, points=5)
self.assertTrue(self.info._attacked[2])
def test_damaged(self):
cidA = 3
self.assertNotIn(cidA, self.info._attackers)
self.info.damaged(cidA, points=15)
self.assertEqual(15, self.info._attackers[cidA])
self.info.damaged(cidA, points=5)
self.assertEqual(20, self.info._attackers[cidA])
cidB = 2
self.info.damaged(cidB, points=7)
self.assertEqual(20, self.info._attackers[cidA])
self.assertEqual(7, self.info._attackers[cidB])
def test_grudge(self):
cid = 4
self.assertNotIn(cid, self.info._grudged)
self.assertFalse(self.info.isGrudged(cid))
self.info.grudge(cid=cid)
self.assertIn(cid, self.info._grudged)
self.assertTrue(self.info.isGrudged(cid))
def test_getAttackerPoints(self):
cidA = 2
s = sentinel
self.info._attackers[cidA] = s
self.assertEqual(s, self.info.getAttackerPoints(cidA))
cidB = 3
self.assertEqual(0, self.info.getAttackerPoints(cidB))
def test_points(self):
self.assertEqual(0, self.info.points)
cid2 = 2
cid3 = 3
infos = {
cid2: TkInfo(self.mock_plugin, cid2),
cid3: TkInfo(self.mock_plugin, cid3)
}
        self.mock_plugin.console.clients.getByCID = Mock(side_effect=lambda cid: cid)
        self.mock_plugin.getClientTkInfo = Mock(side_effect=lambda cid: infos[cid])
points_2 = 45
self.info.damage(cid2, points_2)
infos[cid2].damaged(self.my_cid, points_2)
self.assertEqual(points_2, self.info.points)
points_3 = 21
self.info.damage(cid3, points_3)
infos[cid3].damaged(self.my_cid, points_3)
self.assertEqual(points_2 + points_3, self.info.points)
def test_lastAttacker(self):
self.assertIsNone(self.info.lastAttacker)
cid2 = 2
self.info.damaged(cid2, 32)
self.assertEqual(cid2, self.info.lastAttacker)
def test_forgive(self):
cid2 = 2
cid3 = 3
self.info.damaged(cid2, 75)
self.info.damaged(cid3, 47)
self.assertEqual(75, self.info.getAttackerPoints(cid2))
self.assertEqual(47, self.info.getAttackerPoints(cid3))
self.info.forgive(cid2)
self.assertEqual(0, self.info.getAttackerPoints(cid2))
self.assertEqual(47, self.info.getAttackerPoints(cid3))
def test_forgive_last_attacker(self):
cid2 = 2
cid3 = 3
self.info.damaged(cid2, 75)
self.info.damaged(cid3, 47)
self.assertEqual(75, self.info.getAttackerPoints(cid2))
self.assertEqual(47, self.info.getAttackerPoints(cid3))
self.assertEqual(cid3, self.info.lastAttacker)
self.info.forgive(cid3)
self.assertEqual(75, self.info.getAttackerPoints(cid2))
self.assertEqual(0, self.info.getAttackerPoints(cid3))
self.assertNotEqual(cid3, self.info.lastAttacker)
def test_forgiven(self):
self.mock_plugin.console = Mock()
cid2 = 2
self.info._attacked[cid2] = True
self.info._warnings[cid2] = mock_warn = Mock()
self.info.forgiven(cid2)
self.assertNotIn(cid2, self.info._attacked)
self.assertEqual(1, mock_warn.inactive)
mock_warn.save.assert_called_once_with(self.mock_plugin.console)
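# Typical invocation (a sketch -- the module path assumes the b3 source tree):
#   python -m unittest tests.plugins.test_tk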
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
918d35988abeede9ff21191733d53fc0101d01a2 | cc734ab47096dfd38b8cb554ced88c0689c450e8 | /geraGrafico.py | d6fe5e35797531bdbc4ae8026746d5742448f779 | [] | no_license | Claudiocfls/ELE32-lab3 | efaa46bd73ead3e6030d3e231ce577b53a36027b | 366b34d5d66d0555451206697ce6116f363c60cf | refs/heads/master | 2020-04-01T18:50:44.395937 | 2018-11-28T15:23:37 | 2018-11-28T15:23:37 | 153,518,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | import DadosGrafico
import Grafico
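# Load each simulation result file into a DadosGrafico series and plot them all
# on one chart (assumption: the .txt files sit in the current working directory).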
grafico = Grafico.Grafico()
arquivos = [
    "hamming.txt",
    "3, 13, 15, 17.txt",
    "4, 25, 33, 37.txt",
    "6, 117, 127, 155.txt",
    "semcodificacao.txt",
]
for arquivo in arquivos:
    dados = DadosGrafico.DadosGrafico()
    dados.dadosDeArquivo(arquivo)
    grafico.adicionaDados(dados, "ro")
| [
"[email protected]"
] | |
2146a1abe1e044c23653b6572a99383187acf3c3 | 1de7512183974adfc5dbdd30b3bf7e042ea194d9 | /poetries_server_beta.py | b34d1625a90aa1b8b66c3050158c73b812ec84c9 | [] | no_license | articuly/Self-study | d06b0bdcead38282701e4d5118cefd8b83e80241 | 878ed68fc31dc1f5c2f13bcb5d98539264985c17 | refs/heads/master | 2023-05-13T18:34:33.904864 | 2023-05-09T11:03:29 | 2023-05-09T11:03:29 | 223,714,760 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | # coding:utf-8
import socket, time
from poetries import POUETRIES  # import the poem collection from poetries.py
def poetries_server():
"""古诗词服务器"""
delay = 0.1 # 诗词显示速度(字间隔时间)
subjects = [item.split()[0] for item in POUETRIES] # 诗词目录
welcome = '欢迎来到风花雪月古诗词库, 请输入序号后回车以选择你喜欢的诗词\r\n'
    welcome += '输入fast加速,输入slow减速,输入bye退出\r\n\r\n'  # entering quit or exit also shuts the whole server down
for index, subject in enumerate(subjects):
welcome += '%d %s\r\n' % (index + 1, subject)
welcome += '\r\n'
welcome = welcome.encode('gbk')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', 56789))
sock.listen(2)
    running = True
    while running:
c_sock, c_addr = sock.accept()
c_sock.sendall(welcome)
while True:
cmd = b''
while not cmd.endswith(b'\r\n'):
cmd += c_sock.recv(1024)
cmd = cmd.strip()
if cmd in [b'bye', b'quit', b'exit']:
c_sock.sendall('再见\r\n'.encode('gbk'))
c_sock.close()
                running = cmd == b'bye'
break
elif cmd == b'help':
c_sock.sendall(welcome)
elif cmd == b'fast':
delay /= 2
c_sock.sendall('加速设置已完成\r\n'.encode('gbk'))
c_sock.sendall('请选择诗词序号,输入help显示诗词目录:\r\n\r\n'.encode('gbk'))
elif cmd == b'slow':
delay *= 2
c_sock.sendall('减速设置已完成\r\n'.encode('gbk'))
c_sock.sendall('请选择诗词序号,输入help显示诗词目录:\r\n\r\n'.encode('gbk'))
else:
try:
index = int(cmd) - 1
assert -1 < index < len(POUETRIES)
except:
c_sock.sendall('请输入有效的诗词序号,输入help显示诗词目录:\r\n\r\n'.encode('gbk'))
continue
c_sock.sendall(b'--------------------------\r\n')
for line in POUETRIES[index].split('\n'):
for word in line:
c_sock.sendall(word.encode('gbk'))
time.sleep(delay)
c_sock.sendall(b'\r\n')
c_sock.sendall(b'--------------------------\r\n')
c_sock.sendall('请选择诗词序号,输入help显示诗词目录:\r\n\r\n'.encode('gbk'))
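# Minimal client sketch for manual testing (assumptions: the server below is
# already running on 127.0.0.1:56789 and replies are GBK-encoded):
#   import socket
#   s = socket.create_connection(('127.0.0.1', 56789))
#   print(s.recv(4096).decode('gbk'))  # welcome banner and poem index
#   s.sendall(b'1\r\n')                # request poem number 1
#   s.sendall(b'bye\r\n')              # disconnect without stopping the server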
if __name__ == '__main__':
poetries_server()
| [
"[email protected]"
] | |
767b0dd7cd5fd2f0d94d01ece471a26c8cedee83 | 8a699595e7f156b1ade42f6042900b3331831fbf | /src/transformers/models/rag/modeling_tf_rag.py | 30f50a29ff404da24f1c6aee56808ad35fbc856b | [
"Apache-2.0"
] | permissive | stas00/transformers | ab654371a387c5883fc882dd0286177875d6d3b4 | 7c5d79912a21880ce13d77881940458e90d98917 | refs/heads/master | 2023-02-16T00:22:41.298155 | 2022-04-08T20:55:42 | 2022-04-08T20:55:42 | 278,214,696 | 6 | 0 | Apache-2.0 | 2022-01-28T18:39:00 | 2020-07-08T23:24:49 | Python | UTF-8 | Python | false | false | 89,780 | py | # coding=utf-8
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFRAG model implementation."""
from dataclasses import dataclass
from typing import List, Optional, Tuple
import numpy as np
import tensorflow as tf
from ...configuration_utils import PretrainedConfig
from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, shape_list, unpack_inputs
from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "RagConfig"
@dataclass
class TFRetrievAugLMMarginOutput(ModelOutput):
"""
Base class for retriever augmented marginalized models outputs.
Args:
loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
each vocabulary token.
past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
            sequence_length, embed_size_per_head)`.
Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
(see `past_key_values` input) to speed up sequential decoding.
doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`.
retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
the `doc_scores`.
retrieved_doc_ids (`tf.Tensor` (int32) of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
The indexes of the embedded documents retrieved by the retriever.
context_input_ids (`tf.Tensor`(int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
context_attention_mask (`tf.Tensor` (int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
model.
question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
doc_scores: Optional[tf.Tensor] = None
retrieved_doc_embeds: Optional[tf.Tensor] = None
retrieved_doc_ids: Optional[tf.Tensor] = None
context_input_ids: Optional[tf.Tensor] = None
context_attention_mask: Optional[tf.Tensor] = None
question_encoder_last_hidden_state: Optional[tf.Tensor] = None
question_enc_hidden_states: Optional[Tuple[tf.Tensor]] = None
question_enc_attentions: Optional[Tuple[tf.Tensor]] = None
generator_enc_last_hidden_state: Optional[tf.Tensor] = None
generator_enc_hidden_states: Optional[Tuple[tf.Tensor]] = None
generator_enc_attentions: Optional[Tuple[tf.Tensor]] = None
generator_dec_hidden_states: Optional[Tuple[tf.Tensor]] = None
generator_dec_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFRetrievAugLMOutput(ModelOutput):
"""
Args:
logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
each vocabulary token.
past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
            sequence_length, embed_size_per_head)`.
Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
(see `past_key_values` input) to speed up sequential decoding.
doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`.
retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
the `doc_scores`.
retrieved_doc_ids (`tf.Tensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
The indexes of the embedded documents retrieved by the retriever.
context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
model.
question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
"""
logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
doc_scores: Optional[tf.Tensor] = None
retrieved_doc_embeds: Optional[tf.Tensor] = None
retrieved_doc_ids: Optional[tf.Tensor] = None
context_input_ids: Optional[tf.Tensor] = None
context_attention_mask: Optional[tf.Tensor] = None
question_encoder_last_hidden_state: Optional[tf.Tensor] = None
question_enc_hidden_states: Optional[Tuple[tf.Tensor]] = None
question_enc_attentions: Optional[Tuple[tf.Tensor]] = None
generator_enc_last_hidden_state: Optional[tf.Tensor] = None
generator_enc_hidden_states: Optional[Tuple[tf.Tensor]] = None
generator_enc_attentions: Optional[Tuple[tf.Tensor]] = None
generator_dec_hidden_states: Optional[Tuple[tf.Tensor]] = None
generator_dec_attentions: Optional[Tuple[tf.Tensor]] = None
class TFRagPreTrainedModel(TFPreTrainedModel):
r"""
RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP
Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
    RAG is a retriever-augmented model that encapsulates three components: a question encoder, a dataset retriever and
    a generator. The encoder and generator are trainable, while the retriever is just an indexed dataset.
"""
config_class = RagConfig
base_model_prefix = "rag"
_keys_to_ignore_on_load_missing = [r"position_ids"]
@classmethod
def from_pretrained_question_encoder_generator(
cls,
question_encoder_pretrained_model_name_or_path: str = None,
generator_pretrained_model_name_or_path: str = None,
retriever: RagRetriever = None,
*model_args,
**kwargs
) -> TFPreTrainedModel:
r"""
Instantiates an question encoder and a generator from one or two base classes of the library from pretrained
model checkpoints.
Params:
question_encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the question encoder. Can be either:
- A string with the *shortcut name* of a pretrained model to load from cache or download, e.g.,
`bert-base-uncased`.
- A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g.,
`dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case,
`question_encoder_from_pt` should be set to `True`.
generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the generator. Can be either:
- A string with the *shortcut name* of a pretrained model to load from cache or download, e.g.,
`t5-small`.
- A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g.,
`facebook/bart-base`.
- A path to a *directory* containing model weights saved using
[`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case,
`generator_from_pt` should be set to `True`.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
retriever ([`RagRetriever`], *optional*):
The retriever to use.
kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the question_encoder configuration, use the prefix *question_encoder_* for each
configuration parameter.
- To update the generator configuration, use the prefix *generator_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import RagRetriever, TFRagModel
>>> # initialize a RAG from two pretrained models.
>>> model = TFRagModel.from_pretrained_question_encoder_generator(
... "facebook/dpr-question_encoder-single-nq-base", "t5-small"
... )
>>> # alternatively, initialize from pytorch pretrained models can also be done
>>> model = TFRagModel.from_pretrained_question_encoder_generator(
... "facebook/dpr-question_encoder-single-nq-base",
... "facebook/bart-base",
... generator_from_pt=True,
... question_encoder_from_pt=True,
... )
>>> # saving model after fine-tuning
>>> model.save_pretrained("./rag")
>>> # load retriever
>>> retriever = RagRetriever.from_pretrained(
... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
... )
>>> # load fine-tuned model with retriever
>>> model = TFRagModel.from_pretrained("./rag", retriever=retriever)
```"""
kwargs_question_encoder = {
argument[len("question_encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("question_encoder_")
}
kwargs_generator = {
argument[len("generator_") :]: value
for argument, value in kwargs.items()
if argument.startswith("generator_")
}
# remove question_encoder, generator kwargs from kwargs
for key in kwargs_question_encoder.keys():
del kwargs["question_encoder_" + key]
for key in kwargs_generator.keys():
del kwargs["generator_" + key]
# Load and initialize the question_encoder and generator
# The distinction between question_encoder and generator at the model level is made
# by the value of the flag `is_generator` that we need to set correctly.
question_encoder = kwargs_question_encoder.pop("model", None)
if question_encoder is None:
assert (
question_encoder_pretrained_model_name_or_path is not None
), "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to be defined"
from ..auto.modeling_tf_auto import TFAutoModel
if "config" not in kwargs_question_encoder:
from ..auto.configuration_auto import AutoConfig
question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path)
kwargs_question_encoder["config"] = question_encoder_config
question_encoder = TFAutoModel.from_pretrained(
question_encoder_pretrained_model_name_or_path,
name="question_encoder",
load_weight_prefix=cls.load_weight_prefix,
*model_args,
**kwargs_question_encoder,
)
generator = kwargs_generator.pop("generator", None)
if generator is None:
assert (
generator_pretrained_model_name_or_path is not None
), "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has to be defined"
from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM
if "config" not in kwargs_generator:
from ..auto.configuration_auto import AutoConfig
generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path)
kwargs_generator["config"] = generator_config
generator = TFAutoModelForSeq2SeqLM.from_pretrained(
generator_pretrained_model_name_or_path,
name="generator",
load_weight_prefix=cls.load_weight_prefix,
**kwargs_generator,
)
# instantiate config with corresponding kwargs
config = kwargs.get("config", None)
if config is None:
config = RagConfig.from_question_encoder_generator_configs(
question_encoder.config, generator.config, **kwargs
)
return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
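# Note on the kwargs routing above: a hypothetical call such as
# `from_pretrained_question_encoder_generator(..., question_encoder_from_pt=True,
# generator_from_pt=True)` strips the `question_encoder_`/`generator_` prefixes and
# forwards `from_pt=True` only to the matching `from_pretrained` call.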
RAG_START_DOCSTRING = r"""
RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator.
During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract
relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to
the generator.
The question encoder can be any *autoencoding* model, preferably [`TFDPRQuestionEncoder`], and the generator can be
any *seq2seq* model, preferably [`TFBartForConditionalGeneration`].
The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the
    outputs of a retriever in multiple steps---see examples for more details. The model is compatible with any
    *autoencoding* model as the `question_encoder` and any *seq2seq* model with a language model head as the `generator`.
It has been tested with [`TFDPRQuestionEncoder`] as the `question_encoder` and [`TFBartForConditionalGeneration`]
as the `generator`.
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a Tensorflow [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to
general usage and behavior.
    The model is in a developing state: it is currently fully supported in eager mode only, and may not be exportable
    in SavedModel format.
Args:
config ([`RagConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
question_encoder ([`TFPreTrainedModel`]):
An encoder model compatible with the faiss index encapsulated by the `retriever`.
generator ([`TFPreTrainedModel`]):
A seq2seq model used as the generator in the RAG architecture.
retriever ([`RagRetriever`]):
A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
"""
RAG_FORWARD_INPUTS_DOCSTRING = r"""
Args:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
obtain the indices.
attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
        encoder_outputs (`tuple(tuple(tf.Tensor))`, *optional*):
Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
*optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
generator's encoder.
            Used by the [`TFRagModel`] model during decoding.
decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Provide for generation tasks. `None` by default, construct as per instructions for the generator model
you're using with your RAG instance.
        decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
past_key_values (`tuple(tuple(tf.Tensor))`):
Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and
`past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are used
            in the [`RagTokenForGeneration`] model during decoding.
doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
            `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
            has to be provided to the forward pass. `doc_scores` can be computed via
`question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
            If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
            forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
        context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.
            If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the
            forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
use_cache (`bool`, *optional*, defaults to `True`):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
        output_retrieved (`bool`, *optional*):
Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask`. See returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`TFRetrievAugLMOutput`] instead of a plain tuple.
        n_docs (`int`, *optional*, defaults to `config.n_docs`):
Number of documents to retrieve and/or number of documents for which to generate an answer.
"""
@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
class TFRagModel(TFRagPreTrainedModel):
load_weight_prefix = "tf_rag_model_1"
def __init__(
self,
config: Optional[PretrainedConfig] = None,
question_encoder: Optional[TFPreTrainedModel] = None,
generator: Optional[TFPreTrainedModel] = None,
retriever: Optional = None,
load_weight_prefix: Optional[str] = None,
**kwargs,
):
assert config is not None or (
question_encoder is not None and generator is not None
), "Either a configuration or an question_encoder and a generator has to be provided."
if config is None:
config = RagConfig.from_question_encoder_generator_configs(
question_encoder.config, generator.config, **kwargs
)
else:
assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}"
super().__init__(config, **kwargs)
if question_encoder is None:
from ..auto.modeling_tf_auto import TFAutoModel
question_encoder = TFAutoModel.from_config(config.question_encoder, name="question_encoder")
if generator is None:
from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM
load_weight_prefix = load_weight_prefix if load_weight_prefix is not None else self.load_weight_prefix
generator = TFAutoModelForSeq2SeqLM.from_config(
config.generator, name="generator", load_weight_prefix=load_weight_prefix + "/generator"
)
self.retriever = retriever
if self.retriever is not None:
assert isinstance(
retriever, RagRetriever
), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`"
self.retriever = retriever
self.question_encoder = question_encoder
self.generator = generator
def set_retriever(self, retriever: RagRetriever):
self.retriever = retriever
@unpack_inputs
@add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFRetrievAugLMOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids=None,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
past_key_values=None,
doc_scores=None,
context_input_ids=None,
context_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
output_retrieved=None,
n_docs=None,
return_dict=None,
training=False,
**kwargs
):
r"""
Returns:
Example:
```python
>>> from transformers import RagTokenizer, RagRetriever, TFRagModel
        >>> import tensorflow as tf
>>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
>>> retriever = RagRetriever.from_pretrained(
... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
... )
>>> # initialize with RagRetriever to do everything in one forward call
>>> model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True)
>>> input_dict = tokenizer.prepare_seq2seq_batch(
... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
... )
>>> input_ids = input_dict["input_ids"]
>>> outputs = model(input_ids)
```"""
assert (
"decoder_cached_states" not in kwargs
), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py
# aliasing to minimize code changing
n_docs = n_docs if n_docs is not None else self.config.n_docs
# whether retriever has to be used
has_to_retrieve = (
self.retriever is not None
and (context_input_ids is None or context_attention_mask is None or doc_scores is None)
and encoder_outputs is None
)
# encoder_outputs are pre-computed during RAG-token generation
if encoder_outputs is None:
if has_to_retrieve:
question_enc_outputs = self.question_encoder(
input_ids, attention_mask=attention_mask, return_dict=True, training=training
)
# see https://github.com/huggingface/transformers/blob/main/src/transformers/models/dpr/modeling_tf_dpr.py#L91
question_encoder_last_hidden_state = question_enc_outputs[
0
] # hidden states of question encoder => pooler_output
retriever_outputs = self.retriever(
input_ids,
question_encoder_last_hidden_state.numpy(),
prefix=self.generator.config.prefix,
n_docs=n_docs,
return_tensors="tf",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (
retriever_outputs["context_input_ids"],
retriever_outputs["context_attention_mask"],
retriever_outputs["retrieved_doc_embeds"],
retriever_outputs["doc_ids"],
)
context_input_ids = tf.cast(context_input_ids, tf.int32)
context_attention_mask = tf.cast(context_attention_mask, tf.int32)
retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
retrieved_doc_ids = tf.cast(retrieved_doc_ids, tf.int32)
# compute doc_scores
doc_scores = tf.squeeze(
tf.matmul(
tf.expand_dims(question_encoder_last_hidden_state, axis=1),
retrieved_doc_embeds,
transpose_b=True,
),
axis=1,
)
else:
assert (
context_input_ids is not None
), "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
assert (
context_attention_mask is not None
), "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
assert (
doc_scores is not None
), "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
assert (
doc_scores is not None
), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function."
        assert (
            doc_scores.shape[1] % n_docs
        ) == 0, f"The second dimension of `doc_scores` should be a multiple of `n_docs`={n_docs}, but is {doc_scores.shape[1]}."
# Decoder input without context documents
if decoder_input_ids is not None:
decoder_input_ids = tf.repeat(decoder_input_ids, n_docs, axis=0)
if decoder_attention_mask is not None:
decoder_attention_mask = tf.repeat(decoder_attention_mask, n_docs, axis=0)
gen_outputs = self.generator(
context_input_ids,
attention_mask=context_attention_mask,
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
return_dict=True,
training=training,
)
if not has_to_retrieve:
question_encoder_last_hidden_state = None
question_enc_hidden_states = None
question_enc_attentions = None
retrieved_doc_embeds = None
retrieved_doc_ids = None
else:
question_enc_hidden_states = question_enc_outputs.hidden_states
question_enc_attentions = question_enc_outputs.attentions
if not has_to_retrieve or not output_retrieved:
# don't output retrieved docs
            context_input_ids = None
context_attention_mask = None
retrieved_doc_embeds = None
retrieved_doc_ids = None
return TFRetrievAugLMOutput(
logits=gen_outputs.logits,
doc_scores=doc_scores,
past_key_values=gen_outputs.past_key_values,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
retrieved_doc_embeds=retrieved_doc_embeds,
retrieved_doc_ids=retrieved_doc_ids,
question_encoder_last_hidden_state=question_encoder_last_hidden_state,
question_enc_hidden_states=question_enc_hidden_states,
question_enc_attentions=question_enc_attentions,
generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,
generator_enc_hidden_states=gen_outputs.encoder_hidden_states,
generator_enc_attentions=gen_outputs.encoder_attentions,
generator_dec_hidden_states=gen_outputs.decoder_hidden_states,
generator_dec_attentions=gen_outputs.decoder_attentions,
)
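# Standalone sketch of the doc-score computation performed in `call` above
# (hypothetical tensors, not tied to any checkpoint):
#   question_hidden_states: (batch, dim); retrieved_doc_embeds: (batch, n_docs, dim)
#   doc_scores = tf.squeeze(
#       tf.matmul(tf.expand_dims(question_hidden_states, axis=1),
#                 retrieved_doc_embeds, transpose_b=True),
#       axis=1)  # -> (batch, n_docs)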
@add_start_docstrings_to_model_forward(
"""
A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.
""",
RAG_START_DOCSTRING,
)
class TFRagTokenForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss):
load_weight_prefix = "tf_rag_token_for_generation_1/rag"
def __init__(
self,
config: Optional[PretrainedConfig] = None,
question_encoder: Optional[TFPreTrainedModel] = None,
generator: Optional[TFPreTrainedModel] = None,
retriever: Optional = None,
**kwargs,
):
assert config is not None or (
question_encoder is not None and generator is not None
), "Either a configuration or an encoder and a generator has to be provided."
if config is None:
config = RagConfig.from_question_encoder_generator_configs(
question_encoder.config, generator.config, **kwargs
)
super().__init__(config)
# instantiate model
self.rag = TFRagModel(
config=config,
question_encoder=question_encoder,
generator=generator,
retriever=retriever,
load_weight_prefix=self.load_weight_prefix,
name="rag",
)
def set_retriever(self, retriever: RagRetriever):
self.rag.retriever = retriever
# Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_bart.py
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
use_cache=None,
encoder_outputs=None,
doc_scores=None,
n_docs=None,
**kwargs
):
if past is not None:
# if past is defined use only last decoder_input_ids
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None,
"encoder_outputs": encoder_outputs,
"doc_scores": doc_scores,
"context_attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"past_key_values": past,
"use_cache": use_cache,
"do_marginalize": True,
"n_docs": n_docs,
}
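    # In the dict above, `input_ids` is deliberately None: during generation the
    # question encoding and retrieval happen once up front, so decoding only needs
    # `encoder_outputs`, `doc_scores`, the context attention mask and the cache.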
@property
def retriever(self):
return self.rag.retriever
@property
def generator(self):
return self.rag.generator
@property
def question_encoder(self):
return self.rag.question_encoder
@staticmethod
def _reorder_cache(past, beam_idx):
"""Reorders cache for generation. BART-inspired but we need to take care of the extra dimension for docs"""
def _reorder_stacked(hidden_states, new_order):
n_docs = hidden_states.shape[0] // new_order.shape[0]
hidden_states = tf.reshape(hidden_states, (-1, n_docs, *hidden_states.shape[1:]))
hidden_states = tf.gather(hidden_states, new_order, axis=0)
result = tf.reshape(hidden_states, (-1, *hidden_states.shape[2:]))
return result
reordered_past = ()
for layer_past in past:
# get the correct batch idx from decoder layer's batch dim for cross and self-attn
reordered_past += (tuple(_reorder_stacked(past_state, beam_idx) for past_state in layer_past),)
return reordered_past
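    # Worked example for `_reorder_stacked` (illustrative values, not from the
    # source): with batch_size * num_beams = 2 and n_docs = 3, a cached state of
    # shape (6, heads, seq, dim) is viewed as (2, 3, heads, seq, dim), gathered
    # along axis 0 with `beam_idx`, then flattened back to (6, heads, seq, dim),
    # so each selected beam keeps its own contiguous block of n_docs states.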
def marginalize(self, seq_logits, doc_scores, n_docs=None):
n_docs = n_docs if n_docs is not None else self.config.n_docs
# RAG-token marginalization
seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
seq_logprobs = tf.reshape(seq_logprobs, [seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]])
doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)
doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1)
doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # twice
log_prob_sum = seq_logprobs + doc_logprobs
return tf.reduce_logsumexp(log_prob_sum, axis=1)
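    # Shape walk-through of the marginalization above: seq_logits
    # (batch * n_docs, tgt_len, vocab) -> log-softmax -> reshape to
    # (batch, n_docs, tgt_len, vocab); doc_scores (batch, n_docs) -> log-softmax ->
    # expand to (batch, n_docs, 1, 1). Their sum is log p(token | doc) + log p(doc),
    # and reduce_logsumexp over the n_docs axis yields the RAG-token logits.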
@unpack_inputs
@add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
doc_scores=None,
context_input_ids=None,
context_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
output_retrieved=None,
n_docs=None,
do_marginalize=None,
labels=None,
reduce_loss=None,
return_dict=None,
training=False,
**kwargs # needs kwargs for generation
):
r"""
do_marginalize (`bool`, *optional*):
            If `True`, the logits are marginalized over all documents by making use of `tf.nn.log_softmax`.
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the cross entropy classification loss according to Rag-Token model formulation See
https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Token formulation. Indices should be
in `[0, ..., config.vocab_size - 1]`.
reduce_loss (`bool`, *optional*):
Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum`
operation.
kwargs (`Dict[str, any]`, optional, defaults to *{}*):
Legacy dictionary, which is required so that model can use *generate()* function.
Returns:
Example:
```python
>>> import tensorflow as tf
>>> from transformers import RagTokenizer, RagRetriever, TFRagTokenForGeneration
>>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
>>> retriever = RagRetriever.from_pretrained(
... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
... )
>>> # initialize with RagRetriever to do everything in one forward call
>>> model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True)
>>> input_dict = tokenizer.prepare_seq2seq_batch(
... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
... )
>>> outputs = model(input_dict, output_retrieved=True)
>>> # or use retriever separately
>>> # 1. Encode
>>> input_ids = input_dict["input_ids"]
>>> question_hidden_states = model.question_encoder(input_ids)[0]
>>> # 2. Retrieve
>>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf")
>>> doc_scores = tf.squeeze(
... tf.matmul(
... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True
... ),
... axis=1,
... )
>>> # 3. Forward to generator
>>> outputs = model(
... inputs=None,
... context_input_ids=docs_dict["context_input_ids"],
... context_attention_mask=docs_dict["context_attention_mask"],
... doc_scores=doc_scores,
... decoder_input_ids=input_dict["labels"],
... )
>>> # or directly generate
>>> generated = model.generate(
... context_input_ids=docs_dict["context_input_ids"],
... context_attention_mask=docs_dict["context_attention_mask"],
... doc_scores=doc_scores,
... )
>>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
```"""
assert (
"decoder_cached_states" not in kwargs
), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py
do_marginalize = do_marginalize if do_marginalize else self.config.do_marginalize
reduce_loss = reduce_loss if reduce_loss else self.config.reduce_loss
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = labels
use_cache = False
outputs = self.rag(
input_ids,
attention_mask=attention_mask,
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_retrieved=output_retrieved,
n_docs=n_docs,
training=training,
)
loss = None
logits = outputs.logits
if labels is not None:
assert decoder_input_ids is not None
loss = self.get_nll(
outputs.logits,
outputs.doc_scores,
labels,
reduce_loss=reduce_loss,
epsilon=self.config.label_smoothing,
n_docs=n_docs,
)
if do_marginalize:
logits = self.marginalize(logits, outputs.doc_scores, n_docs)
return TFRetrievAugLMMarginOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
doc_scores=outputs.doc_scores,
context_input_ids=outputs.context_input_ids,
context_attention_mask=outputs.context_attention_mask,
retrieved_doc_embeds=outputs.retrieved_doc_embeds,
retrieved_doc_ids=outputs.retrieved_doc_ids,
question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
question_enc_hidden_states=outputs.question_enc_hidden_states,
question_enc_attentions=outputs.question_enc_attentions,
generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
generator_enc_hidden_states=outputs.generator_enc_hidden_states,
generator_enc_attentions=outputs.generator_enc_attentions,
generator_dec_hidden_states=outputs.generator_dec_hidden_states,
generator_dec_attentions=outputs.generator_dec_attentions,
)
def generate(
self,
input_ids: Optional[tf.Tensor] = None,
attention_mask: Optional[tf.Tensor] = None,
context_input_ids=None,
context_attention_mask=None,
doc_scores=None,
max_length=None,
min_length=None,
early_stopping=None,
use_cache=None,
num_beams=None,
bos_token_id=None,
pad_token_id=None,
eos_token_id=None,
length_penalty=None,
no_repeat_ngram_size=None,
bad_words_ids=None,
num_return_sequences=None,
decoder_start_token_id=None,
n_docs=None,
output_scores=None,
output_attentions=None,
output_hidden_states=None,
return_dict_in_generate=None,
**model_kwargs
):
"""
Implements TFRAG token decoding.
Args:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
The sequence used as a prompt for the generation. If `input_ids` is not passed, then
`context_input_ids` has to be provided.
attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
                If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
                If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to
                the forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`.
                If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the forward
                pass. `doc_scores` can be computed via `question_encoder_last_hidden_state` and `retrieved_doc_embeds`.
max_length (`int`, *optional*, defaults to 20):
The maximum length of the sequence to be generated.
min_length (`int`, *optional*, defaults to 10):
The minimum length of the sequence to be generated.
early_stopping (`bool`, *optional*, defaults to `False`):
Whether or not to stop the beam search when at least `num_beams` sentences are finished per batch or
not.
            use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
bos_token_id (`int`, *optional*):
The id of the *beginning-of-sequence* token.
eos_token_id (`int`, *optional*):
The id of the *end-of-sequence* token.
length_penalty (`float`, *optional*, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty.
Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in
order to encourage the model to produce longer sequences.
no_repeat_ngram_size (`int`, *optional*, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
bad_words_ids(`List[int]`, *optional*):
List of token ids that are not allowed to be generated. In order to get the tokens of the words that
should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.
num_beams (`int`, *optional*, defaults to 1):
Number of beams for beam search. 1 means no beam search.
            num_return_sequences (`int`, *optional*, defaults to 1):
                The number of independently computed returned sequences for each element in the batch. Note that this
                is not the value we pass to the `generator`'s [`~generation_utils.GenerationMixin.generate`] function,
                where we set `num_return_sequences` to `num_beams`.
            decoder_start_token_id (`int`, *optional*):
                If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token.
            n_docs (`int`, *optional*, defaults to `config.n_docs`):
                Number of documents to retrieve and/or number of documents for which to generate an answer.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            model_kwargs:
Additional model specific kwargs will be forwarded to the `forward` function of the model.
Return:
`tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The
second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early
due to the `eos_token_id`.
"""
# set default parameters
n_docs = n_docs if n_docs is not None else self.config.n_docs
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
bos_token_id = bos_token_id if bos_token_id is not None else self.config.generator.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.generator.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.generator.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id is not None
else self.config.generator.decoder_start_token_id
)
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
model_kwargs["output_scores"] = output_scores
model_kwargs["output_attentions"] = output_attentions
model_kwargs["output_hidden_states"] = output_hidden_states
model_kwargs["encoder_attentions"] = None
model_kwargs["encoder_hidden_states"] = None
# retrieve docs
if self.retriever is not None and context_input_ids is None:
question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = self.retriever(
input_ids,
question_hidden_states.numpy().astype(np.float32),
prefix=self.generator.config.prefix,
n_docs=n_docs,
return_tensors="tf",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
context_input_ids = tf.cast(context_input_ids, tf.int32)
context_attention_mask = tf.cast(context_attention_mask, tf.int32)
retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
# compute doc_scores
doc_scores = tf.matmul(
tf.expand_dims(question_hidden_states, axis=1), retrieved_doc_embeds, transpose_b=True
)
doc_scores = tf.squeeze(doc_scores, axis=1)
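            # doc_scores now has shape (batch_size, n_docs): one retrieval score per
            # (question, document) pair, the inner product of the question embedding
            # with each retrieved document embedding.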
assert (
context_input_ids.shape[0] % n_docs
) == 0, f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is {context_input_ids.shape[0]}."
batch_size = context_input_ids.shape[0] // n_docs
encoder = self.rag.generator.get_encoder()
encoder_outputs = encoder(
input_ids=context_input_ids,
attention_mask=context_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
decoder_input_ids = tf.fill(
(batch_size * num_beams, 1),
tf.cast(decoder_start_token_id, tf.int32),
)
last_hidden_state = encoder_outputs["last_hidden_state"]
def extend_enc_output(tensor, num_beams=None):
"""
            Broadcast tensor with `num_beams` replicas, keeping the correct order.
            Input: tensor of shape (batch_size*n_docs, d)
            Output: tensor of shape (batch_size*num_beams*n_docs, d)
"""
# expand batch_size & num_beam dimensions
d_shape_list = tensor.shape[1:]
# split n_docs dimensions
new_shape = (batch_size, 1, n_docs) + d_shape_list
tensor = tf.reshape(tensor, new_shape)
# repeat same last hidden states over `num_beams` dimension
new_shape = (batch_size, num_beams, n_docs) + d_shape_list
tensor = tf.broadcast_to(tensor, new_shape)
# merge `batch_size`, `num_beams`, `num_docs` dims again
new_shape = (batch_size * num_beams * n_docs,) + d_shape_list
return tf.reshape(tensor, new_shape)
# correctly extend last_hidden_state and attention mask
context_attention_mask = extend_enc_output(context_attention_mask, num_beams=num_beams)
encoder_outputs["last_hidden_state"] = extend_enc_output(last_hidden_state, num_beams=num_beams)
doc_scores = tf.repeat(doc_scores, num_beams, axis=0)
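        # After the three extensions above, encoder states, attention mask and
        # doc_scores are replicated once per beam, so every beam hypothesis attends
        # over the same n_docs retrieved passages.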
# define start_len & additional parameters
cur_len = 1
vocab_size = self.config.generator.vocab_size
model_kwargs["doc_scores"] = doc_scores
model_kwargs["encoder_outputs"] = encoder_outputs
model_kwargs["n_docs"] = n_docs
# not needed. TODO(PVP): change after generate refactor
do_sample = False
temperature = self.config.temperature
top_k = self.config.top_k
top_p = self.config.top_p
repetition_penalty = self.config.repetition_penalty
if num_beams > 1:
return self._generate_beam_search(
decoder_input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
attention_mask=context_attention_mask,
use_cache=use_cache,
forced_bos_token_id=None,
forced_eos_token_id=None,
return_dict_in_generate=return_dict_in_generate,
**model_kwargs, # encoder_outputs is here as in Pytorch's version
)
else:
pre_processor = self._get_logits_processor(
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
forced_bos_token_id=None,
forced_eos_token_id=None,
)
model_kwargs["attention_mask"] = context_attention_mask
if model_kwargs.get("encoder_attentions", None) is None:
model_kwargs.pop("encoder_attentions", None)
if model_kwargs.get("encoder_hidden_states", None) is None:
model_kwargs.pop("encoder_hidden_states", None)
model_kwargs.pop("output_hidden_states", None)
model_kwargs.pop("output_attentions", None)
model_kwargs.pop("output_scores", None)
return self.greedy_search(
input_ids=decoder_input_ids,
max_length=max_length,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
logits_processor=pre_processor,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**model_kwargs,
)
def get_input_embeddings(self):
return self.rag.generator.get_input_embeddings()
def get_output_embeddings(self):
return self.rag.generator.get_output_embeddings()
# Adapted from tf_t5's & tf_bart's _shift_right
def shift_tokens_right(self, input_ids, start_token_id=None):
"""Shift input ids one token to the right, and pad with start_token_id"""
if start_token_id is None:
start_token_id = self.generator.config.decoder_start_token_id
assert (
start_token_id is not None
), "self.generator.config.decoder_start_token_id has to be defined. In Rag we commonly use Bart as generator, see Bart docs for more information"
pad_token_id = self.generator.config.pad_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
shifted_input_ids = tf.cast(input_ids, tf.int32)
start_tokens = tf.fill((shape_list(shifted_input_ids)[0], 1), start_token_id)
shifted_input_ids = tf.concat([start_tokens, shifted_input_ids[:, :-1]], -1)
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids = tf.where(
shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
)
# "Verify that `labels` has only positive values and -100"
assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, tf.int32))
# Make sure the assertion op is called by wrapping the result in an identity no-op
with tf.control_dependencies([assert_gte0]):
shifted_input_ids = tf.identity(shifted_input_ids)
return shifted_input_ids
# nll stands for 'negative log likelihood'
def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
n_docs = n_docs if n_docs is not None else self.config.n_docs
# shift tokens left (from original Pytorch's version)
target = tf.concat([target[:, 1:], tf.fill([target.shape[0], 1], self.config.generator.pad_token_id)], axis=1)
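        # After the left shift, position t of `target` holds the token that the
        # logits at position t should predict (standard next-token alignment).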
rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
loss = self.hf_compute_loss(target, rag_logprobs, from_logits=True, reduce_loss=reduce_loss)
return loss
    # Adapted from modeling_tf_bart; adds smooth_loss to match the pytorch version
def hf_compute_loss(self, labels, y_pred, smooth_epsilon=0.0, from_logits=True, reduce_loss=False):
"""CrossEntropyLoss that ignores pad tokens"""
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True,
reduction=tf.keras.losses.Reduction.SUM,
)
if from_logits is False: # convert to logits
eps = 1e-9
y_pred = tf.clip_by_value(y_pred, clip_value_min=eps, clip_value_max=1 - eps)
y_pred = tf.math.log(y_pred)
logits = y_pred
melted_labels = tf.reshape(labels, (-1,))
active_loss = tf.not_equal(melted_labels, self.config.generator.pad_token_id)
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, logits.shape[2])), active_loss)
labels = tf.boolean_mask(melted_labels, active_loss)
nll_loss = loss_fn(labels, reduced_logits)
smooth_loss = -tf.reduce_sum(reduced_logits, axis=-1)
smooth_loss = tf.reduce_sum(smooth_loss) # sum and squeeze like torch
eps_i = smooth_epsilon / reduced_logits.shape[-1]
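        # Label smoothing in the style of fairseq's label_smoothed_nll_loss:
        # interpolate the gold-token NLL with a uniform term over the vocabulary.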
loss = (1.0 - smooth_epsilon) * nll_loss + eps_i * smooth_loss
return loss
@add_start_docstrings_to_model_forward(
"""
A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
""",
RAG_START_DOCSTRING,
)
class TFRagSequenceForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss):
load_weight_prefix = "tf_rag_sequence_for_generation_1/rag"
def __init__(
self,
config: Optional[PretrainedConfig] = None,
question_encoder: Optional[TFPreTrainedModel] = None,
generator: Optional[TFPreTrainedModel] = None,
retriever: Optional = None,
**kwargs,
):
assert config is not None or (
question_encoder is not None and generator is not None
), "Either a configuration or an encoder and a generator has to be provided."
if config is None:
config = RagConfig.from_question_encoder_generator_configs(
question_encoder.config, generator.config, **kwargs
)
super().__init__(config)
# instantiate model
self.rag = TFRagModel(
config=config,
question_encoder=question_encoder,
generator=generator,
retriever=retriever,
load_weight_prefix=self.load_weight_prefix,
name="rag",
)
def set_retriever(self, retriever: RagRetriever):
self.rag.retriever = retriever
@property
def retriever(self):
return self.rag.retriever
@property
def generator(self):
return self.rag.generator
@property
def question_encoder(self):
return self.rag.question_encoder
@unpack_inputs
@add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
doc_scores=None,
context_input_ids=None,
context_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
output_retrieved=None,
n_docs=None,
exclude_bos_score=None,
labels=None,
reduce_loss=None,
return_dict=None,
training=False,
**kwargs # needs kwargs for generation
):
r"""
exclude_bos_score (`bool`, *optional*):
Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
the loss.
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the cross entropy classification loss according to Rag-Sequence model formulation See
https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Sequence formulation. Indices should
be in `[0, ..., config.vocab_size - 1]`.
reduce_loss (`bool`, *optional*):
Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum`
operation.
        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
Legacy dictionary, which is required so that model can use *generate()* function.
Returns:
Example:
```python
>>> from transformers import RagTokenizer, RagRetriever, TFRagSequenceForGeneration
>>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
>>> retriever = RagRetriever.from_pretrained(
... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
... )
>>> # initialize with RagRetriever to do everything in one forward call
>>> model = TFRagSequenceForGeneration.from_pretrained(
... "facebook/rag-sequence-nq", retriever=retriever, from_pt=True
... )
>>> input_dict = tokenizer.prepare_seq2seq_batch(
... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
... )
>>> outputs = model(input_dict, output_retrieved=True)
>>> # or use retriever separately
>>> # 1. Encode
>>> input_ids = input_dict["input_ids"]
>>> question_hidden_states = model.question_encoder(input_ids)[0]
>>> # 2. Retrieve
>>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf")
>>> doc_scores = tf.squeeze(
... tf.matmul(
... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True
... ),
... axis=1,
... )
>>> # 3. Forward to generator
>>> outputs = model(
... inputs=None,
... context_input_ids=docs_dict["context_input_ids"],
... context_attention_mask=docs_dict["context_attention_mask"],
... doc_scores=doc_scores,
... decoder_input_ids=input_dict["labels"],
... )
>>> # or directly generate
>>> generated = model.generate(
... context_input_ids=docs_dict["context_input_ids"],
... context_attention_mask=docs_dict["context_attention_mask"],
... doc_scores=doc_scores,
... )
>>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
```"""
assert (
"decoder_cached_states" not in kwargs
), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py
        exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
        reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = labels
use_cache = False
outputs = self.rag(
input_ids,
attention_mask=attention_mask,
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_retrieved=output_retrieved,
n_docs=n_docs,
training=training,
)
loss = None
if labels is not None:
loss = self.get_nll(
outputs.logits,
outputs.doc_scores,
labels,
reduce_loss=reduce_loss,
epsilon=self.config.label_smoothing,
n_docs=n_docs,
)
return TFRetrievAugLMMarginOutput(
loss=loss,
logits=outputs.logits,
doc_scores=outputs.doc_scores,
past_key_values=outputs.past_key_values,
context_input_ids=outputs.context_input_ids,
context_attention_mask=outputs.context_attention_mask,
retrieved_doc_embeds=outputs.retrieved_doc_embeds,
retrieved_doc_ids=outputs.retrieved_doc_ids,
question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
question_enc_hidden_states=outputs.question_enc_hidden_states,
question_enc_attentions=outputs.question_enc_attentions,
generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
generator_enc_hidden_states=outputs.generator_enc_hidden_states,
generator_enc_attentions=outputs.generator_enc_attentions,
generator_dec_hidden_states=outputs.generator_dec_hidden_states,
generator_dec_attentions=outputs.generator_dec_attentions,
)
def get_nll(
self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
):
# shift tokens left
target = tf.concat([target[:, 1:], tf.fill([target.shape[0], 1], self.config.generator.pad_token_id)], axis=1)
# bos_token_id is None for T5
bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
n_docs = n_docs if n_docs is not None else self.config.n_docs
equal_bos_token_id_all = tf.reduce_all(tf.equal(target[:, 0], bos_token_id))
use_bos = bos_token_id is not None and equal_bos_token_id_all
def _mask_pads(ll, smooth_obj):
pad_mask = tf.equal(target, self.config.generator.pad_token_id)
if tf.reduce_any(pad_mask):
ll = tf.where(pad_mask, 0.0, ll)
smooth_obj = tf.where(pad_mask, 0.0, smooth_obj)
return tf.squeeze(ll, axis=-1), tf.squeeze(smooth_obj, axis=-1)
# seq_logits.shape = (batch*n_docs, tgt_len , vocabs)
seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
seq_logprobs = tf.reshape(
seq_logprobs, (seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1])
) # (batch_size, n_docs, tgt_len, vocabs)
doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)
doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1)
doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # done twice to get 4-D
# RAG-sequence marginalization
first_token_scores = seq_logprobs[:, :, :1, :]
second_token_scores = seq_logprobs[:, :, 1:2, :]
remainder = seq_logprobs[:, :, 2:, :]
rag_logprobs = tf.concat([first_token_scores, second_token_scores + doc_logprobs, remainder], axis=2)
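        # The document log-prior is added at the second token position (not the
        # first) so that excluding the BOS score below does not also drop the
        # retrieval term from the marginalized sequence likelihood.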
# calculate loss
target = tf.expand_dims(target, axis=1) # n_docs dimension
target = tf.expand_dims(target, axis=-1) # logits dimension
target = tf.repeat(target, n_docs, axis=1)
assert len(target.shape) == len(rag_logprobs.shape)
# last-axis gathering only - use 2D-reshape-trick for Torch's style nD gathering
def torch_gather(param, id_tensor):
# 2d-gather torch equivalent: https://stackoverflow.com/questions/52129909/tensorflow-equivalent-of-torch-gather
def gather2d(target, id_tensor):
idx = tf.stack([tf.range(tf.shape(id_tensor)[0]), id_tensor[:, 0]], axis=-1)
result = tf.gather_nd(target, idx)
return tf.expand_dims(result, axis=-1)
target = tf.reshape(param, (-1, param.shape[-1])) # reshape 2D
target_shape = id_tensor.shape
id_tensor = tf.reshape(id_tensor, (-1, 1)) # also 2D-index
result = gather2d(target, id_tensor)
return tf.reshape(result, target_shape)
ll = torch_gather(rag_logprobs, id_tensor=target)
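        # ll holds the gold-token log-probabilities with shape
        # (batch_size, n_docs, tgt_len, 1) before the squeeze done in _mask_pads.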
smooth_obj = tf.reduce_sum(rag_logprobs, axis=-1, keepdims=True) # total sum of all (normalised) logits
ll, smooth_obj = _mask_pads(ll, smooth_obj)
# sum over tokens, exclude bos while scoring
if exclude_bos_score and use_bos:
ll = tf.reduce_sum(ll[:, :, 1:], axis=2)
else:
ll = tf.reduce_sum(ll, axis=2)
smooth_obj = tf.reduce_sum(smooth_obj, axis=2)
ll = tf.math.reduce_logsumexp(ll, axis=1) # logsumexp over docs
smooth_obj = tf.math.reduce_logsumexp(smooth_obj, axis=1)
nll_loss = -ll
smooth_loss = -smooth_obj
if reduce_loss:
nll_loss = tf.reduce_sum(nll_loss)
smooth_loss = tf.reduce_sum(smooth_loss)
eps_i = epsilon / rag_logprobs.shape[-1]
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss
def generate(
self,
input_ids: Optional[tf.Tensor] = None,
attention_mask: Optional[tf.Tensor] = None,
context_input_ids=None,
context_attention_mask=None,
doc_scores=None,
do_deduplication=None, # defaults to True
num_return_sequences=None, # defaults to 1
num_beams=None, # defaults to 1
n_docs=None,
**model_kwargs
):
"""
        Implements RAG sequence "thorough" decoding. Read the [`~generation_utils.GenerationMixin.generate`]
        documentation for more information on how to set other generate input parameters.
Args:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
The sequence used as a prompt for the generation. If `input_ids` is not passed, then
`context_input_ids` has to be provided.
attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input IDs post-processed from the retrieved documents and the question encoder input_ids by the
retriever.
context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
                retriever. If the model is not initialized with a `retriever` or `input_ids` is not given,
`context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are
returned by [`~RagRetriever.__call__`].
doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
                `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or
`input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are
returned by [`~RagRetriever.__call__`].
do_deduplication (`bool`, *optional*):
Whether or not to deduplicate the generations from different context documents for a given input. Has
to be set to `False` if used while training with distributed backend.
            num_return_sequences (`int`, *optional*, defaults to 1):
                The number of independently computed returned sequences for each element in the batch. Note that this
                is not the value we pass to the `generator`'s [`~generation_utils.GenerationMixin.generate`]
                function, where we set `num_return_sequences` to `num_beams`.
num_beams (`int`, *optional*, defaults to 1):
Number of beams for beam search. 1 means no beam search.
            n_docs (`int`, *optional*, defaults to `config.n_docs`):
Number of documents to retrieve and/or number of documents for which to generate an answer.
kwargs:
Additional kwargs will be passed to [`~generation_utils.GenerationMixin.generate`]
Return:
`tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The
second dimension (sequence length) is either equal to `max_length` or shorter if all batches finished early
due to the `eos_token_id`.
"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
num_doc_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
num_beams = num_beams if num_beams is not None else self.config.num_beams
assert (
input_ids is not None or context_input_ids is not None
), " At least one of input_ids or context_input_ids must be given"
if self.retriever is not None and context_input_ids is None:
question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
context_input_ids = self.retriever(
input_ids,
question_hidden_states.numpy(),
prefix=self.generator.config.prefix,
n_docs=n_docs,
return_tensors="tf",
)["context_input_ids"]
hypos = []
model_kwargs["num_beams"] = num_beams
model_kwargs["num_return_sequences"] = num_beams # put here so that not confused with num_doc_return_sequences
model_kwargs["attention_mask"] = None
batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs
for index in range(batch_size):
# first, generate beams from documents:
generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len)
output_sequences = self.generator.generate(
generator_input_ids,
**model_kwargs,
) # n_docs * n_beam, tgt_len
if do_deduplication:
# do_deduplication -- for TF, work on Eager mode only!
output_sequences = tf.stack(list({str(k.numpy().tolist()): k for k in output_sequences}.values()))
num_candidates = output_sequences.shape[
0
] # after deduplication, this number can be less than n_docs*n_beam
# then, run model forwards to get nll scores:
if input_ids is not None:
new_input_ids = tf.tile(input_ids[index : index + 1], (num_candidates, 1))
outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
else: # input_ids is None, need context_input_ids/mask and doc_scores
assert (
context_attention_mask is not None
), "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
assert (
doc_scores is not None
), "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
individual_input_ids = tf.tile(
generator_input_ids, (num_candidates, 1)
) # (num_candidates*n_docs, max_len)
individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]
individual_attention_mask = tf.tile(individual_attention_mask, (num_candidates, 1))
individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs]
individual_doc_scores = tf.tile(individual_doc_scores, (num_candidates, 1)) # [num_candidates, n_docs]
outputs = self(
input_ids=None,
context_input_ids=individual_input_ids,
context_attention_mask=individual_attention_mask,
doc_scores=individual_doc_scores,
labels=output_sequences,
exclude_bos_score=True,
)
top_cand_inds = tf.math.top_k((-outputs["loss"]), k=num_doc_return_sequences)[1]
# add hypothesis
hypos.append(tf.gather(output_sequences, top_cand_inds))
return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)
@staticmethod
def _cat_and_pad(tensors, pad_token_id):
# used by generate(): tensors is a (batched) list of (candidates, len); len is varied across batch
# Initialize padded tensor with shape ( all_candidates , max_candidate_length ),
# where all_candidates counted from all inputs
new_shape = sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])
output = tf.fill(new_shape, pad_token_id)
# Normal tensor doesn't support slice assignment, so we need tf.Variable
output = tf.Variable(output)
# Assign, and then convert back to tensor
ind = 0
for t in tensors:
output[ind : ind + t.shape[0], : t.shape[1]].assign(t)
ind += t.shape[0]
output = tf.convert_to_tensor(output)
return tf.cast(output, tensors[0][0][0].dtype)
| [
"[email protected]"
] | |
73bee41645a2a5d29d2307248dc9dd4042c9cb15 | 9c315e3762961668a1fe58ad811ae87c5fbf7539 | /apertium-tools/scrapers-misc/bibleScraper-ibt.py | 5e2698746ef1e1f835a59b51ad687209b7d5c5ee | [] | no_license | frankier/apertium | f2b893115c413203b1194e5c0d4feb0adf2b1b3e | d3f5515bf2455f3046314a62ea564457bcf504b8 | refs/heads/gnulib | 2021-01-20T21:00:53.139135 | 2016-05-27T17:30:01 | 2016-05-27T17:30:01 | 59,847,975 | 0 | 1 | null | 2016-07-07T12:39:01 | 2016-05-27T16:21:14 | HTML | UTF-8 | Python | false | false | 6,867 | py | #!/usr/bin/env python3
# WARNING
# ONLY USE THIS SCRIPT WITH PERMISSION FROM ibt.org.ru ADMINISTRATORS
# UNAUTHORIZED ACCESS OF ibt.org.ru IS ILLEGAL IN MOST COUNTRIES!!!
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from bs4 import BeautifulSoup
import urllib.request, re, time, argparse, sys, os
import romanclass as roman
if sys.version_info < (3, 3, 0): fileError = IOError
else: fileError = FileNotFoundError
parser = argparse.ArgumentParser(description = 'Scrape ibt.org')
parser.add_argument('-l', action = 'store', nargs = '*', help = 'Scrape the bibles with these codes')
parser.add_argument('-x', action = 'store', nargs = '*', help = 'Skip scraping certain book(s); OT to get just New Testament')
parser.add_argument('-a', action = 'store_const', const = 2, help = 'List all the valid language codes')
parser.add_argument('-s', action = 'store_const', const = 2, help = 'Parse titles within each chapter')
parser.add_argument('-q', action = 'store_false', help = 'Suppress progress messages')
parser.add_argument('-u', action = 'store_true', help = 'Add to file, don\'t overwrite')
args = parser.parse_args()
urls = args.l
if args.x:
toSkip = args.x
OT = ['Genesis', 'Exodus', 'Leviticus', 'Numbers', 'Deuteronomy', 'Joshua', 'Judges', 'Ruth', '1 Samuel', '2 Samuel', '1 Kings', '2 Kings', '1 Chronicles', '2 Chronicles', 'Ezra', 'Nehemiah', 'Esther', 'Job', 'Psalms', 'Proverbs', 'Ecclesiastes', 'Song of Songs', 'Isaiah', 'Jeremiah', 'Lamentations', 'Ezekiel', 'Daniel', 'Hosea', 'Joel', 'Amos', 'Obadiah', 'Jonah', 'Micah', 'Nahum', 'Habakkuk', 'Zechariah', 'Zephaniah', 'Haggai', 'Malachi']
if "OT" in args.x:
toSkip = OT
else:
toSkip = []
def firstPage(url):
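    """Scrape one bible edition: walk every book and chapter listed in the
    page's <select> menus and append the verse text to <translation code>.out."""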
results = re.search('m=(.*)', url)
filename = results.group(1) + ".out"
prefix = url.split('l=')[0]
text = urllib.request.urlopen(url)
soup = BeautifulSoup(text)
selbook = soup.find('select', {'id':'selbook'})
books = [(option['value'], option.text) for option in selbook.find_all('option')]
if args.u:
mode = 'a'
else:
mode = 'w'
with open(filename, mode, encoding = 'utf-8') as outfile:
if not os.path.isdir('.cache'): os.mkdir('.cache')
for urlB, fullB in books:
print(fullB, end='')
if fullB in toSkip:
print(" [skipping]")
else:
sys.stdout.flush()
firstUrl = prefix + '&l=' + urlB
#print(firstUrl)
soup = BeautifulSoup(urllib.request.urlopen(firstUrl).read())
selchap = soup.find('select', {'id':'selchap'})
chap = [(option['value'], option.text) for option in selchap.find_all('option')]
print(": ", end='')
for urlC, fullC in chap:
outfile.write(fullB + ' ' + str(roman.Roman(urlC)) + '\n')
print(fullC, end='')
sys.stdout.flush()
u = 'http://ibt.org.ru/en/text.htm?m=' + results.group(1) + '&l=' + urlB + '.' + str(urlC) + '&g=0'
s = allPages(u, results.group(1))
print(".", end='')
sys.stdout.flush()
outfile.write(s + '\n')
print(" ", end='')
sys.stdout.flush()
print()
def allPages(url, bible):
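    """Fetch a single chapter page, caching the raw HTML under .cache/, and
    return its verses as numbered lines."""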
urlparts = url.split('?')
filepath = os.path.join(os.path.curdir, '.cache', urlparts[1]+'.html')
try:
with open(filepath, encoding = 'utf-8') as infile:
text = infile.read()
except fileError:
text = urllib.request.urlopen(url).read().decode('utf-8')
#print("Downloaded")
with open(filepath, 'w', encoding = 'utf-8') as outfile:
outfile.write(text)
time.sleep(0.5)
soup = BeautifulSoup(text)
flowcolumn = soup.find('div', {'id':'flowcolumn'})
s = ''
i = 1
for verse in flowcolumn.find_all('span', {'class':'cs-' + bible}):
if verse.sup != None:
verse.sup.clear()
#print verse['id']
#print verse.text.encode('utf-8')
if verse.previous_sibling != None:
try:
if verse.previous_sibling.name == 'div' and args.s == 2:
s += verse.previous_sibling.text.strip() + '\n'
except AttributeError:
# Was a string/skip
pass
        s += str(i) + '. ' + verse.text.strip() + '\n'
i += 1
return s
CODES = { 'ADG' : 'Adygei',
'AGL' : 'Agul',
'AVR' : 'Avar',
'CHV' : 'Chuvash',
'CRT' : 'Crimean Tatar',
'KHK' : 'Khakas',
'XKS' : 'Khakas',
'KJV' : 'English',
'WEB' : 'English',
'KUMYK' : 'Kumyk',
'KYLSC' : 'Kyrgyz',
'KYROHC': 'Kyrgyz',
'KYLSA' : 'Kyrgyz Arabic',
'KYROHA': 'Kyrgyz Arabic',
'OSS' : 'Ossetic',
'TTR' : 'Tatar',
'TKL' : 'Turkmen',
'TKLI' : 'Turkmen',
'TKCI' : 'Turkmen Cyrillic',
'TYV' : 'Tuvan',
'TVN' : 'Tuvan',
'RSP' : 'Russian',
'UZVL' : 'Uzbek',
'UZIBTL': 'Uzbek',
'UZV' : 'Uzbek Cyrillic',
'UZIBT' : 'Uzbek Cyrillic',
'LXX' : 'Greek',
'TR' : 'Greek',
'OSMHB' : 'Hebrew',
'KRK' : 'Qaraqalpaq Latin',
'KRKL' : 'Qaraqalpaq Cyrillic',
'SHR' : 'Shor',
'BUR' : 'Buryat',
}
if __name__ == '__main__':
if args.a == 2:
for x in sorted(CODES):
print(x, '\t', CODES[x])
elif urls != None:
for url in urls:
url = url.upper()
if url not in CODES:
print(url, 'is not a valid code. It will be skipped.')
else:
print('Will begin scraping', url)
firstPage('http://ibt.org.ru/en/text.htm?m=' + url)
    else:
        print('No argument selected.')
        parser.parse_args(['-h'])  # prints usage and exits
| [
"unhammer@72bbbca6-d526-0410-a7d9-f06f51895060"
] | unhammer@72bbbca6-d526-0410-a7d9-f06f51895060 |
28eb4f066f9dbe9f85d53858545bd15c3df79d6b | 6f1d57238f3b395b04696a16768bcc507f00630c | /A_GCD_Sum.py | 3a137dfc4dc2318e724874ceedd601f7481d3f84 | [] | no_license | FazleRabbbiferdaus172/Codeforces_Atcoder_Lightoj_Spoj | 024a4a2a627de02e4698709d6ab86179b8301287 | 6465e693337777e7bd78ef473b4d270ce757a3a2 | refs/heads/master | 2023-07-01T06:32:14.775294 | 2021-07-27T17:07:37 | 2021-07-27T17:07:37 | 271,202,781 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | import math
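# Likely Codeforces problem 1498A "GCD Sum" (inferred from the filename): for each
# query n, print the smallest x >= n with gcd(x, digit_sum(x)) > 1. The linear scan
# below takes at most two extra steps, since any multiple of 3 has a digit sum that
# is also divisible by 3, and every three consecutive integers contain one.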
def digi_sum(n):
ds = 0
for i in str(n):
ds += ord(i) - 48
return ds
for _ in range(int(input())):
n = int(input())
while math.gcd(n, digi_sum(n)) == 1:
n += 1
print(n)
| [
"[email protected]"
] | |
310fa56760bac3e6cdd5a4d0331475d6fa83a2ef | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/fusion/tests/RIST/API/Deprecated/F172/Regression_Data.py | d6a5825dd029264612a58aa0b8b5ac7f4264621a | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | admin_credentials = {'userName': 'Administrator', 'password': 'wpsthpvse1'}
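# Static fixture data (inferred from the path) for the F172 regression suite:
# appliance credentials plus the expected drive-enclosure inventory for the
# CN754406XL frame.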
enclosure_name = "CN754406XL"
drive_enclosure_name = "CN754406XL, bay 1"
expected_number_of_DE = 1
expected_number_of_drives = 8
| [
"[email protected]"
] |