blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
afde146aa9ee1b9e20a0a626accf7c3bfa4aa5fe | fe7c897aa034d73281224e38dc5fdbe7a360d8e0 | /cs459_2/week06_orm/myproject/myapp/models.py | 6d3b19cb8cdabc0780e4700d8975fcff2d759301 | [
"BSD-3-Clause"
] | permissive | wasit7/2020 | 487592fa3b8d0351dfcf432207bfce8ded0db996 | 5fe6d41c1a5957bf240f9094d9b7c0f108835142 | refs/heads/master | 2023-05-15T00:47:52.309036 | 2021-04-30T04:46:08 | 2021-04-30T04:46:08 | 235,760,846 | 0 | 3 | BSD-3-Clause | 2022-06-22T01:39:10 | 2020-01-23T09:19:44 | Jupyter Notebook | UTF-8 | Python | false | false | 997 | py | from django.db import models
# Create your models here.
class Bike(models.Model):
    """A rentable bike in the shop's fleet."""
    # auto_now_add=True: stamped once when the row is first created.
    start=models.DateField(auto_now=False, auto_now_add=True)
    # NOTE(review): `type` shadows the builtin inside the class body; kept for schema compatibility.
    type=models.CharField(max_length=10)
    price=models.DecimalField( max_digits=8, decimal_places=2)
    def __str__(self):
        return "bike_%s %s"%(self.id, self.type)
class Customer(models.Model):
    """A customer who can rent bikes."""
    name=models.CharField(max_length=100)
    # Date of birth, entered manually (neither auto_now nor auto_now_add).
    dob=models.DateField(auto_now=False, auto_now_add=False)
    mobile=models.CharField(max_length=20)
    def __str__(self):
        return "customer_%s %s"%(self.id, self.name)
class Rent(models.Model):
    """A rental transaction linking one Customer to one Bike for a time span."""
    # Start is stamped automatically at creation; stop is filled in manually.
    start=models.DateTimeField(auto_now=False, auto_now_add=True)
    stop=models.DateTimeField(auto_now=False, auto_now_add=False)
    cost=models.DecimalField( max_digits=5, decimal_places=2)
    # Deleting a customer or bike cascades to its rent rows.
    customer=models.ForeignKey( Customer, on_delete=models.CASCADE)
    bike=models.ForeignKey( Bike, on_delete=models.CASCADE)
    def __str__(self):
        return "rent_%s %s"%(self.id, self.cost)
| [
"[email protected]"
] | |
cd16af1b63d57fd896b42948d92a0bff70396206 | 94bf7b3d09947fa7375acecd59e4b062e6381c09 | /parsifal/reviews/models.py | f8cf789530ca2f6518df3be2f8514f13b6c3b081 | [] | no_license | epkanol/parsifal | 5302c0eadad04f5bbcd2ff61bdb7c6718f7982c2 | 9ff8308b7a3827cdbfec5910de8a52a9ff0a0dc3 | refs/heads/master | 2020-04-09T12:27:50.251269 | 2015-11-19T16:53:24 | 2015-11-19T16:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,150 | py | # coding: utf-8
import datetime
from django.utils import timezone
from django.utils.html import escape
from django.db import models
from django.db.models import Sum
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from parsifal.library.models import Document
class Source(models.Model):
    """A bibliographic search source (digital library) that reviews can query."""
    name = models.CharField(max_length=100)
    url = models.CharField(max_length=200)
    # Default sources are pre-seeded and offered to every new review.
    is_default = models.BooleanField(default=False)
    class Meta:
        verbose_name = u'Source'
        verbose_name_plural = u'Sources'
        ordering = ('name',)
    def __unicode__(self):
        return self.name
    def set_url(self, value):
        """Assign ``url``, prefixing ``http://`` when *value* has no scheme.

        Bug fix: the original tested ``'http://' not in value`` (substring
        containment), which skipped the prefix whenever a scheme appeared
        *anywhere* in the string; a scheme only counts at the start.
        """
        if value and not value.startswith(('http://', 'https://')):
            self.url = u'http://{0}'.format(value)
        else:
            self.url = value
class Review(models.Model):
    """A systematic literature review owned by ``author`` and optionally shared
    with ``co_authors``.  Bundles the protocol (objective, PICOC fields,
    research questions, selection criteria, keywords), the search sources and
    the derived artifacts (articles, quality assessment, data extraction).
    """
    UNPUBLISHED = u'U'
    PUBLISHED = u'P'
    REVIEW_STATUS = (
        (UNPUBLISHED, u'Unpublished'),
        (PUBLISHED, u'Published'),
    )
    name = models.SlugField(max_length=255)
    title = models.CharField(max_length=255)
    description = models.CharField(max_length=500, null=True, blank=True)
    author = models.ForeignKey(User)
    create_date = models.DateTimeField(auto_now_add=True)
    last_update = models.DateTimeField(auto_now=True)
    objective = models.TextField(max_length=1000)
    sources = models.ManyToManyField(Source)
    status = models.CharField(max_length=1, choices=REVIEW_STATUS, default=UNPUBLISHED)
    co_authors = models.ManyToManyField(User, related_name='co_authors')
    # Articles scoring at or below this cutoff are dropped from the final selection.
    quality_assessment_cutoff_score = models.FloatField(default=0.0)
    # PICOC protocol fields.
    population = models.CharField(max_length=200, blank=True)
    intervention = models.CharField(max_length=200, blank=True)
    comparison = models.CharField(max_length=200, blank=True)
    outcome = models.CharField(max_length=200, blank=True)
    context = models.CharField(max_length=200, blank=True)
    class Meta:
        verbose_name = u'Review'
        verbose_name_plural = u'Reviews'
        # A user cannot have two reviews with the same slug.
        unique_together = (('name', 'author'),)
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        """URL of the review page: /<username>/<review-name>/."""
        from django.core.urlresolvers import reverse
        return reverse('review', args=(str(self.author.username), str(self.name)))
    def get_questions(self):
        """All research questions attached to this review."""
        questions = Question.objects.filter(review__id=self.id)
        return questions
    def get_inclusion_criterias(self):
        return SelectionCriteria.objects.filter(review__id=self.id, criteria_type='I')
    def get_exclusion_criterias(self):
        return SelectionCriteria.objects.filter(review__id=self.id, criteria_type='E')
    def get_keywords(self):
        # Top-level keywords only; synonyms hang off their parent keyword.
        return Keyword.objects.filter(review__id=self.id, synonym_of=None)
    def is_author_or_coauthor(self, user):
        """Return True when *user* owns the review or is listed as a co-author."""
        if user.id == self.author.id:
            return True
        for co_author in self.co_authors.all():
            if user.id == co_author.id:
                return True
        return False
    def get_generic_search_string(self):
        """The source-independent search string; an unsaved placeholder if none exists."""
        try:
            search_string = SearchSession.objects.filter(review__id=self.id, source=None)[:1].get()
        except SearchSession.DoesNotExist:
            search_string = SearchSession(review=self)
        return search_string
    def get_latest_source_search_strings(self):
        return self.searchsession_set.exclude(source=None).order_by('source__name')
    def get_source_articles(self, source_id=None):
        """Articles imported for this review, optionally restricted to one source."""
        if source_id is None:
            return Article.objects.filter(review__id=self.id)
        else:
            return Article.objects.filter(review__id=self.id, source__id=source_id)
    def get_duplicate_articles(self):
        """Group this review's articles by slugified title and return the groups
        with more than one member (candidate duplicates)."""
        articles = Article.objects.filter(review__id=self.id).exclude(status=Article.DUPLICATED).order_by('title')
        grouped_articles = dict()
        for article in articles:
            slug = slugify(article.title)
            if slug not in grouped_articles.keys():
                grouped_articles[slug] = { 'size': 0, 'articles': list() }
            grouped_articles[slug]['size'] += 1
            grouped_articles[slug]['articles'].append(article)
        duplicates = list()
        for slug, data in grouped_articles.iteritems():
            if data['size'] > 1:
                duplicates.append(data['articles'])
        return duplicates
    def get_accepted_articles(self):
        return Article.objects.filter(review__id=self.id, status=Article.ACCEPTED)
    def get_final_selection_articles(self):
        """Accepted articles minus those scoring at or below the quality cutoff
        (only applied when a checklist exists and the cutoff is positive)."""
        accepted_articles = Article.objects.filter(review__id=self.id, status=Article.ACCEPTED)
        if self.has_quality_assessment_checklist() and self.quality_assessment_cutoff_score > 0.0:
            articles = accepted_articles
            for article in accepted_articles:
                if article.get_score() <= self.quality_assessment_cutoff_score:
                    articles = articles.exclude(id=article.id)
            return articles
        else:
            return accepted_articles
    def has_quality_assessment_checklist(self):
        # A usable checklist needs at least one question AND one answer option.
        has_questions = self.qualityquestion_set.exists()
        has_answers = self.qualityanswer_set.exists()
        return has_questions and has_answers
    def get_data_extraction_fields(self):
        return DataExtractionField.objects.filter(review__id=self.id)
    def get_quality_assessment_questions(self):
        return QualityQuestion.objects.filter(review__id=self.id)
    def get_quality_assessment_answers(self):
        return QualityAnswer.objects.filter(review__id=self.id)
    def calculate_quality_assessment_max_score(self):
        """Best possible score: question count times the heaviest answer weight.
        Returns 0.0 when the checklist is empty or any lookup fails (best-effort)."""
        try:
            questions_count = QualityQuestion.objects.filter(review__id=self.id).count()
            higher_weight_answer = QualityAnswer.objects.filter(review__id=self.id).order_by('-weight')[:1].get()
            if questions_count and higher_weight_answer:
                return questions_count * higher_weight_answer.weight
            else:
                return 0.0
        except:  # deliberate best-effort: any failure yields the neutral score
            return 0.0
class Question(models.Model):
    """A research question of a review; may be nested under a parent question."""
    review = models.ForeignKey(Review, related_name='research_questions')
    question = models.CharField(max_length=500)
    # related_name='+' disables the reverse accessor; children are fetched explicitly.
    parent_question = models.ForeignKey('self', null=True, related_name='+')
    order = models.IntegerField(default=0)
    class Meta:
        verbose_name = u'Question'
        verbose_name_plural = u'Questions'
        ordering = ('order',)
    def __unicode__(self):
        return self.question
    def get_child_questions(self):
        """Direct sub-questions of this question."""
        return Question.objects.filter(parent_question=self)
class SelectionCriteria(models.Model):
    """An inclusion or exclusion criterion used to select articles for a review."""
    INCLUSION = u'I'
    EXCLUSION = u'E'
    SELECTION_TYPES = (
        (INCLUSION, u'Inclusion'),
        (EXCLUSION, u'Exclusion'),
    )
    review = models.ForeignKey(Review)
    criteria_type = models.CharField(max_length=1, choices=SELECTION_TYPES)
    description = models.CharField(max_length=200)
    class Meta:
        verbose_name = u'Selection Criteria'
        verbose_name_plural = u'Selection Criterias'
        ordering = ('description',)
    def __unicode__(self):
        return self.description
    def save(self, *args, **kwargs):
        # Silently truncate to the column limit instead of raising on long input.
        self.description = self.description[:200]
        super(SelectionCriteria, self).save(*args, **kwargs)
class SearchSession(models.Model):
    """A search string for a review; source=None is the generic (source-independent) string."""
    review = models.ForeignKey(Review)
    source = models.ForeignKey(Source, null=True)
    search_string = models.TextField(max_length=2000)
    version = models.IntegerField(default=1)
    def __unicode__(self):
        return self.search_string
    def search_string_as_html(self):
        """HTML-escape the search string, then bold the boolean operators for display."""
        escaped_string = escape(self.search_string)
        html = escaped_string.replace(' OR ', ' <strong>OR</strong> ').replace(' AND ', ' <strong>AND</strong> ')
        return html
def search_result_file_upload_to(instance, filename):
    """Build the storage path for a review's imported search-result file.

    The uploaded *filename* is intentionally ignored: every upload for the
    same review lands in that review's ``search_result`` folder.
    """
    return u'reviews/%s/search_result/' % instance.review.pk
class SearchResult(models.Model):
    """The outcome of running a search session against one source: an optional
    imported file plus the library documents it produced."""
    review = models.ForeignKey(Review)
    source = models.ForeignKey(Source)
    search_session = models.ForeignKey(SearchSession, null=True)
    imported_file = models.FileField(upload_to=search_result_file_upload_to, null=True)
    documents = models.ManyToManyField(Document)
class StudySelection(models.Model):
    """A per-user study selection for a review; user=None denotes the final
    (merged) selection."""
    review = models.ForeignKey(Review)
    user = models.ForeignKey(User, null=True)
    has_finished = models.BooleanField(default=False)
    def __unicode__(self):
        if self.user:
            selection = u'{0}\'s Selection'.format(self.user.username)
        else:
            selection = u'Final Selection'
        return u'{0} ({1})'.format(selection, self.review.title)
class Study(models.Model):
    """The selection state of a single library document within a StudySelection."""
    UNCLASSIFIED = u'U'
    REJECTED = u'R'
    ACCEPTED = u'A'
    DUPLICATED = u'D'
    STUDY_STATUS = (
        (UNCLASSIFIED, u'Unclassified'),
        (REJECTED, u'Rejected'),
        (ACCEPTED, u'Accepted'),
        (DUPLICATED, u'Duplicated'),
    )
    study_selection = models.ForeignKey(StudySelection, related_name=u'studies')
    document = models.ForeignKey(Document)
    source = models.ForeignKey(Source, null=True)
    status = models.CharField(max_length=1, choices=STUDY_STATUS, default=UNCLASSIFIED)
    updated_at = models.DateTimeField(auto_now=True)
    comments = models.TextField(max_length=2000, blank=True, null=True)
class Article(models.Model):
    """An imported bibliographic record for a review, carrying its selection
    status and (via QualityAssessment rows) its quality score."""
    UNCLASSIFIED = u'U'
    REJECTED = u'R'
    ACCEPTED = u'A'
    DUPLICATED = u'D'
    ARTICLE_STATUS = (
        (UNCLASSIFIED, u'Unclassified'),
        (REJECTED, u'Rejected'),
        (ACCEPTED, u'Accepted'),
        (DUPLICATED, u'Duplicated'),
    )
    review = models.ForeignKey(Review)
    bibtex_key = models.CharField(max_length=100)
    title = models.CharField(max_length=1000, null=True, blank=True)
    author = models.CharField(max_length=1000, null=True, blank=True)
    journal = models.CharField(max_length=1000, null=True, blank=True)
    year = models.CharField(max_length=10, null=True, blank=True)
    source = models.ForeignKey(Source, null=True)
    pages = models.CharField(max_length=20, null=True, blank=True)
    volume = models.CharField(max_length=100, null=True, blank=True)
    abstract = models.TextField(max_length=4000, null=True, blank=True)
    document_type = models.CharField(max_length=100, null=True, blank=True)
    status = models.CharField(max_length=1, choices=ARTICLE_STATUS, default=UNCLASSIFIED)
    comments = models.TextField(max_length=2000, null=True, blank=True)
    doi = models.CharField(max_length=50, null=True, blank=True)
    url = models.CharField(max_length=500, null=True, blank=True)
    affiliation = models.CharField(max_length=500, null=True, blank=True)
    author_keywords = models.CharField(max_length=500, null=True, blank=True)
    keywords = models.CharField(max_length=500, null=True, blank=True)
    publisher = models.CharField(max_length=100, null=True, blank=True)
    issn = models.CharField(max_length=50, null=True, blank=True)
    language = models.CharField(max_length=50, null=True, blank=True)
    note = models.CharField(max_length=500, null=True, blank=True)
    finished_data_extraction = models.BooleanField(default=False)
    # The criterion justifying rejection; kept (nulled) if the criterion is deleted.
    selection_criteria = models.ForeignKey(SelectionCriteria, null=True, blank=True, on_delete=models.SET_NULL)
    class Meta:
        verbose_name = 'Article'
        verbose_name_plural = 'Articles'
    def __unicode__(self):
        return self.title
    def get_score(self):
        """Sum of the quality-assessment answer weights for this article (0.0 if none)."""
        score = QualityAssessment.objects.filter(article__id=self.id).aggregate(Sum('answer__weight'))
        if score['answer__weight__sum'] == None:
            return 0.0
        return score['answer__weight__sum']
    def get_quality_assesment(self):
        # NOTE(review): "assesment" is misspelled but is the public API name; callers depend on it.
        quality_assessments = QualityAssessment.objects.filter(article__id=self.id)
        return quality_assessments
    def get_status_html(self):
        """Render the status as a Bootstrap label span."""
        label = { Article.UNCLASSIFIED: 'default', Article.REJECTED: 'danger', Article.ACCEPTED: 'success', Article.DUPLICATED: 'warning' }
        return u'<span class="label label-{0}">{1}</span>'.format(label[self.status], self.get_status_display())
class Keyword(models.Model):
    """A search keyword of a review, optionally tied to a PICOC facet; keywords
    with synonym_of set are synonyms of another keyword."""
    POPULATION = u'P'
    INTERVENTION = u'I'
    COMPARISON = u'C'
    OUTCOME = u'O'
    RELATED_TO = (
        (POPULATION, u'Population'),
        (INTERVENTION, u'Intervention'),
        (COMPARISON, u'Comparison'),
        (OUTCOME, u'Outcome'),
    )
    review = models.ForeignKey(Review, related_name='keywords')
    description = models.CharField(max_length=200)
    synonym_of = models.ForeignKey('self', null=True, related_name='synonyms')
    related_to = models.CharField(max_length=1, choices=RELATED_TO, blank=True)
    class Meta:
        verbose_name = u'Keyword'
        verbose_name_plural = u'Keywords'
        ordering = ('description',)
    def __unicode__(self):
        return self.description
    def save(self, *args, **kwargs):
        # Silently truncate to the column limit instead of raising on long input.
        self.description = self.description[:200]
        super(Keyword, self).save(*args, **kwargs)
    def get_synonyms(self):
        """Keywords of the same review registered as synonyms of this one."""
        return Keyword.objects.filter(review__id=self.review.id, synonym_of__id=self.id)
class QualityAnswer(models.Model):
    """A possible answer (with score weight) for a review's quality checklist."""
    # Default answer set offered when building a checklist — presumably consumed
    # by the UI; verify against callers.
    SUGGESTED_ANSWERS = (
        ('Yes', 1.0),
        ('Partially', 0.5),
        ('No', 0.0)
    )
    review = models.ForeignKey(Review)
    description = models.CharField(max_length=255)
    weight = models.FloatField()
    class Meta:
        verbose_name = 'Quality Assessment Answer'
        verbose_name_plural = 'Quality Assessment Answers'
        ordering = ('-weight',)
    def __unicode__(self):
        return self.description
class QualityQuestion(models.Model):
    """A question of a review's quality-assessment checklist."""
    review = models.ForeignKey(Review)
    description = models.CharField(max_length=255)
    order = models.IntegerField(default=0)
    class Meta:
        verbose_name = 'Quality Assessment Question'
        verbose_name_plural = 'Quality Assessment Questions'
        ordering = ('order',)
    def __unicode__(self):
        return self.description
class QualityAssessment(models.Model):
    """One user's answer to one checklist question for one article."""
    user = models.ForeignKey(User, null=True)
    article = models.ForeignKey(Article)
    question = models.ForeignKey(QualityQuestion)
    answer = models.ForeignKey(QualityAnswer, null=True)
    def __unicode__(self):
        return str(self.article.id) + ' ' + str(self.question.id)
class DataExtractionField(models.Model):
    """A typed field of a review's data-extraction form."""
    BOOLEAN_FIELD = 'B'
    STRING_FIELD = 'S'
    FLOAT_FIELD = 'F'
    INTEGER_FIELD = 'I'
    DATE_FIELD = 'D'
    SELECT_ONE_FIELD = 'O'
    SELECT_MANY_FIELD = 'M'
    FIELD_TYPES = (
        (BOOLEAN_FIELD, 'Boolean Field'),
        (STRING_FIELD, 'String Field'),
        (FLOAT_FIELD, 'Float Field'),
        (INTEGER_FIELD, 'Integer Field'),
        (DATE_FIELD, 'Date Field'),
        (SELECT_ONE_FIELD, 'Select One Field'),
        (SELECT_MANY_FIELD, 'Select Many Field'),
    )
    review = models.ForeignKey(Review)
    description = models.CharField(max_length=255)
    field_type = models.CharField(max_length=1, choices=FIELD_TYPES)
    order = models.IntegerField(default=0)
    class Meta:
        verbose_name = 'Data Extraction Field'
        verbose_name_plural = 'Data Extraction Fields'
        ordering = ('order',)
    def get_select_values(self):
        """The lookup options available for select-type fields."""
        return DataExtractionLookup.objects.filter(field__id=self.id)
    def is_select_field(self):
        return self.field_type in (self.SELECT_ONE_FIELD, self.SELECT_MANY_FIELD)
class DataExtractionLookup(models.Model):
    """One selectable option of a select-type data-extraction field."""
    field = models.ForeignKey(DataExtractionField)
    value = models.CharField(max_length=1000)
    class Meta:
        verbose_name = 'Lookup Value'
        verbose_name_plural = 'Lookup Values'
        ordering = ('value',)
    def __unicode__(self):
        return self.value
class DataExtraction(models.Model):
    """A single extracted datum: the value of one field for one article.

    Scalar field types serialize into the ``value`` text column; select-type
    fields use the ``select_values`` M2M instead.  The paired ``_set_*`` /
    ``_get_*`` helpers convert between user input / display values and the
    stored representation, dispatched on ``field.field_type``.
    """
    user = models.ForeignKey(User, null=True)
    article = models.ForeignKey(Article)
    field = models.ForeignKey(DataExtractionField)
    value = models.TextField(blank=True, null=True)
    select_values = models.ManyToManyField(DataExtractionLookup)
    def _set_boolean_value(self, value):
        # Only the literal strings "True"/"False" are accepted; empty input clears.
        if value:
            if value in ['True', 'False']:
                self.value = value
            else:
                raise ValueError('Expected values: "True" or "False"')
        else:
            self.value = ''
    def _set_string_value(self, value):
        self.value = value.strip()
    def _set_float_value(self, value):
        try:
            if value:
                # Tolerate comma as the decimal separator.
                self.value = float(value.replace(',', '.'))
            else:
                self.value = ''
        except Exception:
            raise Exception('Invalid input for ' + self.field.description + ' field. Expected value: floating point number. Please use dot instead of comma.')
    def _set_integer_value(self, value):
        try:
            if value:
                # Accept "3", "3.0" or "3,0" and truncate to int.
                self.value = int(float(value.replace(',', '.')))
            else:
                self.value = ''
        except Exception:
            raise Exception('Invalid input for ' + self.field.description + ' field. Expected value: integer number.')
    def _set_date_value(self, value):
        try:
            if value:
                # Stored in ISO format (str() of a date); parsed back in _get_date_value.
                self.value = str(datetime.datetime.strptime(value, '%m/%d/%Y').date())
            else:
                self.value = ''
        except Exception:
            raise Exception('Invalid input for ' + self.field.description + ' field. Expected value: date. Please use the format MM/DD/YYYY.')
    def _set_select_one_value(self, value):
        # Replace any previous selection with the single lookup identified by pk.
        self.value = ''
        self.select_values.clear()
        if value:
            self.select_values.add(DataExtractionLookup.objects.get(pk=value))
    def _set_select_many_value(self, value):
        # Toggle membership of the lookup identified by pk.
        self.value = ''
        _value = DataExtractionLookup.objects.get(pk=value)
        if _value in self.select_values.all():
            self.select_values.remove(_value)
        else:
            self.select_values.add(_value)
    def set_value(self, value):
        """Dispatch *value* (truncated to 1000 chars) to the setter for this field's type."""
        set_value_functions = {
            DataExtractionField.BOOLEAN_FIELD: self._set_boolean_value,
            DataExtractionField.STRING_FIELD: self._set_string_value,
            DataExtractionField.FLOAT_FIELD: self._set_float_value,
            DataExtractionField.INTEGER_FIELD: self._set_integer_value,
            DataExtractionField.DATE_FIELD: self._set_date_value,
            DataExtractionField.SELECT_ONE_FIELD: self._set_select_one_value,
            DataExtractionField.SELECT_MANY_FIELD: self._set_select_many_value,
        }
        set_value_functions[self.field.field_type](value[:1000])
    def _get_boolean_value(self):
        # Anything other than the two recognized literals reads as empty.
        if self.value == 'True':
            return True
        elif self.value == 'False':
            return False
        else:
            return ''
    def _get_string_value(self):
        return self.value
    def _get_float_value(self):
        try:
            return float(self.value)
        except Exception:
            return ''
    def _get_integer_value(self):
        try:
            return int(self.value)
        except Exception:
            return ''
    def _get_date_value(self):
        try:
            if self.value != '':
                return datetime.datetime.strptime(self.value, '%Y-%m-%d').date()
            else:
                return ''
        except Exception:
            return ''
    def _get_select_one_value(self):
        try:
            return self.select_values.all()[0]
        except Exception:
            return None
    def _get_select_many_value(self):
        try:
            return self.select_values.all()
        except Exception:
            return []
    def get_value(self):
        """Return the typed value of this datum, converted per the field type."""
        if self.field.field_type:
            get_value_functions = {
                DataExtractionField.BOOLEAN_FIELD: self._get_boolean_value,
                DataExtractionField.STRING_FIELD: self._get_string_value,
                DataExtractionField.FLOAT_FIELD: self._get_float_value,
                DataExtractionField.INTEGER_FIELD: self._get_integer_value,
                DataExtractionField.DATE_FIELD: self._get_date_value,
                DataExtractionField.SELECT_ONE_FIELD: self._get_select_one_value,
                DataExtractionField.SELECT_MANY_FIELD: self._get_select_many_value,
            }
            return get_value_functions[self.field.field_type]()
        # Bug fix: the original returned the bound method object itself
        # (missing call parentheses) instead of the stored string value.
        return self._get_string_value()
    def get_date_value_as_string(self):
        """Format a date-typed value as MM/DD/YYYY; '' when absent or not a date."""
        try:
            value = self.get_value()
            return value.strftime('%m/%d/%Y')
        except Exception:
            return ''
| [
"[email protected]"
] | |
61d2e7a48c7da7254694bab7f217ea1627b12718 | edc1c404069441a8cb67ca90bf78985d24e7262d | /video/admin.py | 36763879e0898a78fe5cf5e4c8d7bbde9064aaf0 | [] | no_license | jsparmani/AndromediaProductionsRemake | 98b6ac26a2080b7c1cc654ed4636089e816b9ef5 | 10ee43dd6785c458100bdb41702927744551fd0d | refs/heads/master | 2022-02-20T20:11:01.095443 | 2019-10-06T18:52:26 | 2019-10-06T18:52:26 | 212,735,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,208 | py | from django.contrib import admin
from . import models
from apiclient.discovery import build
from django.contrib import messages
api_key = "AIzaSyDahUDOnXFAW0jlIC-gIc1cKt_tLlOXzf4"
class PlaylistAdmin(admin.ModelAdmin):
    """Admin for Playlist: on save, verifies the playlist belongs to the current
    uploader's YouTube channel and imports every video in it as Video rows."""
    fieldsets = (
        (None, {
            "fields": (
                'name', 'playlist_id'
            ),
        }),
    )

    def save_model(self, request, obj, form, change):
        youtube = build('youtube', 'v3', developerKey=api_key)
        obj.user = request.user
        uploading_user = obj.user.uploadingusers
        channel_id = uploading_user.channel_id
        try:
            res = youtube.playlists().list(id=obj.playlist_id, part='snippet').execute()
            channel_id_res = res['items'][0]['snippet']['channelId']
        except Exception:
            messages.set_level(request, messages.ERROR)
            messages.error(
                request, 'The playlist does not exist')
            # Bug fix: without this return, execution fell through and raised a
            # NameError on the undefined channel_id_res below.
            return
        if channel_id == channel_id_res:
            super(PlaylistAdmin, self).save_model(
                request, obj, form, change)
            try:
                # Page through the playlist (50 items per API page) collecting ids.
                video_ids = []
                next_page_token = None
                while(True):
                    res = youtube.playlistItems().list(playlistId=obj.playlist_id,
                                                       part='contentDetails', maxResults=50, pageToken=next_page_token).execute()
                    for item in res['items']:
                        video_ids.append(item['contentDetails']['videoId'])
                    next_page_token = res.get('nextPageToken')
                    if next_page_token is None:
                        break
                for video in video_ids:
                    models.Video.objects.create(
                        uploading_user=uploading_user,
                        video_id=video,
                        image_url=f'https://img.youtube.com/vi/{video}/sddefault.jpg',
                        playlist=obj
                    )
            except Exception:
                messages.set_level(request, messages.ERROR)
                messages.error(
                    request, 'Server Error')
        else:
            messages.set_level(request, messages.ERROR)
            messages.error(
                request, 'The playlist does not belong to your channel')

    def get_queryset(self, request):
        # Non-superusers only see playlists they created themselves.
        qs = super().get_queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(user=request.user)

    def delete_model(self, request, obj):
        # Delete the imported videos before removing the playlist itself.
        videos = models.Video.objects.all().filter(
            playlist__playlist_id=obj.playlist_id)
        for video in videos:
            video.delete()
        obj.delete()
class VideoAdmin(admin.ModelAdmin):
    """Admin for Video: regular uploaders may only enter a video id; on save the
    video's owning channel is verified against the uploader's channel."""

    def get_fieldsets(self, request, obj=None):
        # Superusers see every field; regular uploaders may only set the video id.
        if request.user.is_superuser:
            return super().get_fieldsets(request, obj=obj)
        else:
            return (
                (None, {
                    "fields": (
                        'video_id',
                    ),
                }),
            )

    def get_queryset(self, request):
        # Non-superusers only see videos belonging to their uploading profile.
        qs = super().get_queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(uploading_user=request.user.uploadingusers)

    def save_model(self, request, obj, form, change):
        obj.uploading_user = request.user.uploadingusers
        youtube = build('youtube', 'v3', developerKey=api_key)
        res = youtube.videos().list(id=obj.video_id,
                                    part='snippet').execute()
        if not res.get('items'):
            # Robustness fix: an unknown video id yields an empty result set;
            # previously this raised an unhandled IndexError in the admin.
            messages.set_level(request, messages.ERROR)
            messages.error(
                request, 'The video does not exist')
            return
        channel_id_res = res['items'][0]['snippet']['channelId']
        if channel_id_res == obj.uploading_user.channel_id:
            image_url = f'https://img.youtube.com/vi/{obj.video_id}/sddefault.jpg'
            obj.image_url = image_url
            super().save_model(request, obj, form, change)
        else:
            messages.set_level(request, messages.ERROR)
            messages.error(
                request, 'The video does not belong to your channel')
# Wire the models to their customized admin classes.
admin.site.register(models.Playlist, PlaylistAdmin)
admin.site.register(models.Video, VideoAdmin)
| [
"[email protected]"
] | |
57dbde9dc8ad0c6e3250b9e330788f193600672a | 228ebc9fb20f25dd3ed2a6959aac41fd31314e64 | /google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py | a08b0246147c3590b599a7ad05b42db219fc4820 | [
"Apache-2.0"
] | permissive | orionnye/python-aiplatform | 746e3df0c75025582af38223829faeb2656dc653 | e3ea683bf754832340853a15bdb0a0662500a70f | refs/heads/main | 2023-08-03T06:14:50.689185 | 2021-09-24T03:24:14 | 2021-09-24T03:24:14 | 410,091,957 | 1 | 0 | Apache-2.0 | 2021-09-24T20:21:01 | 2021-09-24T20:21:00 | null | UTF-8 | Python | false | false | 3,332 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto module descriptor: registers this module's message classes under the
# given proto package.  This file appears to be generated from the .proto
# schema — regenerate rather than hand-edit (TODO confirm).
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1.schema.predict.params",
    manifest={"VideoClassificationPredictionParams",},
)
class VideoClassificationPredictionParams(proto.Message):
    r"""Prediction model parameters for Video Classification.
    Attributes:
        confidence_threshold (float):
            The Model only returns predictions with at
            least this confidence score. Default value is
            0.0
        max_predictions (int):
            The Model only returns up to that many top,
            by confidence score, predictions per instance.
            If this number is very high, the Model may
            return fewer predictions. Default value is
            10,000.
        segment_classification (bool):
            Set to true to request segment-level
            classification. Vertex AI returns labels and
            their confidence scores for the entire time
            segment of the video that user specified in the
            input instance. Default value is true
        shot_classification (bool):
            Set to true to request shot-level
            classification. Vertex AI determines the
            boundaries for each camera shot in the entire
            time segment of the video that user specified in
            the input instance. Vertex AI then returns
            labels and their confidence scores for each
            detected shot, along with the start and end time
            of the shot.
            WARNING: Model evaluation is not done for this
            classification type, the quality of it depends
            on the training data, but there are no metrics
            provided to describe that quality.
            Default value is false
        one_sec_interval_classification (bool):
            Set to true to request classification for a
            video at one-second intervals. Vertex AI returns
            labels and their confidence scores for each
            second of the entire time segment of the video
            that user specified in the input WARNING: Model
            evaluation is not done for this classification
            type, the quality of it depends on the training
            data, but there are no metrics provided to
            describe that quality. Default value is false
    """
    # Field numbers are part of the proto wire-format contract; never renumber.
    confidence_threshold = proto.Field(proto.FLOAT, number=1,)
    max_predictions = proto.Field(proto.INT32, number=2,)
    segment_classification = proto.Field(proto.BOOL, number=3,)
    shot_classification = proto.Field(proto.BOOL, number=4,)
    one_sec_interval_classification = proto.Field(proto.BOOL, number=5,)
| [
"[email protected]"
] | |
70aa5a4a1494fbd51725b7d6beb2d6cd3a1feca9 | 2215163696e362260a90bc3c011fcdb540d8d226 | /forge/kubernetes.py | f41d5e42400661c61b1d7dae93395a0911ebfb6b | [
"Apache-2.0"
] | permissive | aj0415/cforge | 47cf97d006d3a8b6ae438b1e9739c594c1fbfb8a | a99c6333ffa6b7686c8eb622cbf45a742b002716 | refs/heads/master | 2020-03-13T00:20:51.455760 | 2018-04-27T20:12:40 | 2018-04-27T20:12:40 | 130,884,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,751 | py | # Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, glob
from tasks import task, TaskError, get, sh, SHResult
from forge.match import match
from forge.yamlutil import MappingNode, Node, as_node, compose, compose_all, serialize_all, view
from forge import yamlutil
@match(MappingNode, basestring, dict)
def fixup(node, key, pairs):
    """Merge *pairs* into metadata.<key> of a YAML mapping *node* (a Kubernetes
    resource), creating the metadata and <key> mappings when absent.
    Namespace resources are deliberately left untouched."""
    node = view(node)
    kind = node.get("kind")
    if kind and kind.lower() not in ('ns', 'namespace'):
        md = node.get("metadata")
        if md is None:
            md = view(compose("{}"))
            node["metadata"] = md
        orig = md.get(key)
        if orig is None:
            orig = view(compose("{}"))
            md[key] = orig
        for k, v in pairs.items():
            orig[k] = as_node(v)
@match(Node, basestring, dict)
def fixup(*args):
    # Fallback overload: non-mapping nodes (scalars, sequences) carry no
    # metadata, so there is nothing to merge.
    pass
# kubectl short names of every resource kind that Kubernetes.list() scans when
# enumerating forge-deployed objects.
ALL = ('csr',
       'clusterrolebindings',
       'clusterroles',
       'cm',
       'controllerrevisions',
       'crd',
       'ds',
       'deploy',
       'ep',
       'ev',
       'hpa',
       'ing',
       'jobs',
       'limits',
       'ns',
       'netpol',
       'no',
       'pvc',
       'pv',
       'pdb',
       'po',
       'psp',
       'podtemplates',
       'rs',
       'rc',
       'quota',
       'rolebindings',
       'roles',
       'secrets',
       'sa',
       'svc',
       'sts',
       'sc')
@match("deployment", object)
def status_summary(kind, status):
conds = status.get("conditions")
if conds:
return conds[0]["message"]
else:
return "(none)"
@match("service", object)
def status_summary(kind, status):
if status is None:
return "(none)"
ready = []
not_ready = []
for subset in status:
for key, lst in ("addresses", ready), ("notReadyAddresses", not_ready):
for address in subset.get(key, ()):
for port in subset["ports"]:
lst.append("%s:%s" % (address["ip"], port["port"]))
result = []
if ready:
result.append("READY(%s)" % ", ".join(ready))
if not_ready:
result.append("NOT READY(%s)" % ", ".join(not_ready))
return ", ".join(result)
@match(basestring, object)
def status_summary(kind, status):
    # Fallback overload for every other resource kind: show the raw status.
    return str(status)
def is_yaml_empty(dir):
    """Return True when *dir* contains no top-level .yaml file with non-blank content."""
    for filename in glob.glob("%s/*.yaml" % dir):
        with open(filename) as handle:
            body = handle.read()
        if body.strip():
            return False
    return True
def selector(labels):
    """Render a kubectl ``-l`` label-selector string from a {key: value} dict.

    Keys with a falsy value select on key existence only (no ``=value`` part).
    """
    parts = []
    for key, value in labels.items():
        if value:
            parts.append("%s=%s" % (key, value))
        else:
            parts.append(key)
    return "-l" + ",".join(parts)
def is_yaml_file(name):
    """True when *name* carries a YAML extension (.yml or .yaml)."""
    return name.endswith((".yml", ".yaml"))
class Kubernetes(object):
    def __init__(self, namespace=None, context=None, dry_run=False):
        # The K8S_NAMESPACE environment variable supplies the default namespace.
        self.namespace = namespace or os.environ.get("K8S_NAMESPACE", None)
        # NOTE(review): context is stored but unused by the methods visible here — confirm.
        self.context = context
        self.dry_run = dry_run
    @task()
    def resources(self, yaml_dir):
        """Return the resource names a `kubectl apply` of *yaml_dir* would touch
        (dry-run only; nothing is changed in the cluster)."""
        if is_yaml_empty(yaml_dir):
            return []
        cmd = "kubectl", "apply", "--dry-run", "-R", "-f", yaml_dir, "-o", "name"
        if self.namespace:
            cmd += "--namespace", self.namespace
        return sh(*cmd).output.split()
def _labeltate(self, yaml_dir, labels, annotate):
if is_yaml_empty(yaml_dir):
return SHResult("", 0, "")
key = "annotations" if annotate else "labels"
for path, dirs, files in os.walk(yaml_dir):
for name in files:
if not is_yaml_file(name): continue
fixed = []
filename = os.path.join(path, name)
with open(filename, 'read') as f:
for nd in compose_all(f):
fixup(nd, key, labels)
# we filter out null nodes because istioctl sticks
# them in for some reason, and then we end up
# serializing them in a way that kubectl doesn't
# understand
if nd.tag == u'tag:yaml.org,2002:null':
continue
fixed.append(nd)
munged = serialize_all(fixed)
with open(filename, 'write') as f:
f.write(munged)
    @task()
    def annotate(self, yaml_dir, labels):
        """Merge *labels* into metadata.annotations of every resource under *yaml_dir*."""
        self._labeltate(yaml_dir, labels, annotate=True)
@task()
def label(self, yaml_dir, labels):
self._labeltate(yaml_dir, labels, annotate=False)
@task()
def apply(self, yaml_dir, prune=None):
if is_yaml_empty(yaml_dir):
return SHResult("", 0, "")
cmd = "kubectl", "apply", "-R", "-f", yaml_dir
if self.namespace:
cmd += "--namespace", self.namespace
if self.dry_run:
cmd += "--dry-run",
if prune:
cmd += "--prune", selector(prune)
result = sh(*cmd)
return result
@task()
def list(self):
"""
Return a structured view of all forge deployed resources in a kubernetes cluster.
"""
output = sh("kubectl", "get", "--all-namespaces", ",".join(ALL), "-oyaml", "-lforge.service").output
repos = {}
endpoints = {}
for nd in yamlutil.load("kubectl-get", output):
items = nd["items"]
for i in items:
kind = i["kind"].lower()
md = i["metadata"]
name = md["name"]
namespace = md["namespace"]
status = i.get("status", {})
ann = md.get("annotations", {})
repo = ann.get("forge.repo", "(none)")
descriptor = ann.get("forge.descriptor", "(none)")
version = ann.get("forge.version", "(none)")
labels = md.get("labels", {})
service = labels["forge.service"]
profile = labels["forge.profile"]
if kind == "endpoints":
endpoints[(namespace, name)] = i["subsets"]
continue
if repo not in repos:
repos[repo] = {}
if service not in repos[repo]:
repos[repo][service] = {}
if profile not in repos[repo][service]:
repos[repo][service][profile] = []
repos[repo][service][profile].append({
"kind": kind,
"namespace": namespace,
"name": name,
"version": version,
"descriptor": descriptor,
"status": status
})
for repo, services in repos.items():
for service, profiles in services.items():
for profile, resources in profiles.items():
for resource in resources:
kind = resource["kind"]
if kind == "service":
status = status_summary(kind, endpoints[(resource["namespace"], resource["name"])])
else:
status = status_summary(kind, resource["status"])
resource["status"] = status
return repos
@task()
def delete(self, labels):
# never try to delete namespaces or storage classes because they are shared resources
all = ",".join(r for r in ALL if r not in ('ns', 'sc'))
lines = sh("kubectl", "get", all, '--all-namespaces', selector(labels), '-ogo-template={{range .items}}{{.kind}} {{.metadata.namespace}} {{.metadata.name}}{{"\\n"}}{{end}}').output.splitlines()
byns = {}
for line in lines:
parts = line.split()
if len(parts) == 2:
kind, name = parts
namespace = None
else:
kind, namespace, name = parts
if namespace not in byns:
byns[namespace] = []
byns[namespace].append((kind, name))
for ns in sorted(byns.keys()):
names = sorted("%s/%s" % (k, n) for k, n in byns[ns])
if ns is None:
sh("kubectl", "delete", *names)
else:
sh("kubectl", "delete", "-n", ns, *names)
| [
"[email protected]"
] | |
ad3f02552cb32fec4a851ed69631289e4ebca8ec | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/insights/v20201005preview/_inputs.py | 21a144300587977774437a23d377b3d7e412df76 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 14,537 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'HeaderFieldArgs',
'WebTestGeolocationArgs',
'WebTestPropertiesConfigurationArgs',
'WebTestPropertiesContentValidationArgs',
'WebTestPropertiesRequestArgs',
'WebTestPropertiesValidationRulesArgs',
]
@pulumi.input_type
class HeaderFieldArgs:
    # Auto-generated Pulumi input type (see the file header: do not edit by
    # hand).  Models a single HTTP header (name/value pair) attached to a
    # WebTest request.
    def __init__(__self__, *,
                 header_field_name: Optional[pulumi.Input[str]] = None,
                 header_field_value: Optional[pulumi.Input[str]] = None):
        """
        A header to add to the WebTest.
        :param pulumi.Input[str] header_field_name: The name of the header.
        :param pulumi.Input[str] header_field_value: The value of the header.
        """
        # Only set fields the caller supplied; unset fields stay absent from
        # the resource arguments entirely.
        if header_field_name is not None:
            pulumi.set(__self__, "header_field_name", header_field_name)
        if header_field_value is not None:
            pulumi.set(__self__, "header_field_value", header_field_value)
    @property
    @pulumi.getter(name="headerFieldName")
    def header_field_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the header.
        """
        return pulumi.get(self, "header_field_name")
    @header_field_name.setter
    def header_field_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "header_field_name", value)
    @property
    @pulumi.getter(name="headerFieldValue")
    def header_field_value(self) -> Optional[pulumi.Input[str]]:
        """
        The value of the header.
        """
        return pulumi.get(self, "header_field_value")
    @header_field_value.setter
    def header_field_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "header_field_value", value)
@pulumi.input_type
class WebTestGeolocationArgs:
    # Auto-generated Pulumi input type (do not edit by hand): one geographic
    # location a WebTest runs from; the snake_case property maps to the
    # camelCase Azure API field via @pulumi.getter.
    def __init__(__self__, *,
                 location: Optional[pulumi.Input[str]] = None):
        """
        Geo-physical location to run a WebTest from. You must specify one or more locations for the test to run from.
        :param pulumi.Input[str] location: Location ID for the WebTest to run from.
        """
        if location is not None:
            pulumi.set(__self__, "location", location)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Location ID for the WebTest to run from.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
@pulumi.input_type
class WebTestPropertiesConfigurationArgs:
    # Auto-generated Pulumi input type (do not edit by hand): carries the raw
    # XML WebTest specification as a single string field ("webTest" in the
    # Azure API).
    def __init__(__self__, *,
                 web_test: Optional[pulumi.Input[str]] = None):
        """
        An XML configuration specification for a WebTest.
        :param pulumi.Input[str] web_test: The XML specification of a WebTest to run against an application.
        """
        if web_test is not None:
            pulumi.set(__self__, "web_test", web_test)
    @property
    @pulumi.getter(name="webTest")
    def web_test(self) -> Optional[pulumi.Input[str]]:
        """
        The XML specification of a WebTest to run against an application.
        """
        return pulumi.get(self, "web_test")
    @web_test.setter
    def web_test(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "web_test", value)
@pulumi.input_type
class WebTestPropertiesContentValidationArgs:
    # Auto-generated Pulumi input type (do not edit by hand): content-match
    # validation settings for a WebTest response body.
    def __init__(__self__, *,
                 content_match: Optional[pulumi.Input[str]] = None,
                 ignore_case: Optional[pulumi.Input[bool]] = None,
                 pass_if_text_found: Optional[pulumi.Input[bool]] = None):
        """
        The collection of content validation properties
        :param pulumi.Input[str] content_match: Content to look for in the return of the WebTest. Must not be null or empty.
        :param pulumi.Input[bool] ignore_case: When set, this value makes the ContentMatch validation case insensitive.
        :param pulumi.Input[bool] pass_if_text_found: When true, validation will pass if there is a match for the ContentMatch string. If false, validation will fail if there is a match
        """
        # Only set fields the caller supplied; unset fields stay absent.
        if content_match is not None:
            pulumi.set(__self__, "content_match", content_match)
        if ignore_case is not None:
            pulumi.set(__self__, "ignore_case", ignore_case)
        if pass_if_text_found is not None:
            pulumi.set(__self__, "pass_if_text_found", pass_if_text_found)
    @property
    @pulumi.getter(name="contentMatch")
    def content_match(self) -> Optional[pulumi.Input[str]]:
        """
        Content to look for in the return of the WebTest. Must not be null or empty.
        """
        return pulumi.get(self, "content_match")
    @content_match.setter
    def content_match(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "content_match", value)
    @property
    @pulumi.getter(name="ignoreCase")
    def ignore_case(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, this value makes the ContentMatch validation case insensitive.
        """
        return pulumi.get(self, "ignore_case")
    @ignore_case.setter
    def ignore_case(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ignore_case", value)
    @property
    @pulumi.getter(name="passIfTextFound")
    def pass_if_text_found(self) -> Optional[pulumi.Input[bool]]:
        """
        When true, validation will pass if there is a match for the ContentMatch string. If false, validation will fail if there is a match
        """
        return pulumi.get(self, "pass_if_text_found")
    @pass_if_text_found.setter
    def pass_if_text_found(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "pass_if_text_found", value)
@pulumi.input_type
class WebTestPropertiesRequestArgs:
    # Auto-generated Pulumi input type (do not edit by hand): the HTTP request
    # a WebTest issues (verb, URL, headers, body, redirect handling).
    def __init__(__self__, *,
                 follow_redirects: Optional[pulumi.Input[bool]] = None,
                 headers: Optional[pulumi.Input[Sequence[pulumi.Input['HeaderFieldArgs']]]] = None,
                 http_verb: Optional[pulumi.Input[str]] = None,
                 parse_dependent_requests: Optional[pulumi.Input[bool]] = None,
                 request_body: Optional[pulumi.Input[str]] = None,
                 request_url: Optional[pulumi.Input[str]] = None):
        """
        The collection of request properties
        :param pulumi.Input[bool] follow_redirects: Follow redirects for this web test.
        :param pulumi.Input[Sequence[pulumi.Input['HeaderFieldArgs']]] headers: List of headers and their values to add to the WebTest call.
        :param pulumi.Input[str] http_verb: Http verb to use for this web test.
        :param pulumi.Input[bool] parse_dependent_requests: Parse Dependent request for this WebTest.
        :param pulumi.Input[str] request_body: Base64 encoded string body to send with this web test.
        :param pulumi.Input[str] request_url: Url location to test.
        """
        # Only set fields the caller supplied; unset fields stay absent.
        if follow_redirects is not None:
            pulumi.set(__self__, "follow_redirects", follow_redirects)
        if headers is not None:
            pulumi.set(__self__, "headers", headers)
        if http_verb is not None:
            pulumi.set(__self__, "http_verb", http_verb)
        if parse_dependent_requests is not None:
            pulumi.set(__self__, "parse_dependent_requests", parse_dependent_requests)
        if request_body is not None:
            pulumi.set(__self__, "request_body", request_body)
        if request_url is not None:
            pulumi.set(__self__, "request_url", request_url)
    @property
    @pulumi.getter(name="followRedirects")
    def follow_redirects(self) -> Optional[pulumi.Input[bool]]:
        """
        Follow redirects for this web test.
        """
        return pulumi.get(self, "follow_redirects")
    @follow_redirects.setter
    def follow_redirects(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "follow_redirects", value)
    @property
    @pulumi.getter
    def headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HeaderFieldArgs']]]]:
        """
        List of headers and their values to add to the WebTest call.
        """
        return pulumi.get(self, "headers")
    @headers.setter
    def headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HeaderFieldArgs']]]]):
        pulumi.set(self, "headers", value)
    @property
    @pulumi.getter(name="httpVerb")
    def http_verb(self) -> Optional[pulumi.Input[str]]:
        """
        Http verb to use for this web test.
        """
        return pulumi.get(self, "http_verb")
    @http_verb.setter
    def http_verb(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "http_verb", value)
    @property
    @pulumi.getter(name="parseDependentRequests")
    def parse_dependent_requests(self) -> Optional[pulumi.Input[bool]]:
        """
        Parse Dependent request for this WebTest.
        """
        return pulumi.get(self, "parse_dependent_requests")
    @parse_dependent_requests.setter
    def parse_dependent_requests(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "parse_dependent_requests", value)
    @property
    @pulumi.getter(name="requestBody")
    def request_body(self) -> Optional[pulumi.Input[str]]:
        """
        Base64 encoded string body to send with this web test.
        """
        return pulumi.get(self, "request_body")
    @request_body.setter
    def request_body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_body", value)
    @property
    @pulumi.getter(name="requestUrl")
    def request_url(self) -> Optional[pulumi.Input[str]]:
        """
        Url location to test.
        """
        return pulumi.get(self, "request_url")
    @request_url.setter
    def request_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_url", value)
@pulumi.input_type
class WebTestPropertiesValidationRulesArgs:
    # Auto-generated Pulumi input type (do not edit by hand): response
    # validation rules for a WebTest (content match, status code, SSL checks).
    def __init__(__self__, *,
                 content_validation: Optional[pulumi.Input['WebTestPropertiesContentValidationArgs']] = None,
                 expected_http_status_code: Optional[pulumi.Input[int]] = None,
                 ignore_https_status_code: Optional[pulumi.Input[bool]] = None,
                 s_sl_cert_remaining_lifetime_check: Optional[pulumi.Input[int]] = None,
                 s_sl_check: Optional[pulumi.Input[bool]] = None):
        """
        The collection of validation rule properties
        :param pulumi.Input['WebTestPropertiesContentValidationArgs'] content_validation: The collection of content validation properties
        :param pulumi.Input[int] expected_http_status_code: Validate that the WebTest returns the http status code provided.
        :param pulumi.Input[bool] ignore_https_status_code: When set, validation will ignore the status code.
        :param pulumi.Input[int] s_sl_cert_remaining_lifetime_check: A number of days to check still remain before the existing SSL cert expires. Value must be positive and the SSLCheck must be set to true.
        :param pulumi.Input[bool] s_sl_check: Checks to see if the SSL cert is still valid.
        """
        # Only set fields the caller supplied; unset fields stay absent.
        if content_validation is not None:
            pulumi.set(__self__, "content_validation", content_validation)
        if expected_http_status_code is not None:
            pulumi.set(__self__, "expected_http_status_code", expected_http_status_code)
        if ignore_https_status_code is not None:
            pulumi.set(__self__, "ignore_https_status_code", ignore_https_status_code)
        if s_sl_cert_remaining_lifetime_check is not None:
            pulumi.set(__self__, "s_sl_cert_remaining_lifetime_check", s_sl_cert_remaining_lifetime_check)
        if s_sl_check is not None:
            pulumi.set(__self__, "s_sl_check", s_sl_check)
    @property
    @pulumi.getter(name="contentValidation")
    def content_validation(self) -> Optional[pulumi.Input['WebTestPropertiesContentValidationArgs']]:
        """
        The collection of content validation properties
        """
        return pulumi.get(self, "content_validation")
    @content_validation.setter
    def content_validation(self, value: Optional[pulumi.Input['WebTestPropertiesContentValidationArgs']]):
        pulumi.set(self, "content_validation", value)
    @property
    @pulumi.getter(name="expectedHttpStatusCode")
    def expected_http_status_code(self) -> Optional[pulumi.Input[int]]:
        """
        Validate that the WebTest returns the http status code provided.
        """
        return pulumi.get(self, "expected_http_status_code")
    @expected_http_status_code.setter
    def expected_http_status_code(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "expected_http_status_code", value)
    @property
    @pulumi.getter(name="ignoreHttpsStatusCode")
    def ignore_https_status_code(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, validation will ignore the status code.
        """
        return pulumi.get(self, "ignore_https_status_code")
    @ignore_https_status_code.setter
    def ignore_https_status_code(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ignore_https_status_code", value)
    @property
    @pulumi.getter(name="sSLCertRemainingLifetimeCheck")
    def s_sl_cert_remaining_lifetime_check(self) -> Optional[pulumi.Input[int]]:
        """
        A number of days to check still remain before the existing SSL cert expires. Value must be positive and the SSLCheck must be set to true.
        """
        return pulumi.get(self, "s_sl_cert_remaining_lifetime_check")
    @s_sl_cert_remaining_lifetime_check.setter
    def s_sl_cert_remaining_lifetime_check(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "s_sl_cert_remaining_lifetime_check", value)
    @property
    @pulumi.getter(name="sSLCheck")
    def s_sl_check(self) -> Optional[pulumi.Input[bool]]:
        """
        Checks to see if the SSL cert is still valid.
        """
        return pulumi.get(self, "s_sl_check")
    @s_sl_check.setter
    def s_sl_check(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "s_sl_check", value)
| [
"[email protected]"
] | |
2d868583f63376842e1cd553b872f76f6f8c7f87 | 06e5f427067574b5be79e8a0a260db8ee3b5f744 | /tests/testpadpt.py | bfcf914003a82983e73fa645d57d3c953567ce0a | [
"MIT"
] | permissive | wotsushi/padpt | 655e5b94f749f5dc3db0311fa92e642897c650cc | 177328d9d18d5b68615b96b5fe6c562bac01df2e | refs/heads/master | 2021-01-12T16:52:38.059089 | 2016-11-12T14:53:27 | 2016-11-12T14:53:27 | 71,460,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,381 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import io
import shutil
import warnings
import unittest
import urllib.error
from unittest.mock import patch
from PIL import Image
from padpt import padpt
class TestPadPT(unittest.TestCase):
    """End-to-end tests for the ``padpt`` command line entry point.

    ``setUp`` stashes the user's real ``~/.padpt`` configuration directory and
    the package's ``data/db`` directory under ``tests/tmp`` and installs test
    fixtures in their place; ``tearDown`` restores the originals.

    BUG FIX: the error-path tests previously used ``try: padpt.main()
    except SystemExit:`` — if ``main()`` did NOT exit, the test passed
    silently.  They now use ``assertRaises`` so a missing exit is a failure.
    """

    def setUp(self):
        # Fresh scratch area for stashing the user's real config/db.
        if os.path.exists('tests/tmp'):
            shutil.rmtree('tests/tmp')
        os.mkdir('tests/tmp')
        # Swap ~/.padpt for the fixture configuration.
        self.conf_dir = os.path.join(
            os.path.expanduser('~'),
            '.padpt/')
        shutil.move(
            self.conf_dir,
            'tests/tmp/.padpt')
        shutil.copytree(
            'tests/.padpt',
            self.conf_dir)
        # Swap the installed monster database for the fixture one.
        self.db_dir = os.path.join(
            os.path.dirname(sys.modules['padpt'].__file__),
            'data/db')
        if not os.path.exists(self.db_dir):
            os.mkdir(self.db_dir)
        shutil.move(
            self.db_dir,
            'tests/tmp/data/db')
        shutil.copytree(
            'tests/data/db',
            self.db_dir)

    def tearDown(self):
        # Restore the user's configuration and the package database.
        shutil.rmtree(self.conf_dir)
        shutil.move(
            'tests/tmp/.padpt',
            self.conf_dir)
        shutil.rmtree(self.db_dir)
        shutil.move(
            'tests/tmp/data/db',
            self.db_dir)

    @patch.object(
        sys,
        'argv',
        ['padpt'])
    @patch.object(
        sys,
        'stdout',
        io.StringIO())
    def test_main_00(self):
        """Without arguments, main() prints the usage text to stdout."""
        padpt.main()
        self.assertEqual(
            sys.stdout.getvalue(),
            'usage: padpt [-h] [-u] [pt] [out]\n'
            '\n'
            'generates a walkthrough sheet.\n'
            '\n'
            'positional arguments:\n'
            ' pt pt file to be parsed\n'
            ' out path of the output file\n'
            '\n'
            'optional arguments:\n'
            ' -h, --help show this help message and exit\n'
            ' -u, --update updates database\n')

    @patch.object(
        sys,
        'argv',
        ['padpt', 'tests/in/mill.pt'])
    def test_main_01(self):
        """Default output path: sheet is written next to the pt file.

        Interactive: shows the generated image and asks for a visual OK.
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            padpt.main()
        Image.open('tests/in/mill.png').show()
        self.assertEqual(
            'y',
            input('OK? [y/n]'))
        os.remove('tests/in/mill.png')

    @patch.object(
        sys,
        'argv',
        ['padpt', 'tests/in/friday.pt', 'tests/out/testpadpt_02.png'])
    def test_main_02(self):
        """Explicit output path is honored (interactive visual check)."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            padpt.main()
        Image.open('tests/out/testpadpt_02.png').show()
        self.assertEqual(
            'y',
            input('OK? [y/n]'))

    @patch.object(
        sys,
        'argv',
        ['padpt', '_.pt', '_.png'])
    def test_main_03(self):
        """main() exits when padpt.conf is missing."""
        padpt_conf = os.path.join(
            self.conf_dir,
            'padpt.conf')
        os.remove(padpt_conf)
        with self.assertRaises(SystemExit) as cm:
            padpt.main()
        self.assertEqual(
            str(cm.exception),
            '{} does not exist.'.format(padpt_conf))

    @patch.object(
        sys,
        'argv',
        ['padpt', '_.pt', '_.png'])
    def test_main_04(self):
        """main() exits on a malformed padpt.conf."""
        shutil.copy(
            'tests/.padpt/padpterror.conf',
            os.path.join(
                self.conf_dir,
                'padpt.conf'))
        with self.assertRaises(SystemExit) as cm:
            padpt.main()
        self.assertEqual(
            str(cm.exception),
            'There is an error in padpt.conf')

    @patch.object(
        sys,
        'argv',
        ['padpt', '-u'])
    def test_main_05(self):
        """--update exits when padpt.conf is missing required keys."""
        shutil.copy(
            'tests/.padpt/padpt_keyerror.conf',
            os.path.join(
                self.conf_dir,
                'padpt.conf'))
        with self.assertRaises(SystemExit) as cm:
            padpt.main()
        self.assertEqual(
            str(cm.exception),
            'There is an error in padpt.conf')

    @patch.object(
        sys,
        'argv',
        ['padpt', '-u'])
    def test_main_06(self):
        """--update exits when the configured database URL is unreachable."""
        shutil.copy(
            'tests/.padpt/padpt_urlerror.conf',
            os.path.join(
                self.conf_dir,
                'padpt.conf'))
        with self.assertRaises(SystemExit) as cm:
            padpt.main()
        self.assertEqual(
            str(cm.exception),
            'Failed to download http://padpt_test')

    @patch.object(
        sys,
        'argv',
        ['padpt', 'none.pt'])
    def test_main_07(self):
        """main() exits when the pt file does not exist."""
        with self.assertRaises(SystemExit) as cm:
            padpt.main()
        self.assertEqual(
            str(cm.exception),
            'none.pt does not exist.')

    @patch.object(
        sys,
        'argv',
        ['padpt', 'tests/in/pterror.pt'])
    def test_main_08(self):
        """main() exits on a pt file with a syntax error."""
        with self.assertRaises(SystemExit) as cm:
            padpt.main()
        self.assertEqual(
            str(cm.exception),
            'tests/in/pterror.pt has a syntax error.')

    @patch.object(
        sys,
        'argv',
        ['padpt', 'tests/in/aliaserror.pt'])
    def test_main_09(self):
        """main() exits when the pt file references an undefined alias."""
        with self.assertRaises(SystemExit) as cm:
            padpt.main()
        self.assertEqual(
            str(cm.exception),
            '覚醒エラー is undefined in alias.csv.')

    @patch.object(
        sys,
        'argv',
        ['padpt', 'tests/in/mill.pt'])
    def test_main_10(self):
        """main() exits when a monster ID is missing from the monster DB."""
        shutil.copy(
            'tests/data/db/monstererror.csv',
            os.path.join(
                self.db_dir,
                'monsters.csv'))
        with self.assertRaises(SystemExit) as cm:
            padpt.main()
        self.assertEqual(
            str(cm.exception),
            'The monster whose monster ID is 2903'
            'is not registerd with your monster DB.')

    @patch.object(
        sys,
        'argv',
        ['padpt', 'tests/in/mill.pt'])
    def test_main_11(self):
        """Sheet generation also exits on a key-error padpt.conf."""
        shutil.copy(
            'tests/.padpt/padpt_keyerror.conf',
            os.path.join(
                self.conf_dir,
                'padpt.conf'))
        with self.assertRaises(SystemExit) as cm:
            padpt.main()
        self.assertEqual(
            str(cm.exception),
            'There is an error in padpt.conf')
# Allow running this test module directly (python tests/testpadpt.py).
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
77455691a116df7a97fb3c60162a302cf25cdbcd | 7bb9bd2bdadef1590b2ef7ff309e08abf454e49d | /Curso em Vídeo/4-DataNascimento.py | 8a9ea4b93b275e7badf7c86dd7569ae1d8b7eeea | [] | no_license | ALREstevam/Curso-de-Python-e-Programacao-com-Python | afdf12717a710f20d4513d5df375ba63ba1e1c19 | af6227376736e63810e5979be54eb1c433d669ac | refs/heads/master | 2021-09-07T12:11:17.158298 | 2018-02-22T17:47:19 | 2018-02-22T17:47:19 | 87,453,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #Entrada e saídade dados
dia = input('Dia: ')
mes = input('Mes: ')
ano = input('Ano: ')
print('Você nasceu no dia',dia,'de',mes,'de',ano,'Não é?')
| [
"[email protected]"
] | |
3dcd089d6a30ae78f2869354ef2c95deab75e4d4 | ec0ea8854d9a04967fe8d7794454f76946a8252e | /migrations/versions/7c6b878897a6_initial_migration.py | 23dc013e61e45de6239e827e0dd76e12b45470bf | [] | no_license | alinzel/Blog_flask | 3ae3f4d6e8bfd48e67ffddf2040c37f86d8756f7 | 47f50cb409f78d5a45144a8f2d134982dc03e383 | refs/heads/master | 2020-03-07T22:38:11.537557 | 2018-04-02T13:30:55 | 2018-04-02T13:30:55 | 127,759,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | """initial migration
Revision ID: 7c6b878897a6
Revises:
Create Date: 2018-03-16 03:39:18.455855
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# This is the first migration in the chain: down_revision is None, and no
# branch labels or cross-revision dependencies are declared.
revision = '7c6b878897a6'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add ``default`` and ``permissions`` to ``roles``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # `default` presumably flags the role assigned to new users — confirm
    # against the Role model; it is indexed below for quick lookup.
    op.add_column('roles', sa.Column('default', sa.Boolean(), nullable=True))
    op.add_column('roles', sa.Column('permissions', sa.Integer(), nullable=True))
    op.create_index(op.f('ix_roles_default'), 'roles', ['default'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the index and columns added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop in reverse order of creation: index first, then the columns.
    op.drop_index(op.f('ix_roles_default'), table_name='roles')
    op.drop_column('roles', 'permissions')
    op.drop_column('roles', 'default')
    # ### end Alembic commands ###
| [
"[email protected]"
] | |
89b6ebe9e6d0a67626bb5ba1b9278aa3ebafa35c | fd65851c7977176cfa69056ea5d63ca529e74271 | /samples/core/loop_parameter/loop_parameter_test.py | 11e2c9b0b63d408a08a93b63129a7a7b21c9e116 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] | permissive | NikeNano/pipelines | dad9f45267a7f4c495a30880dd6fe1570f26fa64 | 73804f8928ce671839d34800627b6d3ea9f820a7 | refs/heads/master | 2022-01-29T21:24:43.693120 | 2021-11-20T18:18:35 | 2021-11-20T18:18:35 | 221,051,451 | 1 | 1 | Apache-2.0 | 2021-04-23T20:07:11 | 2019-11-11T19:11:29 | Python | UTF-8 | Python | false | false | 951 | py | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from .loop_parameter import my_pipeline
from ...test.util import run_pipeline_func, TestCase, NEEDS_A_FIX
# Run the same pipeline twice at import time: once in KFP v1 legacy mode and
# once in v2 compatible mode. NOTE(review): NEEDS_A_FIX is imported above but
# not used in either test case — confirm whether it is still needed.
run_pipeline_func([
    TestCase(
        pipeline_func=my_pipeline,
        mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY,
    ),
    TestCase(
        pipeline_func=my_pipeline,
        mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE,
    ),
])
| [
"[email protected]"
] | |
9a47313b1510bcfddeeca1fb41bc3c0b99a35eda | 04803c70bb97012b7d500a177ac0240fb2ddbe38 | /blend4_pdep/pdep/network137_5.py | 3bb47eb03df3725a21b1fbbb866e1271d6159d61 | [] | no_license | shenghuiqin/chpd | 735e0415f6688d88579fc935459c1b0f53596d1d | 396ba54629036e3f2be0b3fabe09b78c90d56939 | refs/heads/master | 2023-03-01T23:29:02.118150 | 2019-10-05T04:02:23 | 2019-10-05T04:02:23 | 192,084,217 | 0 | 0 | null | 2019-06-18T18:33:13 | 2019-06-15T13:52:28 | HTML | UTF-8 | Python | false | false | 288,786 | py | species(
label = 'C7H8(690)(689)',
structure = SMILES('C=C1C=CC=CC1'),
E0 = (169.147,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3784.18,'J/mol'), sigma=(6.18258,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=591.08 K, Pc=36.33 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.88913,0.0328299,3.37063e-05,-5.81883e-08,2.16785e-11,20431.4,16.995], Tmin=(100,'K'), Tmax=(1043.73,'K')), NASAPolynomial(coeffs=[10.5104,0.0329227,-1.40442e-05,2.72618e-09,-1.97113e-13,16827,-33.6119], Tmin=(1043.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(169.147,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(13cyclohexadiene5methylene)"""),
)
# C7H8 biradical (spin multiplicity 3, two radical sites); thermo from RMG
# group additivity, transport from Joback-estimated critical properties.
species(
    label = 'C7H8(693)(692)',
    structure = SMILES('[CH2]C1=CC=C[CH]C1'),
    E0 = (264.261,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(3819.49,'J/mol'), sigma=(6.43385,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=596.60 K, Pc=32.54 bar (from Joback method)"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.84503,0.0349594,2.76454e-05,-5.16358e-08,1.94794e-11,31871.6,19.5027], Tmin=(100,'K'), Tmax=(1040.47,'K')), NASAPolynomial(coeffs=[9.81315,0.0341423,-1.41611e-05,2.69291e-09,-1.92156e-13,28599.6,-27.0106], Tmin=(1040.47,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(264.261,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(C=CC=CCJ) + radical(Aromatic_pi_S_1_3)"""),
)
# Bicyclic C7H8 closed-shell isomer (polycyclic s2_4_4_ene ring correction);
# thermo from RMG group additivity.
species(
    label = 'C7H8(694)(693)',
    structure = SMILES('C=C1CC2C=CC12'),
    E0 = (198.472,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(3648.86,'J/mol'), sigma=(6.07357,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=569.94 K, Pc=36.95 bar (from Joback method)"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.80553,0.0353317,2.65611e-05,-5.26938e-08,2.05238e-11,23960.9,15.7328], Tmin=(100,'K'), Tmax=(1022.53,'K')), NASAPolynomial(coeffs=[10.7844,0.0311143,-1.25913e-05,2.39298e-09,-1.71746e-13,20508.9,-35.6859], Tmin=(1022.53,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(198.472,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + polycyclic(s2_4_4_ene_1)"""),
)
# C7H8 biradical (spin multiplicity 3, two aromatic-pi radical sites);
# thermo from RMG group additivity.
species(
    label = 'C7H8(697)(696)',
    structure = SMILES('CC1[CH][CH]C=CC=1'),
    E0 = (243.094,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,2950,3050,3150,900,950,1000,1050,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(3819.49,'J/mol'), sigma=(6.43385,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=596.60 K, Pc=32.54 bar (from Joback method)"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.19809,0.0461195,-1.76465e-05,5.35241e-10,5.30964e-13,29296.5,15.4891], Tmin=(100,'K'), Tmax=(1929,'K')), NASAPolynomial(coeffs=[17.6451,0.0270016,-1.28217e-05,2.33811e-09,-1.52447e-13,20934.5,-75.41], Tmin=(1929,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(243.094,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Aromatic_pi_S_1_3) + radical(Aromatic_pi_S_1_3)"""),
)
# Toluene (closed-shell aromatic); transport falls back to fixed Lennard-Jones
# parameters (see embedded comment); thermo from RMG group additivity.
species(
    label = 'C7H8(699)(698)',
    structure = SMILES('Cc1ccccc1'),
    E0 = (31.5822,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,2950,3050,3150,900,950,1000,1050,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(3319.97,'J/mol'), sigma=(5.949e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.16224,0.0228472,6.51865e-05,-9.55095e-08,3.6504e-11,3880.32,17.4011], Tmin=(100,'K'), Tmax=(978.375,'K')), NASAPolynomial(coeffs=[11.5979,0.0282381,-1.04878e-05,1.98802e-09,-1.46124e-13,-70.3309,-38.6684], Tmin=(978.375,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(31.5822,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CbHHH) + group(Cb-Cs) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + ring(Benzene)"""),
)
# Singlet methylene CH2(S); transport from GRI-Mech, thermo from the
# FFCM1(-) library (see embedded comments).
species(
    label = 'CH2(S)(21)(22)',
    structure = SMILES('[CH2]'),
    E0 = (418.921,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1358.21,2621.43,3089.55],'cm^-1')),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (14.0266,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19331,-0.00233105,8.15676e-06,-6.62986e-09,1.93233e-12,50366.2,-0.746734], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.13502,0.00289594,-8.16668e-07,1.13573e-10,-6.36263e-15,50504.1,4.06031], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(418.921,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: FFCM1(-)"""),
)
# Benzene (closed-shell aromatic); transport uses the fixed Lennard-Jones
# fallback; thermo from RMG group additivity.
species(
    label = 'C6H6(468)(467)',
    structure = SMILES('c1ccccc1'),
    E0 = (68.5201,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (78.1118,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(3319.97,'J/mol'), sigma=(5.949e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.88775,0.00339716,0.0001009,-1.32851e-07,5.08462e-11,8300.31,11.4552], Tmin=(100,'K'), Tmax=(949.238,'K')), NASAPolynomial(coeffs=[12.521,0.0165025,-4.66524e-06,8.85737e-10,-7.161e-14,4052.17,-47.2613], Tmin=(949.238,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(68.5201,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(282.692,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + ring(Benzene)"""),
)
# Atomic hydrogen; no internal modes. Transport from GRI-Mech, thermo from
# the BurkeH2O2 library (see embedded comments).
species(
    label = 'H(3)(3)',
    structure = SMILES('[H]'),
    E0 = (211.792,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (1.00794,'amu'),
    collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
# Benzyl radical (doublet; structure is the aromatic resonance form of the
# label's SMILES); thermo from RMG group additivity. No collisionModel given.
species(
    label = 'C=C1[CH]C=CC=C1(1310)',
    structure = SMILES('[CH2]c1ccccc1'),
    E0 = (192.889,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,3000,3100,440,815,1455,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (91.1305,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.92,0.0282084,5.05307e-05,-8.41097e-08,3.34189e-11,23289.8,16.2403], Tmin=(100,'K'), Tmax=(977.876,'K')), NASAPolynomial(coeffs=[13.7265,0.0233539,-8.65716e-06,1.6695e-09,-1.24954e-13,18903.7,-51.0748], Tmin=(977.876,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(192.889,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(328.422,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CbHHH) + group(Cb-Cs) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + ring(Benzene) + radical(Benzyl_P)"""),
)
# C7H7 doublet radical (allylic resonance form in structure); thermo from
# RMG group additivity.
species(
    label = 'C=C1C=C[C]=CC1(1313)',
    structure = SMILES('C=C1[CH]C=C=CC1'),
    E0 = (333.29,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (91.1305,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.54453,0.00308131,0.000138123,-1.78655e-07,6.7114e-11,40164.1,15.7895], Tmin=(100,'K'), Tmax=(964.926,'K')), NASAPolynomial(coeffs=[15.4321,0.0234304,-8.19192e-06,1.66766e-09,-1.3366e-13,34242.5,-63.7216], Tmin=(964.926,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(333.29,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cdd-CdsCds) + ring(Cyclohexane) + radical(C=CCJC=C)"""),
)
# C7H7 doublet vinylic radical; thermo from RMG group additivity.
species(
    label = 'C=C1[C]=CC=CC1(1312)',
    structure = SMILES('C=C1[C]=CC=CC1'),
    E0 = (368.143,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (91.1305,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.78018,0.0397028,3.64261e-06,-2.43425e-08,9.63247e-12,44364.8,17.6781], Tmin=(100,'K'), Tmax=(1105.45,'K')), NASAPolynomial(coeffs=[9.3718,0.0323581,-1.36994e-05,2.58477e-09,-1.81685e-13,41456.8,-25.2699], Tmin=(1105.45,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(368.143,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(13cyclohexadiene5methylene) + radical(C=CJC=C)"""),
)
# C7H7 doublet radical (allylic secondary resonance form); thermo from RMG
# group additivity.
species(
    label = 'C=C1C=[C]C=CC1(1314)',
    structure = SMILES('C=C1C=C=C[CH]C1'),
    E0 = (354.264,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (91.1305,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.1409,0.0149769,0.000105222,-1.45157e-07,5.52077e-11,42698.3,13.0018], Tmin=(100,'K'), Tmax=(975.169,'K')), NASAPolynomial(coeffs=[15.9927,0.0236939,-8.9925e-06,1.84037e-09,-1.44811e-13,36880.7,-69.454], Tmin=(975.169,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(354.264,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cdd-CdsCds) + ring(Cyclohexane) + radical(Allyl_S)"""),
)
# Closed-shell C7H8 cyclic cumulene isomer (124cyclohexatriene ring
# correction); thermo from RMG group additivity. No collisionModel given.
species(
    label = 'CC1=CC=C=CC1(1329)',
    structure = SMILES('CC1=CC=C=CC1'),
    E0 = (349.448,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.61589,0.0364005,3.39277e-05,-6.86766e-08,2.83523e-11,42129,18.7077], Tmin=(100,'K'), Tmax=(977.746,'K')), NASAPolynomial(coeffs=[13.8811,0.0253878,-9.2613e-06,1.73923e-09,-1.27359e-13,37858.5,-49.762], Tmin=(977.746,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.448,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cdd-CdsCds) + ring(124cyclohexatriene)"""),
)
# Closed-shell C7H8 cumulene isomer; same E0 as species 1329 (same group sum,
# different group ordering in the comment). No modes or collisionModel given.
species(
    label = 'CC1=C=CC=CC1(1470)',
    structure = SMILES('CC1=C=CC=CC1'),
    E0 = (349.448,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.61591,0.0364003,3.39287e-05,-6.86778e-08,2.83529e-11,42129,18.7076], Tmin=(100,'K'), Tmax=(977.742,'K')), NASAPolynomial(coeffs=[13.881,0.0253879,-9.26138e-06,1.73925e-09,-1.27361e-13,37858.6,-49.7615], Tmin=(977.742,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.448,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cdd-CdsCds) + ring(124cyclohexatriene)"""),
)
# Closed-shell C7H8 cumulene isomer; no modes or collisionModel given.
species(
    label = 'C=C1CC=C=CC1(1466)',
    structure = SMILES('C=C1CC=C=CC1'),
    E0 = (231.565,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.22136,0.015481,0.000100689,-1.34599e-07,4.97846e-11,27935.9,15.8949], Tmin=(100,'K'), Tmax=(991.365,'K')), NASAPolynomial(coeffs=[13.4691,0.0301336,-1.23193e-05,2.48209e-09,-1.88613e-13,22755.6,-53.1507], Tmin=(991.365,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(231.565,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cdd-CdsCds) + ring(Cyclohexane)"""),
)
# Closed-shell C7H8 cumulene isomer; thermo from RMG group additivity.
species(
    label = 'C=C1C=C=CCC1(1089)',
    structure = SMILES('C=C1C=C=CCC1'),
    E0 = (213.151,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.9368,0.02027,9.60388e-05,-1.36392e-07,5.21272e-11,25732.9,14.841], Tmin=(100,'K'), Tmax=(977.964,'K')), NASAPolynomial(coeffs=[15.9544,0.026172,-1.00047e-05,2.01432e-09,-1.55799e-13,19967.2,-67.9339], Tmin=(977.964,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(213.151,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cdd-CdsCds) + ring(Cyclohexane)"""),
)
# Closed-shell C7H8 triene isomer (14cyclohexadiene3methylene ring
# correction); thermo from RMG group additivity.
species(
    label = 'C=C1C=CCC=C1(1006)',
    structure = SMILES('C=C1C=CCC=C1'),
    E0 = (153.051,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.921,0.0291054,5.02247e-05,-8.07381e-08,3.12074e-11,18497.3,16.1349], Tmin=(100,'K'), Tmax=(992.58,'K')), NASAPolynomial(coeffs=[12.2027,0.0287016,-1.11705e-05,2.14393e-09,-1.57294e-13,14435.1,-43.573], Tmin=(992.58,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(153.051,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(14cyclohexadiene3methylene)"""),
)
# Closed-shell bicyclic C7H8 isomer (polycyclic s3_4_6_diene correction);
# no modes or collisionModel given.
species(
    label = 'C1=CC2CC(=C1)C2(1308)',
    structure = SMILES('C1=CC2CC(=C1)C2'),
    E0 = (468.256,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.24239,0.0148172,9.94407e-05,-1.37622e-07,5.27832e-11,56403,16.1002], Tmin=(100,'K'), Tmax=(964.363,'K')), NASAPolynomial(coeffs=[14.676,0.0235865,-8.05651e-06,1.57495e-09,-1.22371e-13,51199,-57.9814], Tmin=(964.363,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(468.256,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + polycyclic(s3_4_6_diene_0_2)"""),
)
# Closed-shell bicyclic C7H8 isomer (polycyclic s3_4_6_diene_1_4 correction);
# thermo from RMG group additivity.
species(
    label = 'C1=CC2C=C(C1)C2(1339)',
    structure = SMILES('C1=CC2C=C(C1)C2'),
    E0 = (444.394,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2807.14,2864.29,2921.43,2978.57,3035.71,3092.86,3150,900,928.571,957.143,985.714,1014.29,1042.86,1071.43,1100,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.27797,0.0138259,0.000101979,-1.4003e-07,5.35799e-11,53531.9,16.0423], Tmin=(100,'K'), Tmax=(964.247,'K')), NASAPolynomial(coeffs=[14.5941,0.0236537,-8.07625e-06,1.5805e-09,-1.22934e-13,48324.7,-57.6127], Tmin=(964.247,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(444.394,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s3_4_6_diene_1_4)"""),
)
# Bicyclic C7H8 biradical (spin multiplicity 3; allylic tertiary + secondary
# Cs radical sites per the group comment); thermo from RMG group additivity.
species(
    label = '[CH]1C[C]2C=CC1C2(1340)',
    structure = SMILES('[CH]1C[C]2C=CC1C2'),
    E0 = (419.66,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2807.14,2864.29,2921.43,2978.57,3035.71,3092.86,3150,900,928.571,957.143,985.714,1014.29,1042.86,1071.43,1100,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.87272,0.0010944,0.000126501,-1.56203e-07,5.70004e-11,50535.3,16.0608], Tmin=(100,'K'), Tmax=(973.347,'K')), NASAPolynomial(coeffs=[10.7795,0.0294375,-1.09304e-05,2.14056e-09,-1.62452e-13,46114.3,-36.6749], Tmin=(973.347,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(419.66,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s3_5_5_ene_1) + radical(Allyl_T) + radical(Cs_S)"""),
)
# C7H8 biradical (spin multiplicity 3; structure is a resonance form of the
# label's SMILES); thermo from RMG group additivity.
species(
    label = 'C=C1[CH]C=CC[CH]1(1095)',
    structure = SMILES('[CH2]C1[CH]CC=CC=1'),
    E0 = (264.261,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.84503,0.0349594,2.76454e-05,-5.16358e-08,1.94794e-11,31871.6,19.5027], Tmin=(100,'K'), Tmax=(1040.47,'K')), NASAPolynomial(coeffs=[9.81315,0.0341423,-1.41611e-05,2.69291e-09,-1.92156e-13,28599.6,-27.0106], Tmin=(1040.47,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(264.261,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Aromatic_pi_S_1_3) + radical(C=CC=CCJ)"""),
)
# C7H8 biradical (spin multiplicity 3; allylic + vinylic sites per the group
# comment); thermo from RMG group additivity.
species(
    label = 'C=C1[CH]C=[C]CC1(1096)',
    structure = SMILES('C=C1[CH]C=[C]CC1'),
    E0 = (405.503,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.41056,0.00810791,0.000125854,-1.63674e-07,6.10446e-11,48852.1,18.1012], Tmin=(100,'K'), Tmax=(972.442,'K')), NASAPolynomial(coeffs=[14.1799,0.0280832,-1.04447e-05,2.08425e-09,-1.61152e-13,43329.6,-54.9757], Tmin=(972.442,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(405.503,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(C=CCJC=C) + radical(Cds_S)"""),
)
# C7H8 biradical (spin multiplicity 3; structure is a resonance form of the
# label's SMILES); thermo from RMG group additivity.
species(
    label = 'C[C]1[C]=CC=CC1(1328)',
    structure = SMILES('CC1=[C]C=C[CH]C1'),
    E0 = (345.201,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.4413,0.050568,-2.35805e-05,3.44831e-09,2.10639e-13,41614.9,19.0167], Tmin=(100,'K'), Tmax=(1561.26,'K')), NASAPolynomial(coeffs=[12.2427,0.0311984,-1.29489e-05,2.31528e-09,-1.5344e-13,37230.1,-41.1466], Tmin=(1561.26,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(345.201,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(C=CJC=C) + radical(Aromatic_pi_S_1_3)"""),
)
# C7H8 biradical (spin multiplicity 3); thermo from RMG group additivity.
# NOTE(review): thermo block is numerically identical to species (1102) below.
species(
    label = 'C=C1[CH][C]=CCC1(1097)',
    structure = SMILES('[CH2]C1=C[C]=CCC1'),
    E0 = (366.369,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.84647,0.0306415,5.16383e-05,-8.66328e-08,3.4979e-11,44156.7,19.6042], Tmin=(100,'K'), Tmax=(955.035,'K')), NASAPolynomial(coeffs=[12.5419,0.027617,-9.21837e-06,1.64576e-09,-1.18163e-13,40208.8,-41.4762], Tmin=(955.035,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(366.369,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(C=CC=CCJ) + radical(C=CJC=C)"""),
)
# C7H8 biradical (spin multiplicity 3); thermo from RMG group additivity.
species(
    label = 'C=C1[C]C=CCC1(1102)',
    structure = SMILES('[CH2]C1=[C]C=CCC1'),
    E0 = (366.369,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.84647,0.0306415,5.16383e-05,-8.66328e-08,3.4979e-11,44156.7,19.6042], Tmin=(100,'K'), Tmax=(955.035,'K')), NASAPolynomial(coeffs=[12.5419,0.027617,-9.21837e-06,1.64576e-09,-1.18163e-13,40208.8,-41.4762], Tmin=(955.035,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(366.369,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(C=CC=CCJ) + radical(C=CJC=C)"""),
)
# C7H8 biradical (spin multiplicity 3; structure is a resonance form of the
# label's SMILES); thermo from RMG group additivity.
species(
    label = 'C[C]1C=[C]C=CC1(1323)',
    structure = SMILES('CC1=C[C]=C[CH]C1'),
    E0 = (345.201,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.4413,0.050568,-2.35805e-05,3.44831e-09,2.10639e-13,41614.9,19.0167], Tmin=(100,'K'), Tmax=(1561.26,'K')), NASAPolynomial(coeffs=[12.2427,0.0311984,-1.29489e-05,2.31528e-09,-1.5344e-13,37230.1,-41.1466], Tmin=(1561.26,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(345.201,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Aromatic_pi_S_1_3) + radical(C=CJC=C)"""),
)
species(
label = 'C[C]1C=C[C]=CC1(1326)',
structure = SMILES('C[C]1C=C[C]=CC1'),
E0 = (352.504,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.60944,0.0433834,2.33583e-07,-2.22646e-08,9.12346e-12,42490.2,18.939], Tmin=(100,'K'), Tmax=(1103.49,'K')), NASAPolynomial(coeffs=[9.52416,0.0343753,-1.42754e-05,2.66406e-09,-1.86046e-13,39545.1,-25.454], Tmin=(1103.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(352.504,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Aromatic_pi_S_1_3) + radical(C=CJC=C)"""),
)
species(
label = '[CH]1C=CC2C[C]2C1(1071)',
structure = SMILES('[CH]1C=CC2C[C]2C1'),
E0 = (429.983,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2807.14,2864.29,2921.43,2978.57,3035.71,3092.86,3150,900,928.571,957.143,985.714,1014.29,1042.86,1071.43,1100,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.40789,0.0213277,5.70443e-05,-7.64477e-08,2.69286e-11,51784.1,16.7668], Tmin=(100,'K'), Tmax=(1033.28,'K')), NASAPolynomial(coeffs=[7.67331,0.035978,-1.50816e-05,2.90091e-09,-2.08767e-13,48825.8,-17.8581], Tmin=(1033.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(429.983,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_3_6_ene_1) + radical(Tertalkyl) + radical(cyclohexene-allyl)"""),
)
species(
label = '[CH2]C1=CC2[CH]C2C1(1458)',
structure = SMILES('C=C1[CH]C2[CH]C2C1'),
E0 = (476.537,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.91958,0.0255409,6.78904e-05,-1.05278e-07,4.15965e-11,57407.3,22.3894], Tmin=(100,'K'), Tmax=(965.69,'K')), NASAPolynomial(coeffs=[14.5317,0.0239272,-8.24169e-06,1.56839e-09,-1.18533e-13,52610.8,-50.2388], Tmin=(965.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(476.537,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + polycyclic(s2_3_5_ene_side) + radical(Allyl_S) + radical(cyclopropane)"""),
)
species(
label = '[CH2][C]1CC2C=CC12(1459)',
structure = SMILES('[CH2][C]1CC2C=CC12'),
E0 = (466.841,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.93041,0.0372886,8.45917e-06,-2.71969e-08,1.04067e-11,56229.4,21.3473], Tmin=(100,'K'), Tmax=(1077.68,'K')), NASAPolynomial(coeffs=[7.50492,0.0350181,-1.40196e-05,2.5693e-09,-1.77763e-13,53958.3,-10.9263], Tmin=(1077.68,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(466.841,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(Isobutyl) + radical(Tertalkyl)"""),
)
species(
label = '[CH2]C12[CH]C=CC1C2(1460)',
structure = SMILES('[CH2]C12[CH]C=CC1C2'),
E0 = (441.843,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.02373,0.0201716,8.71221e-05,-1.25574e-07,4.83502e-11,53233.5,17.6992], Tmin=(100,'K'), Tmax=(974.726,'K')), NASAPolynomial(coeffs=[15.3978,0.0239124,-8.85055e-06,1.77054e-09,-1.37208e-13,47841.4,-60.766], Tmin=(974.726,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(441.843,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_3_5_ene_1) + radical(cyclopentene-allyl) + radical(Neopentyl)"""),
)
species(
label = '[CH]1[CH]C2C=C(C1)C2(1461)',
structure = SMILES('[CH]1[CH]C2C=C(C1)C2'),
E0 = (696.024,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.64462,0.0150144,7.21984e-05,-9.05671e-08,3.15462e-11,83774,20.261], Tmin=(100,'K'), Tmax=(1024.67,'K')), NASAPolynomial(coeffs=[7.08854,0.0361702,-1.51364e-05,2.9264e-09,-2.1181e-13,80841.9,-11.15], Tmin=(1024.67,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(696.024,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + polycyclic(s3_4_6_ene_4) + radical(Cs_S) + radical(cyclohexane)"""),
)
species(
label = '[CH2]C=CC=C[C]=C(1333)',
structure = SMILES('[CH2]C=CC=C[C]=C'),
E0 = (426.719,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,1685,370,2995,3005,3015,3025,975,983.333,991.667,1000,1300,1325,1350,1375,400,433.333,466.667,500,1630,1646.67,1663.33,1680,2950,3100,1380,975,1025,1650,180,180],'cm^-1')),
HinderedRotor(inertia=(1.60689,'amu*angstrom^2'), symmetry=1, barrier=(36.9457,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.60271,'amu*angstrom^2'), symmetry=1, barrier=(36.8495,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.6163,'amu*angstrom^2'), symmetry=1, barrier=(37.1619,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.927519,0.0547949,-7.40772e-06,-3.58403e-08,2.00001e-11,51444.8,24.6195], Tmin=(100,'K'), Tmax=(929.137,'K')), NASAPolynomial(coeffs=[16.2632,0.0213752,-6.08651e-06,9.75514e-10,-6.6788e-14,47187.8,-55.8119], Tmin=(929.137,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(426.719,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(C=CJC=C)"""),
)
species(
label = '[CH2]C1=CC=CC1[CH2](1462)',
structure = SMILES('[CH2]C1=CC=CC1[CH2]'),
E0 = (372.743,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,300,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.5413,0.0390306,2.9878e-05,-6.75936e-08,2.93656e-11,44932.9,20.9617], Tmin=(100,'K'), Tmax=(943.865,'K')), NASAPolynomial(coeffs=[13.7154,0.025109,-7.86477e-06,1.34991e-09,-9.53402e-14,40956.8,-45.9573], Tmin=(943.865,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(372.743,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(Cyclopentadiene) + radical(C=CC=CCJ) + radical(Isobutyl)"""),
)
species(
label = '[C]1=C[CH]C=CCC1(911)',
structure = SMILES('[C]1=C[CH]C=CCC1'),
E0 = (429.915,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2807.14,2864.29,2921.43,2978.57,3035.71,3092.86,3150,900,928.571,957.143,985.714,1014.29,1042.86,1071.43,1100,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.4818,-0.0304742,0.000305554,-4.16705e-07,1.70846e-10,51821.5,22.6266], Tmin=(100,'K'), Tmax=(902.923,'K')), NASAPolynomial(coeffs=[39.3533,-0.0274524,2.41569e-05,-4.87715e-09,3.20018e-13,38381.5,-189.045], Tmin=(902.923,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(429.915,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(1,4-Cycloheptadiene) + radical(Cds_S) + radical(C=CCJC=C)"""),
)
species(
label = 'CH2(17)(18)',
structure = SMILES('[CH2]'),
E0 = (381.08,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([971.045,2816.03,3444.23],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.71758,0.00127391,2.17347e-06,-3.48858e-09,1.65209e-12,45872.4,1.75298], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.14632,0.00303671,-9.96474e-07,1.50484e-10,-8.57336e-15,46041.3,4.72342], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(381.08,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(T)""", comment="""Thermo library: FFCM1(-)"""),
)
species(
label = '[C]1=CC=C[CH]C1(1463)',
structure = SMILES('[C]1=CC=C[CH]C1'),
E0 = (423.103,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (78.1118,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.37414,0.0283839,1.08688e-05,-2.50799e-08,8.87725e-12,50952.1,15.7688], Tmin=(100,'K'), Tmax=(1142.54,'K')), NASAPolynomial(coeffs=[7.65087,0.028021,-1.24316e-05,2.38948e-09,-1.69054e-13,48564.2,-15.5646], Tmin=(1142.54,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(423.103,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(282.692,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Cds_S) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = '[CH2]C1=[C]CC=CC1(1468)',
structure = SMILES('[CH2]C1=[C]CC=CC1'),
E0 = (444.104,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.64188,0.0350646,4.00056e-05,-7.49947e-08,3.04584e-11,53513.2,22.6284], Tmin=(100,'K'), Tmax=(978.941,'K')), NASAPolynomial(coeffs=[13.8529,0.0263441,-9.72247e-06,1.83544e-09,-1.34696e-13,49149.5,-46.0998], Tmin=(978.941,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(444.104,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(1,4-Cyclohexadiene) + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = 'C=C1[CH]C[C]=CC1(1316)',
structure = SMILES('C=C1[CH]C[C]=CC1'),
E0 = (444.89,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.2906,0.0152166,9.76408e-05,-1.28508e-07,4.68876e-11,53589.3,17.7574], Tmin=(100,'K'), Tmax=(999.766,'K')), NASAPolynomial(coeffs=[12.3037,0.0322291,-1.35156e-05,2.71447e-09,-2.0428e-13,48734.7,-44.8131], Tmin=(999.766,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(444.89,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Allyl_S) + radical(Cds_S)"""),
)
species(
label = 'C=C1[CH]CC=[C]C1(1319)',
structure = SMILES('C=C1[CH]CC=[C]C1'),
E0 = (444.89,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.2906,0.0152166,9.76408e-05,-1.28508e-07,4.68876e-11,53589.3,17.7574], Tmin=(100,'K'), Tmax=(999.766,'K')), NASAPolynomial(coeffs=[12.3037,0.0322291,-1.35156e-05,2.71447e-09,-2.0428e-13,48734.7,-44.8131], Tmin=(999.766,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(444.89,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Cds_S) + radical(Allyl_S)"""),
)
species(
label = 'C[C]1C=CC=[C]C1(1321)',
structure = SMILES('CC1=C[CH]C=[C]C1'),
E0 = (389.493,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.58168,0.0478088,-1.77125e-05,-1.33198e-09,1.44251e-12,46936.5,22.0805], Tmin=(100,'K'), Tmax=(1464.87,'K')), NASAPolynomial(coeffs=[11.4906,0.0326128,-1.4298e-05,2.64173e-09,-1.79026e-13,42760.8,-33.851], Tmin=(1464.87,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(389.493,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(1,4-Cyclohexadiene) + radical(Cds_S) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = '[CH2]C12[CH]C1C=CC2(1469)',
structure = SMILES('[CH2]C12[CH]C1C=CC2'),
E0 = (540.45,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.98751,0.0259959,5.99637e-05,-9.09563e-08,3.45785e-11,65089.7,21.2844], Tmin=(100,'K'), Tmax=(998.583,'K')), NASAPolynomial(coeffs=[12.8367,0.0279997,-1.13365e-05,2.23617e-09,-1.66711e-13,60656.3,-42.391], Tmin=(998.583,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(540.45,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_3_5_ene_1) + radical(cyclopropane) + radical(Neopentyl)"""),
)
species(
label = '[CH]1C=CC2C[C]1C2(1465)',
structure = SMILES('[CH]1C=C[C]2CC1C2'),
E0 = (402.569,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.80992,0.000392758,0.000135295,-1.69414e-07,6.27402e-11,48484,12.4973], Tmin=(100,'K'), Tmax=(963.381,'K')), NASAPolynomial(coeffs=[12.1782,0.0274438,-9.50754e-06,1.84789e-09,-1.42291e-13,43618.6,-48.2333], Tmin=(963.381,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(402.569,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s3_4_6_ene_1) + radical(cyclohexene-allyl) + radical(Allyl_T)"""),
)
species(
label = 'C=C1C[CH]C2[CH]C12(1464)',
structure = SMILES('C=C1C[CH]C2[CH]C12'),
E0 = (552.396,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.44698,0.01798,6.99451e-05,-9.22816e-08,3.29761e-11,66507.9,22.0996], Tmin=(100,'K'), Tmax=(1017.38,'K')), NASAPolynomial(coeffs=[9.01176,0.0335585,-1.40469e-05,2.74418e-09,-2.00738e-13,63030.2,-20.2118], Tmin=(1017.38,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(552.396,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + polycyclic(s2_3_5_ane) + radical(cyclopropane) + radical(Cs_S)"""),
)
species(
label = 'C=C1C[C]=C[CH]C1(1324)',
structure = SMILES('C=C1C[C]=C[CH]C1'),
E0 = (442.38,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.2906,0.0152166,9.76408e-05,-1.28508e-07,4.68876e-11,53287.3,17.0642], Tmin=(100,'K'), Tmax=(999.766,'K')), NASAPolynomial(coeffs=[12.3037,0.0322291,-1.35156e-05,2.71447e-09,-2.0428e-13,48432.8,-45.5062], Tmin=(999.766,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(442.38,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Cds_S) + radical(cyclohexene-allyl)"""),
)
species(
label = '[CH]=C1C[CH]C=CC1(1318)',
structure = SMILES('[CH]=C1C[CH]C=CC1'),
E0 = (451.634,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,3120,650,792.5,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.24723,0.014181,0.000105573,-1.40057e-07,5.17546e-11,54403.8,17.1487], Tmin=(100,'K'), Tmax=(990.219,'K')), NASAPolynomial(coeffs=[13.6765,0.0299888,-1.22556e-05,2.47862e-09,-1.89056e-13,49101.8,-53.2213], Tmin=(990.219,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(451.634,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Cds_P) + radical(cyclohexene-allyl)"""),
)
species(
label = 'C=C1C[CH][C]=CC1(1327)',
structure = SMILES('C=C1C[CH][C]=CC1'),
E0 = (442.38,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.2906,0.0152166,9.76408e-05,-1.28508e-07,4.68876e-11,53287.3,16.3711], Tmin=(100,'K'), Tmax=(999.766,'K')), NASAPolynomial(coeffs=[12.3037,0.0322291,-1.35156e-05,2.71447e-09,-2.0428e-13,48432.8,-46.1994], Tmin=(999.766,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(442.38,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Cds_S) + radical(cyclohexene-allyl)"""),
)
species(
label = '[CH]=C1[CH]C=CCC1(1098)',
structure = SMILES('[CH]=C1[CH]C=CCC1'),
E0 = (414.757,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,3120,650,792.5,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.36662,0.00707093,0.000133835,-1.75357e-07,6.60051e-11,49968.6,18.1881], Tmin=(100,'K'), Tmax=(966.697,'K')), NASAPolynomial(coeffs=[15.5976,0.0257695,-9.14361e-06,1.83891e-09,-1.45154e-13,43978.8,-62.9457], Tmin=(966.697,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(414.757,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Cds_P) + radical(C=CCJC=C)"""),
)
species(
label = 'C=[C]C1C=C[CH]C1(1467)',
structure = SMILES('C=[C]C1[CH]C=CC1'),
E0 = (459.394,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,1685,370,300,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.04286,0.022852,7.29912e-05,-1.08467e-07,4.21519e-11,55341,21.0817], Tmin=(100,'K'), Tmax=(969.873,'K')), NASAPolynomial(coeffs=[13.7518,0.0252079,-8.9822e-06,1.72124e-09,-1.29579e-13,50687.7,-47.3294], Tmin=(969.873,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(459.394,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclopentene) + radical(cyclopentene-allyl) + radical(Cds_S)"""),
)
species(
label = '[CH2]C1C=C[CH]C=C1(1011)',
structure = SMILES('[CH2]C1[CH]C=CC=C1'),
E0 = (358.591,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,3000,3100,440,815,1455,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.73813,0.0382105,1.75711e-05,-4.30647e-08,1.72123e-11,43220,20.6576], Tmin=(100,'K'), Tmax=(1023.65,'K')), NASAPolynomial(coeffs=[10.2577,0.0318844,-1.26719e-05,2.36496e-09,-1.67453e-13,40063,-27.5443], Tmin=(1023.65,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(358.591,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Isobutyl) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = '[CH2]C1[C]=CC=CC1(1082)',
structure = SMILES('[CH2]C1[C]=CC=CC1'),
E0 = (499.545,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.79573,0.0317879,4.65818e-05,-8.28275e-08,3.41201e-11,60175.8,21.4364], Tmin=(100,'K'), Tmax=(951.957,'K')), NASAPolynomial(coeffs=[13.4436,0.0246032,-7.89595e-06,1.40355e-09,-1.01716e-13,56066.1,-44.1225], Tmin=(951.957,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(499.545,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Cds_S) + radical(Isobutyl)"""),
)
species(
label = '[CH2]C1C=CC=[C]C1(1083)',
structure = SMILES('[CH2]C1C=CC=[C]C1'),
E0 = (499.545,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.79573,0.0317879,4.65818e-05,-8.28275e-08,3.41201e-11,60175.8,21.4364], Tmin=(100,'K'), Tmax=(951.957,'K')), NASAPolynomial(coeffs=[13.4436,0.0246032,-7.89595e-06,1.40355e-09,-1.01716e-13,56066.1,-44.1225], Tmin=(951.957,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(499.545,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Isobutyl) + radical(Cds_S)"""),
)
species(
label = '[CH2]C1C=[C]C=CC1(1085)',
structure = SMILES('[CH2]C1C=[C]C=CC1'),
E0 = (460.698,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.74784,0.033761,4.2203e-05,-7.91839e-08,3.3336e-11,55504.7,20.7316], Tmin=(100,'K'), Tmax=(939.307,'K')), NASAPolynomial(coeffs=[13.1186,0.0251473,-7.61236e-06,1.29111e-09,-9.13001e-14,51612.4,-42.7619], Tmin=(939.307,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(460.698,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Isobutyl) + radical(C=CJC=C)"""),
)
species(
label = '[CH2]C1C=C[C]=CC1(1084)',
structure = SMILES('[CH2]C1C=C[C]=CC1'),
E0 = (460.698,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.74784,0.033761,4.2203e-05,-7.91839e-08,3.3336e-11,55504.7,20.7316], Tmin=(100,'K'), Tmax=(939.307,'K')), NASAPolynomial(coeffs=[13.1186,0.0251473,-7.61236e-06,1.29111e-09,-9.13001e-14,51612.4,-42.7619], Tmin=(939.307,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(460.698,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(C=CJC=C) + radical(Isobutyl)"""),
)
species(
label = '[CH2]C12C=CC1[CH]C2(1471)',
structure = SMILES('[CH2]C12C=CC1[CH]C2'),
E0 = (469.288,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.85787,0.0339227,2.93193e-05,-5.36492e-08,2.01045e-11,56530.8,20.9697], Tmin=(100,'K'), Tmax=(1050.27,'K')), NASAPolynomial(coeffs=[10.6768,0.0322499,-1.38718e-05,2.69904e-09,-1.95168e-13,52918.1,-30.3891], Tmin=(1050.27,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(469.288,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(Neopentyl) + radical(cyclobutane)"""),
)
species(
label = '[CH]=CC=CC([CH2])=C(1332)',
structure = SMILES('[CH]=CC=CC([CH2])=C'),
E0 = (506.698,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,3120,650,792.5,1650,2950,3100,1380,975,1025,1650,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,3000,3100,440,815,1455,1000,180],'cm^-1')),
HinderedRotor(inertia=(1.15733,'amu*angstrom^2'), symmetry=1, barrier=(26.6092,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.15722,'amu*angstrom^2'), symmetry=1, barrier=(26.6068,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.15726,'amu*angstrom^2'), symmetry=1, barrier=(26.6078,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.483155,0.0622715,-1.84927e-05,-3.15989e-08,1.98133e-11,61082.1,23.8446], Tmin=(100,'K'), Tmax=(940.855,'K')), NASAPolynomial(coeffs=[20.2971,0.0156362,-4.09186e-06,6.7582e-10,-4.98686e-14,55689.4,-79.3827], Tmin=(940.855,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(506.698,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)"""),
)
species(
label = '[CH2]C1([CH2])C=CC=C1(1472)',
structure = SMILES('[CH2]C1([CH2])C=CC=C1'),
E0 = (465.364,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,300,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.42465,0.0396313,2.97368e-05,-6.7217e-08,2.82859e-11,56078.3,21.9179], Tmin=(100,'K'), Tmax=(982.514,'K')), NASAPolynomial(coeffs=[15.4929,0.0236232,-8.8244e-06,1.69594e-09,-1.26183e-13,51322,-55.841], Tmin=(982.514,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(465.364,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsCs) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(Cyclopentadiene) + radical(Neopentyl) + radical(Neopentyl)"""),
)
species(
label = '[CH]=C1[CH]CC=CC1(1322)',
structure = SMILES('[CH]C1=CCC=CC1'),
E0 = (425.447,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.67529,0.0328759,5.66842e-05,-9.16105e-08,3.57568e-11,51269.4,22.8633], Tmin=(100,'K'), Tmax=(980.188,'K')), NASAPolynomial(coeffs=[12.6918,0.0329908,-1.24657e-05,2.33331e-09,-1.68919e-13,46944.6,-41.1093], Tmin=(980.188,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(425.447,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(1,4-Cyclohexadiene) + radical(AllylJ2_triplet)"""),
)
species(
label = 'C=C1C=[C]C[CH]C1(1317)',
structure = SMILES('C=C1C=[C]C[CH]C1'),
E0 = (481.092,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.07135,0.0254349,6.14019e-05,-8.78984e-08,3.21349e-11,57946.2,18.7825], Tmin=(100,'K'), Tmax=(1018.86,'K')), NASAPolynomial(coeffs=[10.6667,0.0343328,-1.44778e-05,2.83031e-09,-2.07005e-13,53981.4,-33.7072], Tmin=(1018.86,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(481.092,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(RCCJCC) + radical(Cds_S)"""),
)
species(
label = '[CH2][C]1C=CCC=C1(1010)',
structure = SMILES('[CH2]C1C=CC[CH]C=1'),
E0 = (299.169,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,3000,3100,440,815,1455,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.56291,0.0413666,1.30634e-05,-3.93866e-08,1.57053e-11,36080,18.3901], Tmin=(100,'K'), Tmax=(1056.99,'K')), NASAPolynomial(coeffs=[11.4828,0.0319785,-1.35651e-05,2.60664e-09,-1.8681e-13,32410.3,-37.4565], Tmin=(1056.99,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(299.169,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Allyl_P) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = 'C=C1[C]=CC[CH]C1(1320)',
structure = SMILES('C=C1[C]=CC[CH]C1'),
E0 = (442.245,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.01381,0.0275455,5.64184e-05,-8.32666e-08,3.08276e-11,53275.5,18.804], Tmin=(100,'K'), Tmax=(1010.07,'K')), NASAPolynomial(coeffs=[10.2742,0.0349839,-1.42528e-05,2.73118e-09,-1.97659e-13,49558.6,-31.2691], Tmin=(1010.07,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(442.245,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(RCCJCC) + radical(C=CJC=C)"""),
)
species(
label = '[CH]=C1C=CC[CH]C1(1325)',
structure = SMILES('[CH]=C1C=CC[CH]C1'),
E0 = (490.346,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,3120,650,792.5,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.03012,0.0243799,6.93711e-05,-9.94423e-08,3.69741e-11,59062.6,19.552], Tmin=(100,'K'), Tmax=(1003.62,'K')), NASAPolynomial(coeffs=[11.995,0.0321649,-1.32584e-05,2.60382e-09,-1.92544e-13,54670.2,-40.4769], Tmin=(1003.62,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(490.346,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(Cyclohexane) + radical(Cds_P) + radical(RCCJCC)"""),
)
species(
label = 'C=C=CC=CC=C(1309)',
structure = SMILES('C=C=CC=CC=C'),
E0 = (286.272,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2995,3005,3015,3025,975,983.333,991.667,1000,1300,1325,1350,1375,400,433.333,466.667,500,1630,1646.67,1663.33,1680,540,610,2055,180,180],'cm^-1')),
HinderedRotor(inertia=(1.18076,'amu*angstrom^2'), symmetry=1, barrier=(27.148,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.18543,'amu*angstrom^2'), symmetry=1, barrier=(27.2555,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.762364,0.0568363,-1.08125e-05,-3.39638e-08,1.92028e-11,34560.2,23.1106], Tmin=(100,'K'), Tmax=(953.296,'K')), NASAPolynomial(coeffs=[18.2283,0.0185707,-5.70656e-06,1.00179e-09,-7.29345e-14,29638.8,-68.6632], Tmin=(953.296,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(286.272,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cdd-CdsCds)"""),
)
species(
label = 'C=C1C=CC=[C]C1(1311)',
structure = SMILES('C=C1C=CC=[C]C1'),
E0 = (406.989,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.84681,0.0374744,9.0893e-06,-2.96497e-08,1.12605e-11,49035.2,17.6247], Tmin=(100,'K'), Tmax=(1110.2,'K')), NASAPolynomial(coeffs=[9.79846,0.0316567,-1.38984e-05,2.6783e-09,-1.90598e-13,45862.6,-27.9056], Tmin=(1110.2,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(406.989,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(13cyclohexadiene5methylene) + radical(Cds_S)"""),
)
species(
label = '[CH]=C1C=CC=CC1(1315)',
structure = SMILES('[CH]=C1C=CC=CC1'),
E0 = (416.243,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,3120,650,792.5,1650,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.80993,0.0364058,1.69186e-05,-4.07266e-08,1.57712e-11,50151.4,18.3763], Tmin=(100,'K'), Tmax=(1061.39,'K')), NASAPolynomial(coeffs=[10.8498,0.0299289,-1.29204e-05,2.50673e-09,-1.80563e-13,46678.3,-33.0955], Tmin=(1061.39,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(416.243,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + ring(13cyclohexadiene5methylene) + radical(Cds_P)"""),
)
species(
label = '[CH]=CCC(=C)C=[CH](1330)',
structure = SMILES('[CH]=CCC(=C)C=[CH]'),
E0 = (633.149,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3115,3125,620,680,785,800,1600,1700,2950,3100,1380,975,1025,1650,2995,3025,975,1000,1300,1375,400,500,1630,1680,350,440,435,1725,180],'cm^-1')),
HinderedRotor(inertia=(0.852542,'amu*angstrom^2'), symmetry=1, barrier=(19.6016,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.852181,'amu*angstrom^2'), symmetry=1, barrier=(19.5933,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.852211,'amu*angstrom^2'), symmetry=1, barrier=(19.594,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.608944,0.0638688,-3.67532e-05,-3.18596e-09,7.27055e-12,76281.9,27.2312], Tmin=(100,'K'), Tmax=(982.131,'K')), NASAPolynomial(coeffs=[16.8838,0.0211327,-7.44695e-06,1.33341e-09,-9.39821e-14,71949.4,-56.7737], Tmin=(982.131,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(633.149,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Cds_P) + radical(Cds_P)"""),
)
species(
label = '[CH]=CC=CC[C]=C(1331)',
structure = SMILES('[CH]=CC=CC[C]=C'),
E0 = (625.46,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,1685,370,3120,650,792.5,1650,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,2950,3100,1380,975,1025,1650,180,180],'cm^-1')),
HinderedRotor(inertia=(0.759148,'amu*angstrom^2'), symmetry=1, barrier=(17.4543,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.759148,'amu*angstrom^2'), symmetry=1, barrier=(17.4543,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.759515,'amu*angstrom^2'), symmetry=1, barrier=(17.4627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.797979,0.0610843,-3.68141e-05,2.45206e-09,3.90436e-12,75348.8,27.8293], Tmin=(100,'K'), Tmax=(1023.56,'K')), NASAPolynomial(coeffs=[14.8975,0.0240695,-9.07318e-06,1.64577e-09,-1.14834e-13,71515.1,-45.1492], Tmin=(1023.56,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(625.46,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Cds_P) + radical(Cds_S)"""),
)
species(
label = 'C=C1C=CC[C]C1(1334)',
structure = SMILES('C=C1C=CC[C]C1'),
E0 = (475.186,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.59861,0.0364243,4.09126e-05,-7.03508e-08,2.64429e-11,57252.3,25.4053], Tmin=(100,'K'), Tmax=(1036.51,'K')), NASAPolynomial(coeffs=[12.2374,0.0353215,-1.53106e-05,2.99961e-09,-2.18426e-13,52900.6,-36.6554], Tmin=(1036.51,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(475.186,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(CsJ2_singlet-CsH) + ring(Cyclohexane)"""),
)
species(
label = 'C=C1[C]CC=CC1(1335)',
structure = SMILES('C=C1[C]CC=CC1'),
E0 = (492.33,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.88704,0.0316605,4.55683e-05,-6.8795e-08,2.43472e-11,59302.3,27.1141], Tmin=(100,'K'), Tmax=(1068.11,'K')), NASAPolynomial(coeffs=[9.89702,0.0391346,-1.75506e-05,3.44601e-09,-2.49179e-13,55453.8,-22.0629], Tmin=(1068.11,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(492.33,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-CsCsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(CsJ2_singlet-CsH) + ring(Cyclohexane)"""),
)
species(
label = 'C=C1C=C[C]CC1(1336)',
structure = SMILES('C=C1C=C[C]CC1'),
E0 = (404.158,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.72005,0.0284078,6.96392e-05,-1.06874e-07,4.10743e-11,48710.3,17.8971], Tmin=(100,'K'), Tmax=(993.657,'K')), NASAPolynomial(coeffs=[15.2544,0.0280132,-1.14157e-05,2.28905e-09,-1.73317e-13,43350.4,-60.7486], Tmin=(993.657,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(404.158,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsCsHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(CsJ2_singlet-(Cds-Cds-Cds-Cds)Cs_6_ring) + ring(Cyclohexane)"""),
)
species(
label = 'C=C1C[C]C=CC1(1337)',
structure = SMILES('C=C1C[C]C=CC1'),
E0 = (492.33,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.88704,0.0316605,4.55683e-05,-6.8795e-08,2.43472e-11,59302.3,27.1141], Tmin=(100,'K'), Tmax=(1068.11,'K')), NASAPolynomial(coeffs=[9.89702,0.0391346,-1.75506e-05,3.44601e-09,-2.49179e-13,55453.8,-22.0629], Tmin=(1068.11,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(492.33,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-CsCsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(CsJ2_singlet-CsH) + ring(Cyclohexane)"""),
)
species(
label = '[CH]C1C=CC=CC1(1338)',
structure = SMILES('[CH]C1C=CC=CC1'),
E0 = (505.516,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.67477,0.0365951,2.87729e-05,-5.90221e-08,2.36693e-11,60896,19.0558], Tmin=(100,'K'), Tmax=(1004.94,'K')), NASAPolynomial(coeffs=[12.4829,0.0285111,-1.13073e-05,2.16009e-09,-1.56855e-13,56959.6,-41.9176], Tmin=(1004.94,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(505.516,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(CsJ2_singlet-CsH) + ring(1,3-Cyclohexadiene)"""),
)
species(
label = 'C=C1C[C]2C=CC21(1473)',
structure = SMILES('C=C1C[C]2C=CC21'),
E0 = (330.453,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2850,2950,3050,3150,900,950,1000,1050,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.08707,0.0298981,3.21169e-05,-5.55252e-08,2.1186e-11,39823.8,13.6681], Tmin=(100,'K'), Tmax=(1013.51,'K')), NASAPolynomial(coeffs=[9.49822,0.0301857,-1.20239e-05,2.26481e-09,-1.61861e-13,36804.5,-29.6698], Tmin=(1013.51,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.453,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + polycyclic(s2_4_4_ene_1) + radical(Allyl_T)"""),
)
species(
label = 'C=C1CC2C=C[C]12(1474)',
structure = SMILES('C=C1CC2C=C[C]12'),
E0 = (330.453,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2850,2950,3050,3150,900,950,1000,1050,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.08707,0.0298981,3.21169e-05,-5.55252e-08,2.1186e-11,39823.8,14.3613], Tmin=(100,'K'), Tmax=(1013.51,'K')), NASAPolynomial(coeffs=[9.49822,0.0301857,-1.20239e-05,2.26481e-09,-1.61861e-13,36804.5,-28.9767], Tmin=(1013.51,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.453,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + polycyclic(s2_4_4_ene_1) + radical(Allyl_T)"""),
)
species(
label = 'C=C1[CH]C2C=CC12(1475)',
structure = SMILES('C=C1[CH]C2C=CC12'),
E0 = (339.584,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2850,2950,3050,3150,900,950,1000,1050,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.0149,0.029982,3.59116e-05,-6.16231e-08,2.36478e-11,40926,14.5675], Tmin=(100,'K'), Tmax=(1012.12,'K')), NASAPolynomial(coeffs=[10.7653,0.0287308,-1.16324e-05,2.23141e-09,-1.61772e-13,37447.5,-36.1875], Tmin=(1012.12,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(339.584,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + polycyclic(s2_4_4_ene_1) + radical(Allyl_S)"""),
)
species(
label = 'C=C1CC2[C]=CC12(1476)',
structure = SMILES('C=C1CC2[C]=CC12'),
E0 = (450.958,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2850,2950,3050,3150,900,950,1000,1050,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.75575,0.0400835,1.47586e-06,-2.34122e-08,9.73005e-12,54326.3,16.388], Tmin=(100,'K'), Tmax=(1084.8,'K')), NASAPolynomial(coeffs=[9.96598,0.0300125,-1.25336e-05,2.36476e-09,-1.66791e-13,51356.3,-29.3689], Tmin=(1084.8,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(450.958,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + polycyclic(s2_4_4_ene_1) + radical(cyclobutene-vinyl)"""),
)
species(
label = 'C=C1CC2C=[C]C12(1477)',
structure = SMILES('C=C1CC2C=[C]C12'),
E0 = (450.958,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2850,2950,3050,3150,900,950,1000,1050,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.75575,0.0400835,1.47586e-06,-2.34122e-08,9.73005e-12,54326.3,16.388], Tmin=(100,'K'), Tmax=(1084.8,'K')), NASAPolynomial(coeffs=[9.96598,0.0300125,-1.25336e-05,2.36476e-09,-1.66791e-13,51356.3,-29.3689], Tmin=(1084.8,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(450.958,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + polycyclic(s2_4_4_ene_1) + radical(cyclobutene-vinyl)"""),
)
species(
label = '[CH]=C1CC2C=CC12(1478)',
structure = SMILES('[CH]=C1CC2C=CC12'),
E0 = (445.568,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,3120,650,792.5,1650,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.72606,0.0389167,9.71224e-06,-3.51078e-08,1.45431e-11,53680.9,17.1146], Tmin=(100,'K'), Tmax=(1036.34,'K')), NASAPolynomial(coeffs=[11.087,0.0281789,-1.14995e-05,2.18079e-09,-1.5578e-13,50377.1,-34.9593], Tmin=(1036.34,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(445.568,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + polycyclic(s2_4_4_ene_1) + radical(Cds_P)"""),
)
species(
label = 'C=C1C[C]2C[CH]C21(1479)',
structure = SMILES('C=C1C[C]2C[CH]C21'),
E0 = (591.541,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.31428,0.0275348,3.20518e-05,-4.64453e-08,1.55019e-11,71214.5,21.3885], Tmin=(100,'K'), Tmax=(1114.32,'K')), NASAPolynomial(coeffs=[6.37293,0.0383703,-1.67315e-05,3.19974e-09,-2.26206e-13,68732.7,-5.70874], Tmin=(1114.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(591.541,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + polycyclic(s2_4_4_ane) + radical(Tertalkyl) + radical(cyclobutane)"""),
)
species(
label = '[CH2]C1CC2C=C[C]12(1480)',
structure = SMILES('[CH2]C1CC2C=C[C]12'),
E0 = (413.399,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.15735,0.0241198,6.16929e-05,-9.25679e-08,3.60749e-11,49801.6,17.6473], Tmin=(100,'K'), Tmax=(959.802,'K')), NASAPolynomial(coeffs=[10.9044,0.02879,-9.87488e-06,1.78254e-09,-1.28063e-13,46228.3,-34.0607], Tmin=(959.802,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(413.399,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(Allyl_T) + radical(Isobutyl)"""),
)
species(
label = 'C=C1CC2[CH]C[C]12(1481)',
structure = SMILES('C=C1CC2[CH]C[C]12'),
E0 = (538.099,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.50831,0.0148328,8.32598e-05,-1.08581e-07,3.95072e-11,64788,18.4947], Tmin=(100,'K'), Tmax=(992.276,'K')), NASAPolynomial(coeffs=[9.43439,0.0326691,-1.28715e-05,2.47698e-09,-1.81615e-13,61160.8,-26.2166], Tmin=(992.276,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(538.099,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + polycyclic(s2_4_4_ane) + radical(cyclobutane) + radical(Allyl_T)"""),
)
species(
label = '[CH2]C1[CH]C2C=CC12(1060)',
structure = SMILES('[CH2]C1[CH]C2C=CC12'),
E0 = (469.257,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.03868,0.030161,3.78046e-05,-6.32735e-08,2.43887e-11,56520.6,21.4749], Tmin=(100,'K'), Tmax=(993.955,'K')), NASAPolynomial(coeffs=[9.60448,0.0314292,-1.19719e-05,2.21511e-09,-1.5749e-13,53450,-22.8608], Tmin=(993.955,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(469.257,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(cyclobutane) + radical(Isobutyl)"""),
)
species(
label = 'C=C1CC2C[CH][C]12(1482)',
structure = SMILES('C=C1CC2C[CH][C]12'),
E0 = (538.099,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.50831,0.0148328,8.32598e-05,-1.08581e-07,3.95072e-11,64788,18.4947], Tmin=(100,'K'), Tmax=(992.276,'K')), NASAPolynomial(coeffs=[9.43439,0.0326691,-1.28715e-05,2.47698e-09,-1.81615e-13,61160.8,-26.2166], Tmin=(992.276,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(538.099,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + polycyclic(s2_4_4_ane) + radical(Allyl_T) + radical(cyclobutane)"""),
)
species(
label = 'C=C1[CH]C2C[CH]C12(1483)',
structure = SMILES('[CH2]C1=CC2C[CH]C12'),
E0 = (409.44,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.86165,0.0330809,3.45227e-05,-6.12584e-08,2.35369e-11,49333.5,20.0961], Tmin=(100,'K'), Tmax=(1016.4,'K')), NASAPolynomial(coeffs=[10.9151,0.0312566,-1.26741e-05,2.42104e-09,-1.74679e-13,45746.9,-32.3186], Tmin=(1016.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(409.44,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(Allyl_P) + radical(cyclobutane)"""),
)
species(
label = '[CH2]C1C[C]2C=CC21(1484)',
structure = SMILES('[CH2]C1C[C]2C=CC21'),
E0 = (413.399,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.15735,0.0241198,6.16929e-05,-9.25679e-08,3.60749e-11,49801.6,17.6473], Tmin=(100,'K'), Tmax=(959.802,'K')), NASAPolynomial(coeffs=[10.9044,0.02879,-9.87488e-06,1.78254e-09,-1.28063e-13,46228.3,-34.0607], Tmin=(959.802,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(413.399,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(Isobutyl) + radical(Allyl_T)"""),
)
species(
label = 'C=C1C[C]2[CH]CC21(1485)',
structure = SMILES('C=C1C[C]2[CH]CC21'),
E0 = (591.541,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.31428,0.0275348,3.20518e-05,-4.64453e-08,1.55019e-11,71214.5,21.3885], Tmin=(100,'K'), Tmax=(1114.32,'K')), NASAPolynomial(coeffs=[6.37293,0.0383703,-1.67315e-05,3.19974e-09,-2.26206e-13,68732.7,-5.70874], Tmin=(1114.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(591.541,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + polycyclic(s2_4_4_ane) + radical(Tertalkyl) + radical(cyclobutane)"""),
)
species(
label = '[CH2]C1CC2C=[C]C12(1486)',
structure = SMILES('[CH2]C1CC2C=[C]C12'),
E0 = (533.904,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.84152,0.0341777,3.12112e-05,-6.0197e-08,2.42847e-11,64303.4,20.3082], Tmin=(100,'K'), Tmax=(980.605,'K')), NASAPolynomial(coeffs=[10.9796,0.0292518,-1.07375e-05,1.96363e-09,-1.39585e-13,60955.9,-31.5294], Tmin=(980.605,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(533.904,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(cyclobutene-vinyl) + radical(Isobutyl)"""),
)
species(
label = 'C=C1[CH]C2[CH]CC12(1487)',
structure = SMILES('[CH2]C1=CC2[CH]CC12'),
E0 = (409.44,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.86167,0.0330807,3.45235e-05,-6.12595e-08,2.35374e-11,49333.5,20.096], Tmin=(100,'K'), Tmax=(1016.4,'K')), NASAPolynomial(coeffs=[10.915,0.0312567,-1.26741e-05,2.42106e-09,-1.74681e-13,45747,-32.3181], Tmin=(1016.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(409.44,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(cyclobutane) + radical(Allyl_P)"""),
)
species(
label = 'C[C]1C[C]2C=CC12(1488)',
structure = SMILES('C[C]1C[C]2C=CC12'),
E0 = (393.739,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.20532,0.0299995,2.72859e-05,-4.37775e-08,1.53229e-11,47428.5,17.5272], Tmin=(100,'K'), Tmax=(1077.09,'K')), NASAPolynomial(coeffs=[6.57602,0.0372263,-1.54473e-05,2.89272e-09,-2.02819e-13,45126.2,-10.2001], Tmin=(1077.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(393.739,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(Allyl_T) + radical(Tertalkyl)"""),
)
species(
label = 'C[C]1CC2C=[C]C12(1489)',
structure = SMILES('C[C]1CC2C=[C]C12'),
E0 = (514.244,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.88276,0.0399717,-2.1002e-06,-1.40009e-08,5.15362e-12,61930.8,20.2231], Tmin=(100,'K'), Tmax=(1245.2,'K')), NASAPolynomial(coeffs=[7.97367,0.0356223,-1.51911e-05,2.82164e-09,-1.94166e-13,59234.2,-15.2347], Tmin=(1245.2,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(514.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(cyclobutene-vinyl) + radical(Tertalkyl)"""),
)
species(
label = '[CH2]C1CC2[C]=CC12(1490)',
structure = SMILES('[CH2]C1CC2[C]=CC12'),
E0 = (533.904,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.84152,0.0341777,3.12112e-05,-6.0197e-08,2.42847e-11,64303.4,20.3082], Tmin=(100,'K'), Tmax=(980.605,'K')), NASAPolynomial(coeffs=[10.9796,0.0292518,-1.07375e-05,1.96363e-09,-1.39585e-13,60955.9,-31.5294], Tmin=(980.605,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(533.904,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(Isobutyl) + radical(cyclobutene-vinyl)"""),
)
species(
label = '[CH]=C1CC2[CH]CC12(1491)',
structure = SMILES('[CH]=C1CC2[CH]CC12'),
E0 = (653.214,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,3120,650,792.5,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.15386,0.0237823,6.10539e-05,-8.83487e-08,3.29077e-11,78644.8,21.224], Tmin=(100,'K'), Tmax=(1002.35,'K')), NASAPolynomial(coeffs=[10.9442,0.0307916,-1.24196e-05,2.40973e-09,-1.76903e-13,74768.3,-31.7516], Tmin=(1002.35,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(653.214,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + polycyclic(s2_4_4_ane) + radical(Cds_P) + radical(cyclobutane)"""),
)
species(
label = 'C[C]1[CH]C2C=CC12(1058)',
structure = SMILES('C[C]1[CH]C2C=CC12'),
E0 = (449.597,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.11373,0.0355229,6.15926e-06,-1.94574e-08,6.36522e-12,54146.6,21.2706], Tmin=(100,'K'), Tmax=(1259.19,'K')), NASAPolynomial(coeffs=[6.77783,0.037538,-1.62913e-05,3.04424e-09,-2.09845e-13,51637.7,-7.60437], Tmin=(1259.19,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(449.597,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(cyclobutane) + radical(Tertalkyl)"""),
)
species(
label = 'C[C]1CC2[C]=CC12(1492)',
structure = SMILES('C[C]1CC2[C]=CC12'),
E0 = (514.244,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.88276,0.0399717,-2.1002e-06,-1.40009e-08,5.15362e-12,61930.8,20.2231], Tmin=(100,'K'), Tmax=(1245.2,'K')), NASAPolynomial(coeffs=[7.97367,0.0356223,-1.51911e-05,2.82164e-09,-1.94166e-13,59234.2,-15.2347], Tmin=(1245.2,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(514.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(cyclobutene-vinyl) + radical(Tertalkyl)"""),
)
species(
label = 'C[C]1CC2C=C[C]12(1493)',
structure = SMILES('C[C]1CC2C=C[C]12'),
E0 = (393.739,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.20532,0.0299995,2.72859e-05,-4.37775e-08,1.53229e-11,47428.5,17.5272], Tmin=(100,'K'), Tmax=(1077.09,'K')), NASAPolynomial(coeffs=[6.57602,0.0372263,-1.54473e-05,2.89272e-09,-2.02819e-13,45126.2,-10.2001], Tmin=(1077.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(393.739,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(Allyl_T) + radical(Tertalkyl)"""),
)
species(
label = '[CH]=C1CC2C[CH]C12(1494)',
structure = SMILES('[CH]=C1CC2C[CH]C12'),
E0 = (653.214,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,3120,650,792.5,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.15386,0.0237823,6.10539e-05,-8.83487e-08,3.29077e-11,78644.8,21.224], Tmin=(100,'K'), Tmax=(1002.35,'K')), NASAPolynomial(coeffs=[10.9442,0.0307916,-1.24196e-05,2.40973e-09,-1.76903e-13,74768.3,-31.7516], Tmin=(1002.35,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(653.214,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + polycyclic(s2_4_4_ane) + radical(Cds_P) + radical(cyclobutane)"""),
)
species(
label = '[CH2]C1C=CC1[C]=C(1495)',
structure = SMILES('[CH2]C1C=CC1[C]=C'),
E0 = (627.341,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,1685,370,2950,3100,1380,975,1025,1650,3000,3100,440,815,1455,1000,300,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.36304,0.0474315,-3.1244e-06,-2.87347e-08,1.43716e-11,75556.1,23.9651], Tmin=(100,'K'), Tmax=(974.286,'K')), NASAPolynomial(coeffs=[12.3942,0.0268644,-9.52099e-06,1.68629e-09,-1.17187e-13,72233.3,-34.9878], Tmin=(974.286,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(627.341,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclobutene) + radical(Isobutyl) + radical(Cds_S)"""),
)
species(
label = 'C=[C]CC1[CH]C=C1(1496)',
structure = SMILES('C=[C]CC1[CH]C=C1'),
E0 = (595.322,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,1685,370,2950,3100,1380,975,1025,1650,2750,2850,1437.5,1250,1305,750,350,300,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.75637,0.0319308,4.67047e-05,-8.16229e-08,3.28006e-11,71697.1,22.9898], Tmin=(100,'K'), Tmax=(975.729,'K')), NASAPolynomial(coeffs=[13.8491,0.0253403,-9.2428e-06,1.75178e-09,-1.29524e-13,67291.1,-45.5385], Tmin=(975.729,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(595.322,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclobutene) + radical(Cds_S) + radical(cyclobutene-allyl)"""),
)
species(
label = '[CH]=CC1[CH]C(=C)C1(1497)',
structure = SMILES('[CH]=CC1[CH]C(=C)C1'),
E0 = (560.812,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,1099.18,1099.18,1099.18,1099.18,1099.18,1099.18,1099.18,1206,3704.35,3704.35,3704.35,3704.35,3704.35,3704.35,3704.35],'cm^-1')),
HinderedRotor(inertia=(0.0451777,'amu*angstrom^2'), symmetry=1, barrier=(10.4257,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[10.634,-0.0193825,0.000143472,-1.35266e-07,3.12957e-11,67100.5,-17.3383], Tmin=(100,'K'), Tmax=(1690.71,'K')), NASAPolynomial(coeffs=[70.1805,0.0326372,-7.38209e-05,1.78986e-08,-1.33085e-12,19395.4,-417.42], Tmin=(1690.71,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(560.812,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(methylenecyclobutane) + radical(Cds_P) + radical(Allyl_S)"""),
)
species(
label = '[CH2]C(=C)C1[CH]C=C1(1498)',
structure = SMILES('[CH2]C(=C)C1[CH]C=C1'),
E0 = (493.157,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,3000,3100,440,815,1455,1000,2950,3100,1380,975,1025,1650,350,440,435,1725,300,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.42542,0.0407269,2.60325e-05,-6.23853e-08,2.64629e-11,59419.9,19.418], Tmin=(100,'K'), Tmax=(979.763,'K')), NASAPolynomial(coeffs=[14.6112,0.0253616,-9.33621e-06,1.7537e-09,-1.28183e-13,54989.8,-53.3486], Tmin=(979.763,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(493.157,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclobutene) + radical(cyclobutene-allyl) + radical(Allyl_P)"""),
)
species(
label = '[CH]=CC1[CH]CC1=C(1499)',
structure = SMILES('[CH]=CC1[CH]CC1=C'),
E0 = (613.694,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,1109.01,1109.01,1109.01,1109.01,2780.9,4000,4000,4000,4000,4000,4000,4000,4000,4000,4000],'cm^-1')),
HinderedRotor(inertia=(1.00378,'amu*angstrom^2'), symmetry=1, barrier=(32.1589,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[11.8547,-0.0202652,0.000136514,-1.26839e-07,2.86711e-11,73394,-21.197], Tmin=(100,'K'), Tmax=(1743.15,'K')), NASAPolynomial(coeffs=[77.4855,0.0254868,-7.18216e-05,1.74595e-08,-1.29172e-12,20681.1,-459.868], Tmin=(1743.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(613.694,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(methylenecyclobutane) + radical(Cs_S) + radical(Cds_P)"""),
)
species(
label = 'H2CCCH2(837)',
structure = SMILES('C=C=C'),
E0 = (175.934,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,540,610,2055],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (40.0639,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.37448,0.00704613,2.78309e-05,-3.99448e-08,1.55731e-11,21188.6,7.62046], Tmin=(100,'K'), Tmax=(949.703,'K')), NASAPolynomial(coeffs=[6.79954,0.00959982,-3.0207e-06,5.37832e-10,-3.9261e-14,19772.3,-12.7581], Tmin=(949.703,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(175.934,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(157.975,'J/(mol*K)'), label="""allene""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = 'C1=CC=C1(1500)',
structure = SMILES('C1=CC=C1'),
E0 = (423.524,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,302.32,887.313,887.482,887.544,887.544,887.552,887.572,887.636,887.693,3398.76],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (52.0746,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.16892,0.00524741,5.47729e-05,-7.7291e-08,3.06591e-11,50980.1,7.66708], Tmin=(100,'K'), Tmax=(938.995,'K')), NASAPolynomial(coeffs=[10.542,0.00709254,-1.29542e-06,2.30939e-10,-2.16954e-14,48129.4,-35.2459], Tmin=(938.995,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(423.524,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(182.918,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(cyclobutadiene_13)"""),
)
species(
label = 'C=C1CC2[C]CC12(1501)',
structure = SMILES('C=C1CC2[C]CC12'),
E0 = (646.944,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.95183,0.0281782,5.57934e-05,-8.47338e-08,3.18776e-11,77897.9,27.8038], Tmin=(100,'K'), Tmax=(1004.03,'K')), NASAPolynomial(coeffs=[11.3737,0.0324773,-1.31308e-05,2.53183e-09,-1.84753e-13,73897.3,-28.1902], Tmin=(1004.03,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(646.944,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsCsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + group(CsJ2_singlet-CsH) + polycyclic(s2_4_4_ane)"""),
)
species(
label = 'C=C1CC2C[C]C12(1502)',
structure = SMILES('C=C1CC2C[C]C12'),
E0 = (644.739,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,2950,3100,1380,975,1025,1650,300,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.88863,0.0321307,4.02732e-05,-6.55939e-08,2.43621e-11,77632.6,27.3299], Tmin=(100,'K'), Tmax=(1031.49,'K')), NASAPolynomial(coeffs=[10.2492,0.0347731,-1.4559e-05,2.80034e-09,-2.0177e-13,74042.5,-22.306], Tmin=(1031.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(644.739,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsCsHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + group(CsJ2_singlet-CsH) + polycyclic(s2_4_4_ane)"""),
)
species(
label = '[CH]C1CC2C=CC12(1503)',
structure = SMILES('[CH]C1CC2C=CC12'),
E0 = (527.436,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2816.67,2883.33,2950,3016.67,3083.33,3150,900,933.333,966.667,1000,1033.33,1066.67,1100,300,800,800,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.7843,0.0349815,2.93339e-05,-5.64519e-08,2.19336e-11,63527.6,18.4022], Tmin=(100,'K'), Tmax=(1022.64,'K')), NASAPolynomial(coeffs=[11.3755,0.0304882,-1.25117e-05,2.40361e-09,-1.73802e-13,59839.2,-36.5268], Tmin=(1022.64,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(527.436,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(CsJ2_singlet-CsH) + polycyclic(s2_4_4_ene_1)"""),
)
species(
label = '[CH]1C2C[C]3CC2C13(1504)',
structure = SMILES('[CH]1C2C[C]3CC2C13'),
E0 = (681.589,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2807.14,2864.29,2921.43,2978.57,3035.71,3092.86,3150,900,928.571,957.143,985.714,1014.29,1042.86,1071.43,1100,327.326,838.508,838.508,838.508,838.508,838.508,838.508,838.508,838.508,838.508,838.508,838.508,1682.09,1682.09,1682.09,1682.09,1682.09,1682.09,1682.09,1682.09,1682.09,1682.09,1682.09],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.05024,0.0223787,2.89452e-05,-3.20302e-08,8.23412e-12,82008.6,10.251], Tmin=(100,'K'), Tmax=(1431.4,'K')), NASAPolynomial(coeffs=[4.18241,0.045001,-2.1783e-05,4.18139e-09,-2.88432e-13,79042.8,-4.84479], Tmin=(1431.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(681.589,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + polycyclic(s3_4_5_ane) + polycyclic(s2_4_5_ane) + polycyclic(s2_4_4_ane) - ring(Cyclopentane) - ring(Cyclobutane) - ring(Cyclobutane) + radical(bicyclo[2.1.1]hexane-C5) + radical(bicyclo[2.1.1]hexane-C1)"""),
)
species(
label = '[CH]1C2C[C]3CC1C32(1505)',
structure = SMILES('[CH]1C2C[C]3CC1C32'),
E0 = (715.708,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2807.14,2864.29,2921.43,2978.57,3035.71,3092.86,3150,900,928.571,957.143,985.714,1014.29,1042.86,1071.43,1100,327.15,837.799,837.799,837.799,837.799,837.799,837.799,837.799,837.799,837.799,837.799,837.799,1676.16,1676.16,1676.16,1676.16,1676.16,1676.16,1676.16,1676.16,1676.16,1676.16,1676.16],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.19251,0.0270208,1.44979e-05,-1.89226e-08,4.52651e-12,86100.7,11.3768], Tmin=(100,'K'), Tmax=(1640.23,'K')), NASAPolynomial(coeffs=[7.05601,0.0410252,-1.97326e-05,3.69772e-09,-2.48382e-13,81682,-18.7803], Tmin=(1640.23,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(715.708,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(357.522,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + polycyclic(s2_4_4_ane) + polycyclic(s2_4_4_ane) + polycyclic(s2_4_4_ane) - ring(Cyclobutane) - ring(Cyclobutane) - ring(Cyclobutane) + radical(bicyclo[3.1.1]heptane-C6) + radical(bicyclo[2.2.0]hexane-tertiary)"""),
)
species(
label = 'CC12[CH]C=CC1[CH]2(1506)',
structure = SMILES('CC12[CH]C=CC1[CH]2'),
E0 = (462.758,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.19084,0.0177598,8.80872e-05,-1.21683e-07,4.56392e-11,55741.6,17.7333], Tmin=(100,'K'), Tmax=(987.503,'K')), NASAPolynomial(coeffs=[13.5924,0.0272311,-1.0838e-05,2.17399e-09,-1.65541e-13,50776.2,-50.869], Tmin=(987.503,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(462.758,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_3_5_ene_1) + radical(cyclopropane) + radical(cyclopentene-allyl)"""),
)
species(
label = 'CC1=C[CH]C2[CH]C12(1507)',
structure = SMILES('CC1=C[CH]C2[CH]C12'),
E0 = (459.543,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.1885,0.0228951,6.32527e-05,-9.0472e-08,3.36469e-11,55350.4,18.255], Tmin=(100,'K'), Tmax=(1000.22,'K')), NASAPolynomial(coeffs=[10.8125,0.0308748,-1.24016e-05,2.40187e-09,-1.76258e-13,51500.9,-33.9719], Tmin=(1000.22,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(459.543,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + polycyclic(s2_3_5_ene_1) + radical(cyclopropane) + radical(cyclopentene-allyl)"""),
)
species(
label = 'CC1[CH]C=C=CC=1(1508)',
structure = SMILES('Cc1cc[c]cc1'),
E0 = (286.274,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.11576,0.0264434,4.59955e-05,-7.56148e-08,3.001e-11,34512.1,18.0285], Tmin=(100,'K'), Tmax=(969.56,'K')), NASAPolynomial(coeffs=[11.6004,0.0245549,-8.69846e-06,1.60902e-09,-1.17136e-13,30922.5,-36.4631], Tmin=(969.56,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(286.274,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(328.422,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CbHHH) + group(Cb-Cs) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + ring(Benzene) + radical(CbJ)"""),
)
species(
label = 'CH3(15)(16)',
structure = SMILES('[CH3]'),
E0 = (136.188,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([604.263,1333.71,1492.19,2836.77,2836.77,3806.92],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (15.0345,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.65718,0.0021266,5.45839e-06,-6.6181e-09,2.46571e-12,16422.7,1.67354], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.97812,0.00579785,-1.97558e-06,3.07298e-10,-1.79174e-14,16509.5,4.72248], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(136.188,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""CH3""", comment="""Thermo library: FFCM1(-)"""),
)
species(
label = 'C1=C[CH]C=CC=1(1509)',
structure = SMILES('[c]1ccccc1'),
E0 = (323.212,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,300,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (77.1039,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.84111,0.00698566,8.17877e-05,-1.13149e-07,4.4482e-11,38932.1,13.8754], Tmin=(100,'K'), Tmax=(938.833,'K')), NASAPolynomial(coeffs=[12.5686,0.012745,-2.83411e-06,4.97035e-10,-4.18281e-14,35025.3,-43.5195], Tmin=(938.833,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(323.212,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(257.749,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + ring(Benzene) + radical(CbJ)"""),
)
species(
label = 'CC1[CH]C[C]=CC=1(1510)',
structure = SMILES('CC1[CH]C[C]=CC=1'),
E0 = (384.048,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.51948,0.0482638,-1.80993e-05,-1.66282e-09,1.68639e-12,46284.6,19.6103], Tmin=(100,'K'), Tmax=(1400.76,'K')), NASAPolynomial(coeffs=[11.2804,0.0324856,-1.41552e-05,2.62425e-09,-1.78895e-13,42363.5,-35.0055], Tmin=(1400.76,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(384.048,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Cds_S) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = 'CC1[CH]CC=[C]C=1(1511)',
structure = SMILES('CC1[CH]CC=[C]C=1'),
E0 = (345.201,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.4413,0.050568,-2.35805e-05,3.44831e-09,2.10639e-13,41614.9,19.0167], Tmin=(100,'K'), Tmax=(1561.26,'K')), NASAPolynomial(coeffs=[12.2427,0.0311984,-1.29489e-05,2.31528e-09,-1.5344e-13,37230.1,-41.1466], Tmin=(1561.26,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(345.201,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(C=CJC=C) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = 'CC1=[C]C=CC[CH]1(1512)',
structure = SMILES('CC1=[C]C=CC[CH]1'),
E0 = (345.201,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.4413,0.050568,-2.35805e-05,3.44831e-09,2.10639e-13,41614.9,19.0167], Tmin=(100,'K'), Tmax=(1561.26,'K')), NASAPolynomial(coeffs=[12.2427,0.0311984,-1.29489e-05,2.31528e-09,-1.5344e-13,37230.1,-41.1466], Tmin=(1561.26,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(345.201,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(C=CJC=C) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = 'C[C]1C2[CH]C=CC12(1513)',
structure = SMILES('C[C]1C2[CH]C=CC12'),
E0 = (425.286,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.27195,0.022552,5.94524e-05,-8.3682e-08,3.06598e-11,51225.8,17.2581], Tmin=(100,'K'), Tmax=(1008.35,'K')), NASAPolynomial(coeffs=[9.53544,0.032565,-1.32003e-05,2.5384e-09,-1.84423e-13,47787.1,-27.632], Tmin=(1008.35,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(425.286,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_3_5_ene_1) + radical(Tertalkyl) + radical(cyclopentene-allyl)"""),
)
species(
label = 'CC1[CH]C2[CH]C2C=1(1514)',
structure = SMILES('CC1[CH]C2[CH]C2C=1'),
E0 = (459.543,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.1885,0.0228951,6.32527e-05,-9.0472e-08,3.36469e-11,55350.4,17.5618], Tmin=(100,'K'), Tmax=(1000.22,'K')), NASAPolynomial(coeffs=[10.8125,0.0308748,-1.24016e-05,2.40187e-09,-1.76258e-13,51500.9,-34.665], Tmin=(1000.22,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(459.543,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + polycyclic(s2_3_5_ene_1) + radical(cyclopropane) + radical(cyclopentene-allyl)"""),
)
species(
label = '[CH]1[CH]C=CC=C1(1038)',
structure = SMILES('[CH]1[CH]C=CC=C1'),
E0 = (282.149,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2830,2910,2990,3070,3150,900,940,980,1020,1060,1100,300,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (78.1118,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.65505,0.030509,-1.75379e-06,-8.37309e-09,2.50596e-12,33982.1,11.3122], Tmin=(100,'K'), Tmax=(1572.61,'K')), NASAPolynomial(coeffs=[9.21399,0.0284297,-1.37001e-05,2.5963e-09,-1.76603e-13,30113.3,-29.0418], Tmin=(1572.61,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(282.149,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(282.692,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Aromatic_pi_S_1_3) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = 'CC1[C]=CC=C[CH]1(1039)',
structure = SMILES('CC1[C]=CC=C[CH]1'),
E0 = (391.35,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.67552,0.0411614,5.65849e-06,-2.75448e-08,1.07406e-11,47160.6,19.5808], Tmin=(100,'K'), Tmax=(1108.77,'K')), NASAPolynomial(coeffs=[9.95366,0.0336692,-1.44718e-05,2.75697e-09,-1.94909e-13,43949.7,-27.4126], Tmin=(1108.77,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(391.35,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Cds_S) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = 'CC1=C=C[CH]C=C1(1515)',
structure = SMILES('Cc1[c]cccc1'),
E0 = (286.274,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,300,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.11576,0.0264434,4.59955e-05,-7.56148e-08,3.001e-11,34512.1,18.7216], Tmin=(100,'K'), Tmax=(969.56,'K')), NASAPolynomial(coeffs=[11.6004,0.0245549,-8.69846e-06,1.60902e-09,-1.17136e-13,30922.5,-35.77], Tmin=(969.56,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(286.274,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(328.422,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CbHHH) + group(Cb-Cs) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + ring(Benzene) + radical(CbJ)"""),
)
species(
label = 'CC1C=C=C[CH]C=1(1516)',
structure = SMILES('Cc1c[c]ccc1'),
E0 = (286.274,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2883.33,3016.67,3150,900,966.667,1033.33,1100,300,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (91.1305,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.11576,0.0264434,4.59955e-05,-7.56148e-08,3.001e-11,34512.1,18.7216], Tmin=(100,'K'), Tmax=(969.56,'K')), NASAPolynomial(coeffs=[11.6004,0.0245549,-8.69846e-06,1.60902e-09,-1.17136e-13,30922.5,-35.77], Tmin=(969.56,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(286.274,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(328.422,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CbHHH) + group(Cb-Cs) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + group(Cb-H) + ring(Benzene) + radical(CbJ)"""),
)
species(
label = 'CC1=[C]C[CH]C=C1(1517)',
structure = SMILES('CC1=[C]C[CH]C=C1'),
E0 = (385.512,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.51948,0.0482638,-1.80994e-05,-1.66277e-09,1.68638e-12,46460.7,19.4543], Tmin=(100,'K'), Tmax=(1400.76,'K')), NASAPolynomial(coeffs=[11.2804,0.0324856,-1.41552e-05,2.62425e-09,-1.78894e-13,42539.6,-35.1616], Tmin=(1400.76,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(385.512,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Cds_S) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = 'CC1C=[C]C[CH]C=1(1518)',
structure = SMILES('CC1C=[C]C[CH]C=1'),
E0 = (385.512,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.51948,0.0482638,-1.80994e-05,-1.66277e-09,1.68638e-12,46460.7,19.4543], Tmin=(100,'K'), Tmax=(1400.76,'K')), NASAPolynomial(coeffs=[11.2804,0.0324856,-1.41552e-05,2.62425e-09,-1.78894e-13,42539.6,-35.1616], Tmin=(1400.76,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(385.512,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Cds_S) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = 'CC1=[C][CH]CC=C1(1519)',
structure = SMILES('CC1[C]=CC[CH]C=1'),
E0 = (346.666,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.44128,0.0505681,-2.35808e-05,3.44859e-09,2.10555e-13,41791,18.8608], Tmin=(100,'K'), Tmax=(1561.28,'K')), NASAPolynomial(coeffs=[12.243,0.0311979,-1.29487e-05,2.31523e-09,-1.53436e-13,37406.1,-41.3047], Tmin=(1561.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(346.666,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(Aromatic_pi_S_1_3) + radical(C=CJC=C)"""),
)
species(
label = 'CC12[CH][CH]C1C=C2(1520)',
structure = SMILES('CC12[CH][CH]C1C=C2'),
E0 = (452.129,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.02841,0.0314245,3.08196e-05,-5.08192e-08,1.80182e-11,54459.5,20.9944], Tmin=(100,'K'), Tmax=(1086.07,'K')), NASAPolynomial(coeffs=[9.14252,0.0351367,-1.56218e-05,3.04837e-09,-2.19134e-13,51150,-22.0365], Tmin=(1086.07,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(452.129,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + polycyclic(s2_4_4_ene_1) + radical(cyclobutane) + radical(cyclobutane)"""),
)
species(
label = 'CC1=C=CCC=C1(1521)',
structure = SMILES('CC1=C=CCC=C1'),
E0 = (350.912,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.61591,0.0364003,3.39287e-05,-6.86778e-08,2.83529e-11,42305.2,18.5516], Tmin=(100,'K'), Tmax=(977.742,'K')), NASAPolynomial(coeffs=[13.881,0.0253879,-9.26138e-06,1.73925e-09,-1.27361e-13,38034.7,-49.9175], Tmin=(977.742,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(350.912,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsCsH) + group(Cdd-CdsCds) + ring(124cyclohexatriene)"""),
)
species(
label = 'CC1C=C=CCC=1(1522)',
structure = SMILES('CC1C=C=CCC=1'),
E0 = (350.912,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.61589,0.0364005,3.39277e-05,-6.86766e-08,2.83523e-11,42305.2,18.5517], Tmin=(100,'K'), Tmax=(977.746,'K')), NASAPolynomial(coeffs=[13.8811,0.0253878,-9.2613e-06,1.73923e-09,-1.27359e-13,38034.7,-49.918], Tmin=(977.746,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(350.912,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cdd-CdsCds) + ring(124cyclohexatriene)"""),
)
species(
label = 'CC1=CC2C=CC12(1047)',
structure = SMILES('CC1=CC2C=CC12'),
E0 = (350.525,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.42189,0.0517704,-2.85408e-05,7.32027e-09,-7.38392e-13,42255.6,14.1261], Tmin=(100,'K'), Tmax=(2259.39,'K')), NASAPolynomial(coeffs=[18.2081,0.0220524,-8.81129e-06,1.49882e-09,-9.42557e-14,34670.2,-80.5402], Tmin=(2259.39,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(350.525,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + Estimated bicyclic component: polycyclic(s2_4_4_ane) - ring(Cyclobutane) - ring(Cyclobutane) + ring(Cyclobutene) + ring(Cyclobutene)"""),
)
species(
label = 'CC1[CH]C=C[C]=C1(1040)',
structure = SMILES('CC1[CH]C=C[C]=C1'),
E0 = (352.504,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.60944,0.0433834,2.33583e-07,-2.22646e-08,9.12346e-12,42490.2,18.939], Tmin=(100,'K'), Tmax=(1103.49,'K')), NASAPolynomial(coeffs=[9.52416,0.0343753,-1.42754e-05,2.66406e-09,-1.86046e-13,39545.1,-25.454], Tmin=(1103.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(352.504,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(C=CJC=C) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = 'CC1[CH]C=[C]C=C1(1041)',
structure = SMILES('CC1[CH]C=[C]C=C1'),
E0 = (352.504,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.60944,0.0433834,2.33583e-07,-2.22646e-08,9.12346e-12,42490.2,18.2458], Tmin=(100,'K'), Tmax=(1103.49,'K')), NASAPolynomial(coeffs=[9.52416,0.0343753,-1.42754e-05,2.66406e-09,-1.86046e-13,39545.1,-26.1472], Tmin=(1103.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(352.504,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + ring(1,3-Cyclohexadiene) + radical(C=CJC=C) + radical(Aromatic_pi_S_1_3)"""),
)
species(
label = 'CC1C=C=CC=C1(1032)',
structure = SMILES('CC1C=C=CC=C1'),
E0 = (354.933,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,2950,3050,3150,900,950,1000,1050,1100,2750,2800,2850,1350,1500,750,1050,1375,1000,300,800,800,800,800,800,800,800,800,800,1600,1600,1600,1600,1600,1600,1600,1600,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.51932,0.04084,1.83655e-05,-5.052e-08,2.14499e-11,42790.1,17.1911], Tmin=(100,'K'), Tmax=(991.681,'K')), NASAPolynomial(coeffs=[13.0177,0.0271087,-1.02481e-05,1.91402e-09,-1.37771e-13,38904.1,-46.2801], Tmin=(991.681,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(354.933,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cdd-CdsCds) + ring(124cyclohexatriene)"""),
)
species(
label = 'CC12C=CC1C=C2(1523)',
structure = SMILES('CC12C=CC1C=C2'),
E0 = (356.288,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.61039,0.0395734,1.71689e-05,-4.39499e-08,1.73348e-11,42948.7,13.8676], Tmin=(100,'K'), Tmax=(1053.66,'K')), NASAPolynomial(coeffs=[11.9865,0.0305253,-1.31459e-05,2.56114e-09,-1.85368e-13,39077.7,-44.7263], Tmin=(1053.66,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(356.288,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsCs) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + Estimated bicyclic component: polycyclic(s2_4_4_ane) - ring(Cyclobutane) - ring(Cyclobutane) + ring(Cyclobutene) + ring(Cyclobutene)"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ar(8)',
structure = SMILES('[Ar]'),
E0 = (-6.19426,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (39.348,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,-745,4.3663], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,-745,4.3663], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-6.19426,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ar""", comment="""Thermo library: BurkeH2O2"""),
)
transitionState(
label = 'TS1',
E0 = (415.204,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (560.898,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (596.729,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (582.851,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (342.509,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (349.727,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (349.727,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (357.355,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (363.952,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (327.662,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (289.235,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (468.256,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (444.394,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (272.169,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (419.66,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (347.022,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (563.182,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (466.837,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (538.279,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (536.546,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (410.677,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (448.806,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (495.074,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (486.701,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (476.537,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (466.841,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (441.843,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (696.024,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (434.769,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS30',
E0 = (532.679,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS31',
E0 = (599.953,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS32',
E0 = (804.182,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS33',
E0 = (601.004,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS34',
E0 = (601.79,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS35',
E0 = (594.259,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS36',
E0 = (452.859,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS37',
E0 = (540.45,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS38',
E0 = (402.569,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS39',
E0 = (552.396,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS40',
E0 = (599.28,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS41',
E0 = (643.763,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS42',
E0 = (560.473,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS43',
E0 = (459.066,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS44',
E0 = (553.869,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS45',
E0 = (606.814,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS46',
E0 = (662.324,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS47',
E0 = (641.522,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS48',
E0 = (653.776,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS49',
E0 = (630.016,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS50',
E0 = (469.288,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS51',
E0 = (514.748,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS52',
E0 = (622.682,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS53',
E0 = (469.756,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS54',
E0 = (643.871,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS55',
E0 = (443.517,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS56',
E0 = (635.323,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS57',
E0 = (534.655,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS58',
E0 = (468.256,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS59',
E0 = (450.557,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS60',
E0 = (404.681,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS61',
E0 = (618.781,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS62',
E0 = (584.131,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS63',
E0 = (549.279,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS64',
E0 = (570.253,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS65',
E0 = (628.035,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS66',
E0 = (381.452,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS67',
E0 = (292.114,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS68',
E0 = (522.406,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS69',
E0 = (472.743,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS70',
E0 = (503.953,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS71',
E0 = (588.513,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS72',
E0 = (529.882,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS73',
E0 = (549.667,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS74',
E0 = (508.291,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS75',
E0 = (541.936,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS76',
E0 = (407.275,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS77',
E0 = (450.42,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS78',
E0 = (405.869,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS79',
E0 = (450.748,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS80',
E0 = (405.594,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS81',
E0 = (432.539,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS82',
E0 = (499.923,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS83',
E0 = (515.319,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS84',
E0 = (370.286,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS85',
E0 = (467.353,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS86',
E0 = (391.342,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS87',
E0 = (324.143,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS88',
E0 = (370.174,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS89',
E0 = (268.067,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS90',
E0 = (513.208,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS91',
E0 = (641.35,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS92',
E0 = (633.66,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS93',
E0 = (515.568,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS94',
E0 = (435.589,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS95',
E0 = (492.414,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS96',
E0 = (509.558,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS97',
E0 = (425.9,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS98',
E0 = (509.558,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS99',
E0 = (562.364,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS100',
E0 = (444.394,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS101',
E0 = (291.56,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS102',
E0 = (569.973,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS103',
E0 = (542.763,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS104',
E0 = (542.245,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS105',
E0 = (557.14,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS106',
E0 = (662.75,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS107',
E0 = (662.75,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS108',
E0 = (657.36,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS109',
E0 = (614.687,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS110',
E0 = (435.7,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS111',
E0 = (561.245,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS112',
E0 = (491.558,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS113',
E0 = (627.067,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS114',
E0 = (509.13,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS115',
E0 = (476.799,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS116',
E0 = (654.941,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS117',
E0 = (622.873,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS118',
E0 = (448.665,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS119',
E0 = (402.107,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS120',
E0 = (574.912,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS121',
E0 = (573.129,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS122',
E0 = (687.732,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS123',
E0 = (474.57,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS124',
E0 = (574.912,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS125',
E0 = (432.964,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS126',
E0 = (678.187,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS127',
E0 = (635.626,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS128',
E0 = (603.23,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS129',
E0 = (568.344,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS130',
E0 = (501.064,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS131',
E0 = (621.602,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS132',
E0 = (720.794,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS133',
E0 = (734.383,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS134',
E0 = (732.178,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS135',
E0 = (584.284,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS136',
E0 = (591.443,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS137',
E0 = (831.901,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS138',
E0 = (715.708,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS139',
E0 = (462.758,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS140',
E0 = (459.543,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS141',
E0 = (505.84,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS142',
E0 = (493.292,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS143',
E0 = (540.948,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS144',
E0 = (492.478,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS145',
E0 = (499.781,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS146',
E0 = (492.478,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS147',
E0 = (381.832,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS148',
E0 = (389.51,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS149',
E0 = (448.806,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS150',
E0 = (488.415,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS151',
E0 = (459.543,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS152',
E0 = (449.597,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS153',
E0 = (349.448,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS154',
E0 = (701.07,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS155',
E0 = (538.202,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS156',
E0 = (243.094,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS157',
E0 = (505.84,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS158',
E0 = (505.84,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS159',
E0 = (548.291,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS160',
E0 = (548.291,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS161',
E0 = (459.215,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS162',
E0 = (358.582,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS163',
E0 = (452.129,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS164',
E0 = (350.912,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS165',
E0 = (350.912,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS166',
E0 = (546.393,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS167',
E0 = (349.448,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS168',
E0 = (350.525,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS169',
E0 = (412.455,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS170',
E0 = (500.079,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS171',
E0 = (535.085,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS172',
E0 = (596.207,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS173',
E0 = (354.933,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS174',
E0 = (260.876,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS175',
E0 = (356.288,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS176',
E0 = (459.4,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS177',
E0 = (404.681,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS178',
E0 = (498.066,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS179',
E0 = (498.066,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS180',
E0 = (498.066,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS181',
E0 = (386.444,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS182',
E0 = (419.203,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS183',
E0 = (413.365,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS184',
E0 = (412.354,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS185',
E0 = (413.365,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS186',
E0 = (411.9,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS187',
E0 = (438.295,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS188',
E0 = (445.598,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS189',
E0 = (439.76,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS190',
E0 = (452.194,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS191',
E0 = (438.295,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS192',
E0 = (289.235,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS193',
E0 = (384.426,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS194',
E0 = (405.869,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS195',
E0 = (391.729,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS196',
E0 = (324.143,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS197',
E0 = (487.441,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction48',
reactants = ['H(3)(3)', 'C=C1[CH]C=CC=C1(1310)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(9.22566,'m^3/(mol*s)'), n=2.04274, Ea=(10.5229,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""From training reaction 101 used for Cds-CdH_Cds-CdH;HJ
Exact match found for rate rule [Cds-CdH_Cds-CdH;HJ]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction49',
reactants = ['H(3)(3)', 'C=C1C=C[C]=CC1(1313)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(5.46e+08,'cm^3/(mol*s)'), n=1.64, Ea=(15.8155,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2714 used for Ca_Cds-CsH;HJ
Exact match found for rate rule [Ca_Cds-CsH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction81',
reactants = ['H(3)(3)', 'C=C1[C]=CC=CC1(1312)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(1.149e+09,'cm^3/(mol*s)'), n=1.595, Ea=(16.7946,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Ca_Cds-OneDeH;HJ] for rate rule [Ca_Cds-CdH;HJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction71',
reactants = ['H(3)(3)', 'C=C1C=[C]C=CC1(1314)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(1.149e+09,'cm^3/(mol*s)'), n=1.595, Ea=(16.7946,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Ca_Cds-OneDeH;HJ] for rate rule [Ca_Cds-CdH;HJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction64',
reactants = ['C7H8(693)(692)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(4.00798e+09,'s^-1'), n=0.37, Ea=(78.2471,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad;XH_Rrad_De] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction65',
reactants = ['C7H8(693)(692)'],
products = ['CC1=CC=C=CC1(1329)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(2.1261e+09,'s^-1'), n=0.137, Ea=(85.4656,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction87',
reactants = ['C7H8(693)(692)'],
products = ['CC1=C=CC=CC1(1470)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(2.00399e+09,'s^-1'), n=0.37, Ea=(85.4656,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad;XH_Rrad_De] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad;XH_Rrad_De]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction8',
reactants = ['C7H8(693)(692)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(1.08e+10,'s^-1'), n=-0.305, Ea=(93.094,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R3;Y_rad_De;XH_Rrad_De] for rate rule [R3radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction77',
reactants = ['C7H8(693)(692)'],
products = ['C=C1CC=C=CC1(1466)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(3.47101e+09,'s^-1'), n=0.37, Ea=(99.6901,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad_NDe] + [R3radExo;Y_rad;XH_Rrad_NDe] for rate rule [R3radExo;Y_rad_De;XH_Rrad_NDe]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction78',
reactants = ['C7H8(693)(692)'],
products = ['C=C1C=C=CCC1(1089)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(7.437e+08,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3radExo;Y_rad_NDe;XH_Rrad] for rate rule [R3radExo;Y_rad_NDe;XH_Rrad_De]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction102',
reactants = ['C7H8(693)(692)'],
products = ['C=C1C=CCC=C1(1006)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(4.25221e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radExo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction68',
reactants = ['C7H8(693)(692)'],
products = ['C1=CC2CC(=C1)C2(1308)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(1.8e+12,'s^-1'), n=-0.1525, Ea=(203.995,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;C_rad_out_H/OneDe;Cpri_rad_out_2H] + [R4_SSS;C_rad_out_single;Cpri_rad_out_2H] for rate rule [R4_SSS;C_rad_out_H/OneDe;Cpri_rad_out_2H]
Euclidian distance = 2.0
family: Birad_recombination
Ea raised from 201.5 to 204.0 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction88',
reactants = ['C7H8(693)(692)'],
products = ['C1=CC2C=C(C1)C2(1339)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(180.132,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SDS;C_rad_out_2H;Cpri_rad_out_H/OneDe]
Euclidian distance = 2.2360679775
family: Birad_recombination
Ea raised from 177.5 to 180.1 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction80',
reactants = ['C7H8(693)(692)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(1.8e+12,'s^-1'), n=-0.1525, Ea=(7.90776,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;C_rad_out_H/OneDe;Cpri_rad_out_single] + [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/OneDe;Cpri_rad_out_H/OneDe]
Euclidian distance = 2.82842712475
family: Birad_recombination"""),
)
reaction(
label = 'reaction47',
reactants = ['C7H8(693)(692)'],
products = ['[CH]1C[C]2C=CC1C2(1340)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(2.82154e+11,'s^-1'), n=0.15, Ea=(155.399,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn;doublebond_intra_HCd_pri;radadd_intra_cs2H] for rate rule [R6;doublebond_intra_HCd_pri;radadd_intra_cs2H]
Euclidian distance = 1.0
family: Intra_R_Add_Exocyclic
Ea raised from 150.9 to 155.4 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction16',
reactants = ['C=C1[CH]C=CC[CH]1(1095)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(4.02296e+08,'s^-1'), n=1.43567, Ea=(82.7609,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R2H_S;C_rad_out_H/OneDe;Cs_H_out_H/(Cd-Cd-Cd)] for rate rule [R2H_S;C_rad_out_H/(Cd-Cd-Cd);Cs_H_out_H/(Cd-Cd-Cd)]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction51',
reactants = ['C=C1[CH]C=[C]CC1(1096)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(2.66329e+10,'s^-1'), n=0.993, Ea=(157.679,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;Cd_rad_out_Cd;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction52',
reactants = ['C7H8(693)(692)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(1.09894e+08,'s^-1'), n=1.58167, Ea=(202.575,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_2Cd;C_rad_out_2H;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction53',
reactants = ['C[C]1[C]=CC=CC1(1328)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(3.85113e+09,'s^-1'), n=1.0541, Ea=(193.078,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1600,'K'), comment="""From training reaction 288 used for R3H_DS;Cd_rad_out_singleDe_Cd;Cs_H_out_2H
Exact match found for rate rule [R3H_DS;Cd_rad_out_singleDe_Cd;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction54',
reactants = ['C=C1[CH][C]=CCC1(1097)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(6.1583e+09,'s^-1'), n=0.92705, Ea=(170.178,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3H_DS;Cd_rad_out_single;Cs_H_out_H/NonDeC] + [R3H_DS;Cd_rad_out_singleDe_Cd;Cs_H_out] for rate rule [R3H_DS;Cd_rad_out_singleDe_Cd;Cs_H_out_H/NonDeC]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction55',
reactants = ['C=C1[C]C=CCC1(1102)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_H/Cd] for rate rule [R4H_DSS;Cd_rad_out_singleDe_Cd;Cs_H_out_H/Cd]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction56',
reactants = ['C[C]1C=[C]C=CC1(1323)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(3.33e+08,'s^-1'), n=1.1915, Ea=(103.605,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1600,'K'), comment="""From training reaction 284 used for R4H_SDS;Cd_rad_out_Cd;Cs_H_out_2H
Exact match found for rate rule [R4H_SDS;Cd_rad_out_Cd;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction57',
reactants = ['C[C]1C=C[C]=CC1(1326)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(5.59786e+07,'s^-1'), n=1.58088, Ea=(142.57,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnH;Cd_rad_out_Cd;Cs_H_out_2H] for rate rule [R5HJ_1;Cd_rad_out_Cd;Cs_H_out_2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction58',
reactants = ['C7H8(693)(692)'],
products = ['[CH]1C=CC2C[C]2C1(1071)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(5.72653e+12,'s^-1'), n=0.0526095, Ea=(222.439,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;doublebond_intra_secNd;radadd_intra_cs] + [R3_cyclic;doublebond_intra;radadd_intra_cs] for rate rule [Rn1c6_alpha_short;doublebond_intra_secNd_HCd;radadd_intra_cs2H]
Euclidian distance = 3.74165738677
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction59',
reactants = ['C7H8(693)(692)'],
products = ['[CH2]C1=CC2[CH]C2C1(1458)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(2.22857e+12,'s^-1'), n=0, Ea=(212.276,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn0cx_beta;doublebond_intra_pri_HCd;radadd_intra_cs] for rate rule [Rn0c6_beta_short;doublebond_intra_pri_HCd;radadd_intra_csHCs]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Endocyclic
Ea raised from 211.2 to 212.3 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction60',
reactants = ['C7H8(693)(692)'],
products = ['[CH2][C]1CC2C=CC12(1459)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(5.4227e+18,'s^-1'), n=-0.859165, Ea=(202.58,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn0cx_gamma;doublebond_intra;radadd_intra_cs] for rate rule [Rn0c6_gamma;doublebond_intra;radadd_intra_csHCd]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Endocyclic
Ea raised from 202.6 to 202.6 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction61',
reactants = ['C7H8(693)(692)'],
products = ['[CH2]C12[CH]C=CC1C2(1460)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(1.52918e+11,'s^-1'), n=0.426981, Ea=(177.581,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Rn0c6_beta_long_SS_D_HH;doublebond_intra_pri;radadd_intra_csHCs]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic
Ea raised from 175.9 to 177.6 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction62',
reactants = ['C7H8(693)(692)'],
products = ['[CH]1[CH]C2C=C(C1)C2(1461)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(3.125e+09,'s^-1'), n=0.76, Ea=(431.762,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6_cyclic;doublebond_intra_pri_HCd;radadd_intra_cs] for rate rule [Rn1c6_beta_long;doublebond_intra_pri_HCd;radadd_intra_cs2H]
Euclidian distance = 1.41421356237
family: Intra_R_Add_Endocyclic
Ea raised from 428.8 to 431.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction63',
reactants = ['[CH2]C=CC=C[C]=C(1333)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(4.57e+10,'s^-1'), n=0.43, Ea=(8.05002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R6_DSM_D;doublebond_intra_pri_2H;radadd_intra]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction66',
reactants = ['[CH2]C1=CC=CC1[CH2](1462)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS30',
kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction67',
reactants = ['[C]1=C[CH]C=CCC1(911)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS31',
kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction69',
reactants = ['CH2(17)(18)', '[C]1=CC=C[CH]C1(1463)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS32',
kinetics = Arrhenius(A=(2.23625e+06,'m^3/(mol*s)'), n=0.36814, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/NonDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -1.7 to 0 kJ/mol."""),
)
reaction(
label = 'reaction82',
reactants = ['[CH2]C1=[C]CC=CC1(1468)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS33',
kinetics = Arrhenius(A=(1.448e+10,'s^-1'), n=0.82, Ea=(156.9,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 154 used for R2H_S;Cd_rad_out_Cd;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;Cd_rad_out_Cd;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction83',
reactants = ['C=C1[CH]C[C]=CC1(1316)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS34',
kinetics = Arrhenius(A=(1.448e+10,'s^-1'), n=0.82, Ea=(156.9,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 154 used for R2H_S;Cd_rad_out_Cd;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;Cd_rad_out_Cd;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction84',
reactants = ['C=C1[CH]CC=[C]C1(1319)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS35',
kinetics = Arrhenius(A=(1.182e+10,'s^-1'), n=0.86, Ea=(149.369,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R3H_DS;Cd_rad_out_Cs;Cs_H_out_1H] for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_H/Cd]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction85',
reactants = ['C[C]1C=CC=[C]C1(1321)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS36',
kinetics = Arrhenius(A=(60051,'s^-1'), n=2.135, Ea=(63.3667,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R4H_SS(Cd)S;Y_rad_out;Cs_H_out_2H] + [R4H_RSS;Cd_rad_out;Cs_H_out] for rate rule [R4H_SS(Cd)S;Cd_rad_out_Cd;Cs_H_out_2H]
Euclidian distance = 3.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction86',
reactants = ['C7H8(693)(692)'],
products = ['[CH2]C12[CH]C1C=CC2(1469)'],
transitionState = 'TS37',
kinetics = Arrhenius(A=(9.4423e+12,'s^-1'), n=-0.141781, Ea=(276.189,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn0cx_beta;doublebond_intra_pri;radadd_intra_csHCd] + [Rn0c6_beta_short;doublebond_intra_pri;radadd_intra_csHDe] for rate rule [Rn0c6_beta_short;doublebond_intra_pri;radadd_intra_csHCd]
Euclidian distance = 1.0
family: Intra_R_Add_Endocyclic
Ea raised from 275.0 to 276.2 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction76',
reactants = ['C7H8(693)(692)'],
products = ['[CH]1C=CC2C[C]1C2(1465)'],
transitionState = 'TS38',
kinetics = Arrhenius(A=(3.78932e+07,'s^-1'), n=1.19089, Ea=(138.307,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_Cs_HH_D;doublebond_intra;radadd_intra_cs] for rate rule [R4_Cs_HH_D;doublebond_intra;radadd_intra_csHCd]
Euclidian distance = 2.0
family: Intra_R_Add_Endocyclic
Ea raised from 133.9 to 138.3 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction70',
reactants = ['C7H8(693)(692)'],
products = ['C=C1C[CH]C2[CH]C12(1464)'],
transitionState = 'TS39',
kinetics = Arrhenius(A=(2.28717e+07,'s^-1'), n=0.927697, Ea=(288.135,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R6;doublebond_intra;radadd_intra_cs]
Euclidian distance = 0
family: Intra_R_Add_Exocyclic
Ea raised from 285.7 to 288.1 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction40',
reactants = ['C=C1C[C]=C[CH]C1(1324)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS40',
kinetics = Arrhenius(A=(1.448e+10,'s^-1'), n=0.82, Ea=(156.9,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 154 used for R2H_S;Cd_rad_out_Cd;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;Cd_rad_out_Cd;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction73',
reactants = ['[CH]=C1C[CH]C=CC1(1318)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS41',
kinetics = Arrhenius(A=(26875.4,'s^-1'), n=2.58467, Ea=(192.129,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_DS;Cd_rad_out_singleH;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 4.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction74',
reactants = ['C=C1C[CH][C]=CC1(1327)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS42',
kinetics = Arrhenius(A=(4.34148e+10,'s^-1'), n=0.96, Ea=(118.093,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnH;Cd_rad_out_Cd;Cs_H_out_H/Cd] for rate rule [R3HJ;Cd_rad_out_Cd;Cs_H_out_H/Cd]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 4.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction75',
reactants = ['[CH]=C1[CH]C=CCC1(1098)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS43',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction79',
reactants = ['C=[C]C1C=C[CH]C1(1467)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS44',
kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 5 used for cCs(-HC)CJ;CdsJ;C
Exact match found for rate rule [cCs(-HC)CJ;CdsJ;C]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction90',
reactants = ['[CH2]C1C=C[CH]C=C1(1011)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS45',
kinetics = Arrhenius(A=(7.0617e+11,'s^-1'), n=0.637333, Ea=(248.223,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;C_rad_out_H/(Cd-Cd-Cd);XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction91',
reactants = ['[CH2]C1[C]=CC=CC1(1082)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS46',
kinetics = Arrhenius(A=(4.96519e+09,'s^-1'), n=1.05826, Ea=(162.779,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;Cd_rad_out_Cd;XH_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction92',
reactants = ['[CH2]C1C=CC=[C]C1(1083)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS47',
kinetics = Arrhenius(A=(2.4115e+09,'s^-1'), n=1.00333, Ea=(141.977,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_Cs;Cd_rad_out_Cd;XH_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction93',
reactants = ['[CH2]C1C=[C]C=CC1(1085)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS48',
kinetics = Arrhenius(A=(1.28371e+09,'s^-1'), n=1.0541, Ea=(193.078,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1600,'K'), comment="""Estimated using an average for rate rule [R3H_DS;Cd_rad_out_singleDe_Cd;XH_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction94',
reactants = ['[CH2]C1C=C[C]=CC1(1084)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS49',
kinetics = Arrhenius(A=(5.66043e+07,'s^-1'), n=1.66125, Ea=(169.318,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R4H_RSR;Cd_rad_out_singleDe_Cd;XH_out] + [R4H_DSS;Cd_rad_out_single;XH_out] for rate rule [R4H_DSS;Cd_rad_out_singleDe_Cd;XH_out]
Euclidian distance = 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction95',
reactants = ['C7H8(693)(692)'],
products = ['[CH2]C12C=CC1[CH]C2(1471)'],
transitionState = 'TS50',
kinetics = Arrhenius(A=(5.4227e+18,'s^-1'), n=-0.859165, Ea=(205.027,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn0cx_gamma;doublebond_intra_pri_HCd;radadd_intra_cs] for rate rule [Rn0c6_gamma;doublebond_intra_pri_HCd;radadd_intra_cs]
Euclidian distance = 1.0
family: Intra_R_Add_Endocyclic
Ea raised from 204.8 to 205.0 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction96',
reactants = ['[CH]=CC=CC([CH2])=C(1332)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS51',
kinetics = Arrhenius(A=(9.14e+10,'s^-1'), n=0.43, Ea=(8.05002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R6_DSM_D;doublebond_intra;radadd_intra_cdsingleH]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction97',
reactants = ['[CH2]C1([CH2])C=CC=C1(1472)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS52',
kinetics = Arrhenius(A=(5.32e+08,'s^-1'), n=1.36, Ea=(157.318,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [cCsCJ;CsJ-HH;C]
Euclidian distance = 0
Multiplied by reaction path degeneracy 4.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction89',
reactants = ['[CH]=C1[CH]CC=CC1(1322)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS53',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4Hall;Cd_rad_out_singleH;Cs_H_out_H/Cd] for rate rule [R4HJ_2;Cd_rad_out_singleH;Cs_H_out_H/Cd]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction98',
reactants = ['C=C1C=[C]C[CH]C1(1317)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS54',
kinetics = Arrhenius(A=(9.93038e+09,'s^-1'), n=1.05826, Ea=(162.779,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;Cd_rad_out_Cd;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction99',
reactants = ['[CH2][C]1C=CCC=C1(1010)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS55',
kinetics = Arrhenius(A=(7.6e+10,'s^-1'), n=0.87, Ea=(144.348,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3Hall;C_rad_out_H/Cd;Cs_H_out_H/Cd] for rate rule [R3HJ;C_rad_out_H/Cd;Cs_H_out_H/(Cd-Cd-Cd)]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 4.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction100',
reactants = ['C=C1[C]=CC[CH]C1(1320)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS56',
kinetics = Arrhenius(A=(2.56742e+09,'s^-1'), n=1.0541, Ea=(193.078,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1600,'K'), comment="""Estimated using an average for rate rule [R3H_DS;Cd_rad_out_singleDe_Cd;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction101',
reactants = ['[CH]=C1C=CC[CH]C1(1325)'],
products = ['C7H8(693)(692)'],
transitionState = 'TS57',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnH;Cd_rad_out_singleH;Cs_H_out_H/Cd] for rate rule [R5HJ_3;Cd_rad_out_singleH;Cs_H_out_H/Cd]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction1',
reactants = ['C7H8(690)(689)'],
products = ['C1=CC2CC(=C1)C2(1308)'],
transitionState = 'TS58',
kinetics = Arrhenius(A=(5.67327e+12,'s^-1'), n=-0.101958, Ea=(299.109,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [1_3_5_unsaturated_hexane] for rate rule [linear_1_3_5_hexatriene]
Euclidian distance = 1.0
family: Intra_Diels_alder_monocyclic
Ea raised from 297.0 to 299.1 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction2',
reactants = ['C=C=CC=CC=C(1309)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS59',
kinetics = Arrhenius(A=(5.67327e+12,'s^-1'), n=-0.101958, Ea=(164.285,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [1_3_5_unsaturated_hexane] for rate rule [linear_1_3_5_hexatriene]
Euclidian distance = 1.0
family: Intra_Diels_alder_monocyclic"""),
)
reaction(
label = 'reaction3',
reactants = ['H(3)(3)', 'C=C1[CH]C=CC=C1(1310)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS60',
kinetics = Arrhenius(A=(8.28e-13,'cm^3/(molecule*s)'), n=0.611, Ea=(0,'kcal/mol'), T0=(1,'K'), comment="""Matched reaction 53 C7H7-2 + H <=> C7H8-3 in R_Recombination/training
This reaction matched rate rule [C_rad/H/CdCd;H_rad]
family: R_Recombination
Ea raised from -1.8 to 0 kJ/mol."""),
)
reaction(
label = 'reaction4',
reactants = ['H(3)(3)', 'C=C1C=CC=[C]C1(1311)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS61',
kinetics = Arrhenius(A=(1e+13,'cm^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 40 used for Cd_rad/NonDe;H_rad
Exact match found for rate rule [Cd_rad/NonDe;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction5',
reactants = ['H(3)(3)', 'C=C1[C]=CC=CC1(1312)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS62',
kinetics = Arrhenius(A=(6.117e+14,'cm^3/(mol*s)'), n=-0.152, Ea=(4.19655,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 49 used for Cd_rad/Cd;H_rad
Exact match found for rate rule [Cd_rad/Cd;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction6',
reactants = ['H(3)(3)', 'C=C1C=C[C]=CC1(1313)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS63',
kinetics = Arrhenius(A=(6.117e+14,'cm^3/(mol*s)'), n=-0.152, Ea=(4.19655,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 49 used for Cd_rad/Cd;H_rad
Exact match found for rate rule [Cd_rad/Cd;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction7',
reactants = ['H(3)(3)', 'C=C1C=[C]C=CC1(1314)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS64',
kinetics = Arrhenius(A=(6.117e+14,'cm^3/(mol*s)'), n=-0.152, Ea=(4.19655,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 49 used for Cd_rad/Cd;H_rad
Exact match found for rate rule [Cd_rad/Cd;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction8',
reactants = ['H(3)(3)', '[CH]=C1C=CC=CC1(1315)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS65',
kinetics = Arrhenius(A=(1.21e+14,'cm^3/(mol*s)','+|-',4.82e+13), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(298,'K'), comment="""From training reaction 60 used for H_rad;Cd_pri_rad
Exact match found for rate rule [Cd_pri_rad;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction9',
reactants = ['[CH2]C1C=C[CH]C=C1(1011)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS66',
kinetics = Arrhenius(A=(3.898e+11,'s^-1'), n=0.486, Ea=(22.8614,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction67',
reactants = ['C=C1[CH]C=CC[CH]1(1095)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS67',
kinetics = Arrhenius(A=(2.94659e+10,'s^-1'), n=0.2847, Ea=(27.8529,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad_De;XH_Rrad_De] + [R2radExo;Y_rad_De;XH_Rrad] for rate rule [R2radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction11',
reactants = ['[CH2]C1[C]=CC=CC1(1082)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS68',
kinetics = Arrhenius(A=(1.949e+11,'s^-1'), n=0.486, Ea=(22.8614,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction12',
reactants = ['C=C1[CH]C[C]=CC1(1316)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS69',
kinetics = Arrhenius(A=(2.94659e+10,'s^-1'), n=0.2847, Ea=(27.8529,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad_De;XH_Rrad_De] + [R2radExo;Y_rad_De;XH_Rrad] for rate rule [R2radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction13',
reactants = ['C=C1C=[C]C[CH]C1(1317)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS70',
kinetics = Arrhenius(A=(3.898e+11,'s^-1'), n=0.486, Ea=(22.8614,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 0 used for R2radExo;Y_rad_De;XH_Rrad_NDe
Exact match found for rate rule [R2radExo;Y_rad_De;XH_Rrad_NDe]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction14',
reactants = ['[CH2]C1C=CC=[C]C1(1083)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS71',
kinetics = Arrhenius(A=(2.6374e+09,'s^-1'), n=0.37, Ea=(88.9686,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction16',
reactants = ['[CH]=C1C[CH]C=CC1(1318)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS72',
kinetics = Arrhenius(A=(8.01596e+09,'s^-1'), n=0.37, Ea=(78.2471,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad;XH_Rrad_De] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 4.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction17',
reactants = ['[CH2]C1C=[C]C=CC1(1085)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS73',
kinetics = Arrhenius(A=(2.6374e+09,'s^-1'), n=0.37, Ea=(88.9686,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction18',
reactants = ['C=C1[CH]CC=[C]C1(1319)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS74',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3radExo;Y_rad_NDe;XH_Rrad] for rate rule [R3radExo;Y_rad_NDe;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction19',
reactants = ['C=C1[C]=CC[CH]C1(1320)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS75',
kinetics = Arrhenius(A=(6.94203e+09,'s^-1'), n=0.37, Ea=(99.6901,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad_NDe] + [R3radExo;Y_rad;XH_Rrad_NDe] for rate rule [R3radExo;Y_rad_De;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction20',
reactants = ['C[C]1C=CC=[C]C1(1321)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS76',
kinetics = Arrhenius(A=(7.77e+08,'s^-1'), n=0.311, Ea=(17.782,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R4;Y_rad_De;XH_Rrad_De] for rate rule [R4radEndo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction21',
reactants = ['[CH]=C1[CH]CC=CC1(1322)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS77',
kinetics = Arrhenius(A=(1.02844e+09,'s^-1'), n=0.311, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad;XH_Rrad] for rate rule [R4radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction22',
reactants = ['C[C]1C=[C]C=CC1(1323)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS78',
kinetics = Arrhenius(A=(2.328e+09,'s^-1'), n=0.311, Ea=(60.668,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R4;Y_rad_De;XH_Rrad_NDe] for rate rule [R4radEndo;Y_rad_De;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction23',
reactants = ['C=C1C[C]=C[CH]C1(1324)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS79',
kinetics = Arrhenius(A=(1.552e+09,'s^-1'), n=0.311, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R4;Y_rad_NDe;XH_Rrad] for rate rule [R4radEndo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction24',
reactants = ['C=C1[C]C=CCC1(1102)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS80',
kinetics = Arrhenius(A=(8.96625e+08,'s^-1'), n=0.311, Ea=(39.225,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad_De;XH_Rrad] for rate rule [R4radEndo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction25',
reactants = ['[CH]=C1[CH]C=CCC1(1098)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS81',
kinetics = Arrhenius(A=(5.18e+08,'s^-1'), n=0.311, Ea=(17.782,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R4;Y_rad;XH_Rrad_De] for rate rule [R4radExo;Y_rad;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction26',
reactants = ['[CH2]C1C=C[C]=CC1(1084)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS82',
kinetics = Arrhenius(A=(4.48312e+08,'s^-1'), n=0.311, Ea=(39.225,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad_De;XH_Rrad] for rate rule [R4radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction27',
reactants = ['[CH]=C1C=CC[CH]C1(1325)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS83',
kinetics = Arrhenius(A=(4.25221e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction28',
reactants = ['C[C]1C=C[C]=CC1(1326)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS84',
kinetics = Arrhenius(A=(3.21e+09,'s^-1'), n=0.137, Ea=(17.782,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_De;XH_Rrad_De] for rate rule [R5radEndo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction29',
reactants = ['C=C1C[CH][C]=CC1(1327)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS85',
kinetics = Arrhenius(A=(8.50442e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radExo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 4.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction30',
reactants = ['C=C1[CH][C]=CCC1(1097)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS86',
kinetics = Arrhenius(A=(4.25221e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radExo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction31',
reactants = ['[CH2][C]1C=CCC=C1(1010)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS87',
kinetics = Arrhenius(A=(8.50442e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radExo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 4.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction32',
reactants = ['C[C]1[C]=CC=CC1(1328)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS88',
kinetics = Arrhenius(A=(6.37831e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R7;Y_rad;XH_Rrad] for rate rule [R7radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction33',
reactants = ['C7H8(697)(696)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS89',
kinetics = Arrhenius(A=(1.27566e+10,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R7;Y_rad;XH_Rrad] for rate rule [R7radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction34',
reactants = ['CC1=CC=C=CC1(1329)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS90',
kinetics = Arrhenius(A=(2.53605e+09,'s^-1'), n=1.02346, Ea=(163.761,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [1_3_unsaturated_pentane_backbone;CH_end;CddC_2] + [1_3_pentadiene;CH_end;unsaturated_end] for rate rule [1_3_pentadiene;CH3_1;CddC_2]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 3.0
family: Intra_ene_reaction"""),
)
reaction(
label = 'reaction35',
reactants = ['[CH]=CCC(=C)C=[CH](1330)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS91',
kinetics = Arrhenius(A=(2.53377e+11,'s^-1'), n=0.0685, Ea=(8.20064,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6;Y_rad_out;Ypri_rad_out] for rate rule [R6_DSSSD;CdsingleH_rad_out;CdsinglepriH_rad_out]
Euclidian distance = 3.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction36',
reactants = ['[CH]=CC=CC[C]=C(1331)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS92',
kinetics = Arrhenius(A=(2.53377e+11,'s^-1'), n=0.0685, Ea=(8.20064,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6;Y_rad_out;Ypri_rad_out] for rate rule [R6_SSDSD;Y_rad_out;CdsinglepriH_rad_out]
Euclidian distance = 2.2360679775
family: Birad_recombination"""),
)
reaction(
label = 'reaction37',
reactants = ['[CH]=CC=CC([CH2])=C(1332)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS93',
kinetics = Arrhenius(A=(6.42e+10,'s^-1'), n=0.137, Ea=(8.87008,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6;C_rad_out_2H;Ypri_rad_out] for rate rule [R6_SSDSD;C_rad_out_2H;CdsinglepriH_rad_out]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction38',
reactants = ['[CH2]C=CC=C[C]=C(1333)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS94',
kinetics = Arrhenius(A=(3.21e+10,'s^-1'), n=0.137, Ea=(8.87008,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6;C_rad_out_2H;Ypri_rad_out] for rate rule [R6_SDSDS;C_rad_out_2H;Ypri_rad_out]
Euclidian distance = 1.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction39',
reactants = ['C=C1C=CC[C]C1(1334)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS95',
kinetics = Arrhenius(A=(6.84965e+11,'s^-1'), n=0.4135, Ea=(17.2276,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [singletcarbene_CH;CsJ2C;CH2(C=C)] for rate rule [CsJ2-C;CsJ2(CsC);CH2(C=C)]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 2.0
family: Singlet_Carbene_Intra_Disproportionation"""),
)
reaction(
label = 'reaction40',
reactants = ['C=C1[C]CC=CC1(1335)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS96',
kinetics = Arrhenius(A=(6.84965e+11,'s^-1'), n=0.4135, Ea=(17.2276,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [singletcarbene_CH;CsJ2(C=C);CH2(C=C)] for rate rule [CsJ2-C;CsJ2(C=C);CH2(C=C)]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Singlet_Carbene_Intra_Disproportionation"""),
)
reaction(
label = 'reaction41',
reactants = ['C=C1C=C[C]CC1(1336)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS97',
kinetics = Arrhenius(A=(5.65514e+12,'s^-1'), n=-0.428961, Ea=(21.7426,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [singletcarbene_CH;CsJ2(C=C);CH2(C)] + [CsJ2-C;CsJ2(C=C);CH] for rate rule [CsJ2-C;CsJ2(C=C);CH2(C)]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Singlet_Carbene_Intra_Disproportionation"""),
)
reaction(
label = 'reaction42',
reactants = ['C=C1C[C]C=CC1(1337)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS98',
kinetics = Arrhenius(A=(6.84965e+11,'s^-1'), n=0.4135, Ea=(17.2276,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [singletcarbene_CH;CsJ2(C=C);CH2(C=C)] for rate rule [CsJ2-C;CsJ2(C=C);CH2(C=C)]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Singlet_Carbene_Intra_Disproportionation"""),
)
reaction(
label = 'reaction43',
reactants = ['[CH]C1C=CC=CC1(1338)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS99',
kinetics = Arrhenius(A=(6.14647e+14,'s^-1'), n=-1.07844, Ea=(56.8484,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [CsJ2-C;singletcarbene;CH] for rate rule [CsJ2-C;CsJ2H;CH(C)C]
Euclidian distance = 1.41421356237
family: Singlet_Carbene_Intra_Disproportionation"""),
)
reaction(
label = 'reaction44',
reactants = ['C7H8(690)(689)'],
products = ['C1=CC2C=C(C1)C2(1339)'],
transitionState = 'TS100',
kinetics = Arrhenius(A=(4.99998e+11,'s^-1'), n=0.0559095, Ea=(275.247,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [1,3-butadiene_backbone;C=C_1;C=C_2] for rate rule [1,3-butadiene_backbone;CdH2_1;CdH(C)_2]
Euclidian distance = 1.41421356237
family: Intra_2+2_cycloaddition_Cd
Ea raised from 273.0 to 275.2 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction45',
reactants = ['C7H8(690)(689)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS101',
kinetics = Arrhenius(A=(4.99998e+11,'s^-1'), n=0.0559095, Ea=(122.413,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [1,3-butadiene_backbone;C=C_1;C=C_2] for rate rule [1,3-butadiene_backbone;CdH(C)_1;CdH(C)_2]
Euclidian distance = 1.41421356237
family: Intra_2+2_cycloaddition_Cd"""),
)
reaction(
label = 'reaction46',
reactants = ['[CH]1C[C]2C=CC1C2(1340)'],
products = ['C7H8(690)(689)'],
transitionState = 'TS102',
kinetics = Arrhenius(A=(7.69248e+14,'s^-1'), n=-0.917475, Ea=(150.312,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""From training reaction 1 used for R5JJ
Exact match found for rate rule [R5JJ]
Euclidian distance = 0
family: 1,4_Cyclic_birad_scission"""),
)
reaction(
label = 'reaction103',
reactants = ['H(3)(3)', 'C=C1C[C]2C=CC21(1473)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS103',
kinetics = Arrhenius(A=(2.92e+13,'cm^3/(mol*s)'), n=0.18, Ea=(0.518816,'kJ/mol'), T0=(1,'K'), Tmin=(200,'K'), Tmax=(2000,'K'), comment="""From training reaction 123 used for H_rad;C_rad/OneDeCs2
Exact match found for rate rule [C_rad/OneDeCs2;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction104',
reactants = ['H(3)(3)', 'C=C1CC2C=C[C]12(1474)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS104',
kinetics = Arrhenius(A=(3.62e+13,'cm^3/(mol*s)'), n=0.228, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 13 used for C_rad/TwoDeCs;H_rad
Exact match found for rate rule [C_rad/TwoDeCs;H_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -0.1 to 0 kJ/mol."""),
)
reaction(
label = 'reaction105',
reactants = ['H(3)(3)', 'C=C1[CH]C2C=CC12(1475)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS105',
kinetics = Arrhenius(A=(2.71464e+07,'m^3/(mol*s)'), n=0.107721, Ea=(5.76381,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""From training reaction 36 used for C_rad/H/CdCs;H_rad
Exact match found for rate rule [C_rad/H/CdCs;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction106',
reactants = ['H(3)(3)', 'C=C1CC2[C]=CC12(1476)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS106',
kinetics = Arrhenius(A=(1e+13,'cm^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 40 used for Cd_rad/NonDe;H_rad
Exact match found for rate rule [Cd_rad/NonDe;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction107',
reactants = ['H(3)(3)', 'C=C1CC2C=[C]C12(1477)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS107',
kinetics = Arrhenius(A=(1e+13,'cm^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 40 used for Cd_rad/NonDe;H_rad
Exact match found for rate rule [Cd_rad/NonDe;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction108',
reactants = ['H(3)(3)', '[CH]=C1CC2C=CC12(1478)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS108',
kinetics = Arrhenius(A=(1.21e+14,'cm^3/(mol*s)','+|-',4.82e+13), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(298,'K'), comment="""From training reaction 60 used for H_rad;Cd_pri_rad
Exact match found for rate rule [Cd_pri_rad;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction109',
reactants = ['C=C1C[C]2C[CH]C21(1479)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS109',
kinetics = Arrhenius(A=(5.10299e+10,'s^-1'), n=0.2847, Ea=(23.1459,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad_NDe;XH_Rrad_NDe] + [R2radExo;Y_rad;XH_Rrad_NDe] for rate rule [R2radExo;Y_rad_NDe;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction110',
reactants = ['[CH2]C1CC2C=C[C]12(1480)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS110',
kinetics = Arrhenius(A=(2.24409e+10,'s^-1'), n=0.34095, Ea=(22.3009,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad_NDe;XH_Rrad] + [R2radExo;Y_rad;XH_Rrad] for rate rule [R2radExo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction111',
reactants = ['C=C1CC2[CH]C[C]12(1481)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS111',
kinetics = Arrhenius(A=(5.10299e+10,'s^-1'), n=0.2847, Ea=(23.1459,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad_NDe;XH_Rrad_NDe] + [R2radExo;Y_rad;XH_Rrad_NDe] for rate rule [R2radExo;Y_rad_NDe;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction112',
reactants = ['[CH2]C1[CH]C2C=CC12(1060)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS112',
kinetics = Arrhenius(A=(2.24409e+10,'s^-1'), n=0.34095, Ea=(22.3009,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad_NDe;XH_Rrad] + [R2radExo;Y_rad;XH_Rrad] for rate rule [R2radExo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction113',
reactants = ['C=C1CC2C[CH][C]12(1482)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS113',
kinetics = Arrhenius(A=(5.2748e+09,'s^-1'), n=0.37, Ea=(88.9686,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction114',
reactants = ['C=C1[CH]C2C[CH]C12(1483)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS114',
kinetics = Arrhenius(A=(6.94203e+09,'s^-1'), n=0.37, Ea=(99.6901,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad_NDe] + [R3radExo;Y_rad;XH_Rrad_NDe] for rate rule [R3radExo;Y_rad_De;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction115',
reactants = ['[CH2]C1C[C]2C=CC21(1484)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS115',
kinetics = Arrhenius(A=(7.437e+08,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction116',
reactants = ['C=C1C[C]2[CH]CC21(1485)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS116',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction117',
reactants = ['[CH2]C1CC2C=[C]C12(1486)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS117',
kinetics = Arrhenius(A=(2.6374e+09,'s^-1'), n=0.37, Ea=(88.9686,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction118',
reactants = ['C=C1[CH]C2[CH]CC12(1487)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS118',
kinetics = Arrhenius(A=(8.96625e+08,'s^-1'), n=0.311, Ea=(39.225,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad_De;XH_Rrad] for rate rule [R4radEndo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction119',
reactants = ['C[C]1C[C]2C=CC12(1488)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS119',
kinetics = Arrhenius(A=(2.328e+09,'s^-1'), n=0.311, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R4;Y_rad_NDe;XH_Rrad_NDe] for rate rule [R4radEndo;Y_rad_NDe;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction120',
reactants = ['C[C]1CC2C=[C]C12(1489)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS120',
kinetics = Arrhenius(A=(2.328e+09,'s^-1'), n=0.311, Ea=(60.668,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R4;Y_rad_De;XH_Rrad_NDe] for rate rule [R4radEndo;Y_rad_De;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction121',
reactants = ['[CH2]C1CC2[C]=CC12(1490)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS121',
kinetics = Arrhenius(A=(4.48312e+08,'s^-1'), n=0.311, Ea=(39.225,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad_De;XH_Rrad] for rate rule [R4radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction122',
reactants = ['[CH]=C1CC2[CH]CC12(1491)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS122',
kinetics = Arrhenius(A=(1.552e+09,'s^-1'), n=0.311, Ea=(34.518,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad;XH_Rrad_NDe] for rate rule [R4radExo;Y_rad;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction123',
reactants = ['C[C]1[CH]C2C=CC12(1058)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS123',
kinetics = Arrhenius(A=(6.37831e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction124',
reactants = ['C[C]1CC2[C]=CC12(1492)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS124',
kinetics = Arrhenius(A=(9.63e+09,'s^-1'), n=0.137, Ea=(60.668,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_De;XH_Rrad_NDe] for rate rule [R5radEndo;Y_rad_De;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction125',
reactants = ['C[C]1CC2C=C[C]12(1493)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS125',
kinetics = Arrhenius(A=(5.55988e+09,'s^-1'), n=0.137, Ea=(39.225,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad_De;XH_Rrad] for rate rule [R5radEndo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction126',
reactants = ['[CH]=C1CC2C[CH]C12(1494)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS126',
kinetics = Arrhenius(A=(4.25221e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction127',
reactants = ['[CH2]C1C=CC1[C]=C(1495)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS127',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [R4_SSS;C_rad_out_2H;Ypri_rad_out]
Euclidian distance = 0
family: Birad_recombination"""),
)
reaction(
label = 'reaction128',
reactants = ['C=[C]CC1[CH]C=C1(1496)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS128',
kinetics = Arrhenius(A=(3.6e+12,'s^-1'), n=-0.1525, Ea=(7.90776,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;C_rad_out_H/OneDe;Ypri_rad_out] + [R4_SSS;C_rad_out_single;Ypri_rad_out] for rate rule [R4_SSS;C_rad_out_H/OneDe;Ypri_rad_out]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction129',
reactants = ['[CH]=CC1[CH]C(=C)C1(1497)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS129',
kinetics = Arrhenius(A=(2e+12,'s^-1'), n=0, Ea=(7.5312,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn;C_rad_out_H/OneDe;Ypri_rad_out] for rate rule [R4_SSD;C_rad_out_H/OneDe;CdsinglepriH_rad_out]
Euclidian distance = 2.82842712475
family: Birad_recombination"""),
)
reaction(
label = 'reaction130',
reactants = ['[CH2]C(=C)C1[CH]C=C1(1498)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS130',
kinetics = Arrhenius(A=(7.2e+12,'s^-1'), n=-0.1525, Ea=(7.90776,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;C_rad_out_H/OneDe;Cpri_rad_out_2H] + [R4_SSS;C_rad_out_single;Cpri_rad_out_2H] for rate rule [R4_SSS;C_rad_out_H/OneDe;Cpri_rad_out_2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 4.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction131',
reactants = ['[CH]=CC1[CH]CC1=C(1499)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS131',
kinetics = Arrhenius(A=(1.8e+12,'s^-1'), n=-0.1525, Ea=(7.90776,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;C_rad_out_1H;Ypri_rad_out] + [R4;C_rad_out_single;Ypri_rad_out] for rate rule [R4_SSD;C_rad_out_H/NonDeC;CdsinglepriH_rad_out]
Euclidian distance = 3.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction132',
reactants = ['H2CCCH2(837)', 'C1=CC=C1(1500)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS132',
kinetics = Arrhenius(A=(1.416,'cm^3/(mol*s)'), n=2.94, Ea=(121.336,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using an average for rate rule [diene_out;diene_in_2H;allene_unsub]
Euclidian distance = 0
Multiplied by reaction path degeneracy 8.0
family: Diels_alder_addition"""),
)
reaction(
label = 'reaction133',
reactants = ['C=C1CC2[C]CC12(1501)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS133',
kinetics = Arrhenius(A=(3.23663e+16,'s^-1'), n=-0.885455, Ea=(87.4392,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [CsJ2-C;CsJ2(CsC);CH] for rate rule [CsJ2-C;CsJ2(CsC);CH2(C)]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Singlet_Carbene_Intra_Disproportionation"""),
)
reaction(
label = 'reaction134',
reactants = ['C=C1CC2C[C]C12(1502)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS134',
kinetics = Arrhenius(A=(3.23663e+16,'s^-1'), n=-0.885455, Ea=(87.4392,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [CsJ2-C;CsJ2(CsC);CH] for rate rule [CsJ2-C;CsJ2(CsC);CH2(C)]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Singlet_Carbene_Intra_Disproportionation"""),
)
reaction(
label = 'reaction135',
reactants = ['[CH]C1CC2C=CC12(1503)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS135',
kinetics = Arrhenius(A=(6.14647e+14,'s^-1'), n=-1.07844, Ea=(56.8484,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [CsJ2-C;singletcarbene;CH] for rate rule [CsJ2-C;CsJ2H;CH(C)C]
Euclidian distance = 1.41421356237
family: Singlet_Carbene_Intra_Disproportionation"""),
)
reaction(
label = 'reaction136',
reactants = ['C1=CC2C=C(C1)C2(1339)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS136',
kinetics = Arrhenius(A=(6.21184e+10,'s^-1'), n=0.288169, Ea=(147.049,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [1_5_unsaturated_hexane] for rate rule [1_5_hexadiene]
Euclidian distance = 1.0
family: 6_membered_central_C-C_shift"""),
)
reaction(
label = 'reaction137',
reactants = ['[CH]1C2C[C]3CC2C13(1504)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS137',
kinetics = Arrhenius(A=(7.69248e+14,'s^-1'), n=-0.917475, Ea=(150.312,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""From training reaction 1 used for R5JJ
Exact match found for rate rule [R5JJ]
Euclidian distance = 0
family: 1,4_Cyclic_birad_scission"""),
)
reaction(
label = 'reaction138',
reactants = ['[CH]1C2C[C]3CC1C32(1505)'],
products = ['C7H8(694)(693)'],
transitionState = 'TS138',
kinetics = Arrhenius(A=(2e+13,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [RJJ] for rate rule [R6JJ]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: 1,4_Cyclic_birad_scission"""),
)
reaction(
label = 'reaction139',
reactants = ['C7H8(697)(696)'],
products = ['CC12[CH]C=CC1[CH]2(1506)'],
transitionState = 'TS139',
kinetics = Arrhenius(A=(2.28717e+07,'s^-1'), n=0.927697, Ea=(219.664,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R6;doublebond_intra;radadd_intra_cs]
Euclidian distance = 0
family: Intra_R_Add_Exocyclic
Ea raised from 215.3 to 219.7 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction140',
reactants = ['C7H8(697)(696)'],
products = ['CC1=C[CH]C2[CH]C12(1507)'],
transitionState = 'TS140',
kinetics = Arrhenius(A=(2.28717e+07,'s^-1'), n=0.927697, Ea=(216.449,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R6;doublebond_intra;radadd_intra_cs]
Euclidian distance = 0
family: Intra_R_Add_Exocyclic
Ea raised from 212.6 to 216.4 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction141',
reactants = ['H(3)(3)', 'CC1[CH]C=C=CC=1(1508)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS141',
kinetics = Arrhenius(A=(8.22e+08,'cm^3/(mol*s)'), n=1.533, Ea=(7.77387,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 192 used for Cd_R;HJ
Exact match found for rate rule [Cd_R;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction142',
reactants = ['CH3(15)(16)', 'C1=C[CH]C=CC=1(1509)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS142',
kinetics = Arrhenius(A=(0.125041,'m^3/(mol*s)'), n=2.08753, Ea=(33.8923,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cd_R;CsJ-HHH]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction143',
reactants = ['CC1[CH]C[C]=CC=1(1510)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS143',
kinetics = Arrhenius(A=(1.448e+10,'s^-1'), n=0.82, Ea=(156.9,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 154 used for R2H_S;Cd_rad_out_Cd;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;Cd_rad_out_Cd;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction144',
reactants = ['C[C]1[C]=CC=CC1(1328)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS144',
kinetics = Arrhenius(A=(1.47715e+10,'s^-1'), n=0.8, Ea=(147.277,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_DS;Cd_rad_out;Cs_H_out_1H] for rate rule [R3H_DS;Cd_rad_out;Cs_H_out_H/Cd]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction145',
reactants = ['C[C]1C=C[C]=CC1(1326)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS145',
kinetics = Arrhenius(A=(1.47715e+10,'s^-1'), n=0.8, Ea=(147.277,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_DS;Cd_rad_out;Cs_H_out_1H] for rate rule [R3H_DS;Cd_rad_out;Cs_H_out_H/Cd]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction146',
reactants = ['CC1[CH]CC=[C]C=1(1511)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS146',
kinetics = Arrhenius(A=(1.47715e+10,'s^-1'), n=0.8, Ea=(147.277,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_DS;Cd_rad_out;Cs_H_out_1H] for rate rule [R3H_DS;Cd_rad_out;Cs_H_out_H/Cd]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction147',
reactants = ['C=C1[CH]C=CC[CH]1(1095)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS147',
kinetics = Arrhenius(A=(256000,'s^-1'), n=2, Ea=(117.57,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 95 used for R4H_SDS;C_rad_out_2H;Cs_H_out_H/Cd
Exact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction148',
reactants = ['CC1=[C]C=CC[CH]1(1512)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS148',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4Hall;Cd_rad_out_single;Cs_H_out_H/Cd] for rate rule [R4HJ_2;Cd_rad_out_singleDe_Cd;Cs_H_out_H/Cd]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction149',
reactants = ['C[C]1C=[C]C=CC1(1323)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS149',
kinetics = Arrhenius(A=(2.22e+08,'s^-1'), n=1.1915, Ea=(103.605,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1600,'K'), comment="""Estimated using an average for rate rule [R4H_SDS;Cd_rad_out_Cd;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction150',
reactants = ['C7H8(697)(696)'],
products = ['C[C]1C2[CH]C=CC12(1513)'],
transitionState = 'TS150',
kinetics = Arrhenius(A=(4.00063e+13,'s^-1'), n=-0.283562, Ea=(245.321,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn0c6_beta_short;doublebond_intra;radadd_intra_cs] for rate rule [Rn0c6_beta_short;doublebond_intra_secNd_HCd;radadd_intra_cs]
Euclidian distance = 3.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction151',
reactants = ['C7H8(697)(696)'],
products = ['CC1[CH]C2[CH]C2C=1(1514)'],
transitionState = 'TS151',
kinetics = Arrhenius(A=(2.22857e+12,'s^-1'), n=0, Ea=(216.449,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn0cx_beta;doublebond_intra_pri_HCd;radadd_intra_cs] for rate rule [Rn0c6_beta_short;doublebond_intra_pri_HCd;radadd_intra_cs]
Euclidian distance = 1.0
family: Intra_R_Add_Endocyclic
Ea raised from 212.6 to 216.4 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction152',
reactants = ['C7H8(697)(696)'],
products = ['C[C]1[CH]C2C=CC12(1058)'],
transitionState = 'TS152',
kinetics = Arrhenius(A=(1.08454e+19,'s^-1'), n=-0.859165, Ea=(206.503,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn0cx_gamma;doublebond_intra;radadd_intra_cs] for rate rule [Rn0c6_gamma;doublebond_intra_secNd_HCd;radadd_intra_csHCd]
Euclidian distance = 3.74165738677
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic
Ea raised from 203.9 to 206.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction153',
reactants = ['C7H8(697)(696)'],
products = ['CC1=CC=C=CC1(1329)'],
transitionState = 'TS153',
kinetics = Arrhenius(A=(5.4e+09,'s^-1'), n=-0.305, Ea=(106.354,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R3;Y_rad_De;XH_Rrad_De] for rate rule [R3radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
family: Intra_Disproportionation
Ea raised from 104.4 to 106.4 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction154',
reactants = ['CH2(S)(21)(22)', '[CH]1[CH]C=CC=C1(1038)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS154',
kinetics = Arrhenius(A=(431291,'m^3/(mol*s)'), n=0.444, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [carbene;R_H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: 1,2_Insertion_carbene
Ea raised from -5.1 to 0 kJ/mol."""),
)
reaction(
label = 'reaction155',
reactants = ['CC1[C]=CC=C[CH]1(1039)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS155',
kinetics = Arrhenius(A=(1.09301e+10,'s^-1'), n=0.904833, Ea=(146.851,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CJ;CH3] + [cCsCJ;CdsJ;C] for rate rule [cCsCJ;CdsJ;CH3]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction156',
reactants = ['C7H8(697)(696)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS156',
kinetics = Arrhenius(A=(2.51e+07,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [Y_12_02] for rate rule [Y_12_02b]
Euclidian distance = 1.0
family: 1,2-Birad_to_alkene"""),
)
reaction(
label = 'reaction157',
reactants = ['H(3)(3)', 'CC1=C=C[CH]C=C1(1515)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS157',
kinetics = Arrhenius(A=(8.22e+08,'cm^3/(mol*s)'), n=1.533, Ea=(7.77387,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 192 used for Cd_R;HJ
Exact match found for rate rule [Cd_R;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction158',
reactants = ['H(3)(3)', 'CC1C=C=C[CH]C=1(1516)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS158',
kinetics = Arrhenius(A=(8.22e+08,'cm^3/(mol*s)'), n=1.533, Ea=(7.77387,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 192 used for Cd_R;HJ
Exact match found for rate rule [Cd_R;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction159',
reactants = ['CC1=[C]C[CH]C=C1(1517)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS159',
kinetics = Arrhenius(A=(9.93038e+09,'s^-1'), n=1.05826, Ea=(162.779,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;Cd_rad_out_Cd;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction160',
reactants = ['CC1C=[C]C[CH]C=1(1518)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS160',
kinetics = Arrhenius(A=(9.93038e+09,'s^-1'), n=1.05826, Ea=(162.779,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;Cd_rad_out_Cd;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction161',
reactants = ['CC1=[C][CH]CC=C1(1519)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS161',
kinetics = Arrhenius(A=(2.3e+10,'s^-1'), n=0.98, Ea=(112.55,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnH;Cd_rad_out_Cd;Cs_H_out_H/(Cd-Cd-Cd)] for rate rule [R3HJ;Cd_rad_out_Cd;Cs_H_out_H/(Cd-Cd-Cd)]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction162',
reactants = ['[CH2][C]1C=CCC=C1(1010)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS162',
kinetics = Arrhenius(A=(62296.1,'s^-1'), n=1.86, Ea=(59.4128,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5Hall;C_rad_out_2H;Cs_H_out_H/Cd] for rate rule [R5HJ_3;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction163',
reactants = ['C7H8(697)(696)'],
products = ['CC12[CH][CH]C1C=C2(1520)'],
transitionState = 'TS163',
kinetics = Arrhenius(A=(5.4227e+18,'s^-1'), n=-0.859165, Ea=(209.035,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn0cx_gamma;doublebond_intra_pri;radadd_intra_cs] for rate rule [Rn0c6_gamma;doublebond_intra_pri_NdCd;radadd_intra_csHCd]
Euclidian distance = 3.0
family: Intra_R_Add_Endocyclic
Ea raised from 206.2 to 209.0 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction164',
reactants = ['C7H8(697)(696)'],
products = ['CC1=C=CCC=C1(1521)'],
transitionState = 'TS164',
kinetics = Arrhenius(A=(4.25221e+09,'s^-1'), n=0.137, Ea=(107.818,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radExo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation
Ea raised from 105.9 to 107.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction165',
reactants = ['C7H8(697)(696)'],
products = ['CC1C=C=CCC=1(1522)'],
transitionState = 'TS165',
kinetics = Arrhenius(A=(4.25221e+09,'s^-1'), n=0.137, Ea=(107.818,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radExo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation
Ea raised from 105.9 to 107.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction166',
reactants = ['C[C]1C=CC=[C]C1(1321)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS166',
kinetics = Arrhenius(A=(1.448e+10,'s^-1'), n=0.82, Ea=(156.9,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 154 used for R2H_S;Cd_rad_out_Cd;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;Cd_rad_out_Cd;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction167',
reactants = ['C7H8(697)(696)'],
products = ['CC1=C=CC=CC1(1470)'],
transitionState = 'TS167',
kinetics = Arrhenius(A=(1.08e+10,'s^-1'), n=-0.305, Ea=(106.354,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R3;Y_rad_De;XH_Rrad_De] for rate rule [R3radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation
Ea raised from 104.4 to 106.4 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction168',
reactants = ['C7H8(697)(696)'],
products = ['CC1=CC2C=CC12(1047)'],
transitionState = 'TS168',
kinetics = Arrhenius(A=(4e+12,'s^-1'), n=0, Ea=(107.431,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn;C_rad_out_H/OneDe;Cpri_rad_out_single] for rate rule [R4_SDS;C_rad_out_H/OneDe;Cpri_rad_out_H/OneDe]
Euclidian distance = 2.82842712475
Multiplied by reaction path degeneracy 2.0
family: Birad_recombination
Ea raised from 107.2 to 107.4 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction169',
reactants = ['H(3)(3)', 'C=C1[CH]C=CC=C1(1310)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS169',
kinetics = Arrhenius(A=(8.22e+08,'cm^3/(mol*s)'), n=1.533, Ea=(7.77387,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 192 used for Cd_R;HJ
Exact match found for rate rule [Cd_R;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction170',
reactants = ['[CH2]C1C=C[CH]C=C1(1011)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS170',
kinetics = Arrhenius(A=(17481.2,'s^-1'), n=2.56136, Ea=(141.488,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;C_rad_out_2H;XH_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction171',
reactants = ['CC1[CH]C=C[C]=C1(1040)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS171',
kinetics = Arrhenius(A=(3.677e+10,'s^-1'), n=0.839, Ea=(182.581,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnH;Cd_rad_out_Cd;Cs_H_out_(CdCdCd)] for rate rule [R3HJ;Cd_rad_out_Cd;Cs_H_out_(CdCdCd)]
Euclidian distance = 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction172',
reactants = ['CC1[CH]C=[C]C=C1(1041)'],
products = ['C7H8(697)(696)'],
transitionState = 'TS172',
kinetics = Arrhenius(A=(1.05292e+10,'s^-1'), n=1.0733, Ea=(243.703,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [RnH;Cd_rad_out_singleDe_Cd;Cs_H_out] + [R4Hall;Cd_rad_out_singleDe_Cd;XH_out] for rate rule [R4HJ_2;Cd_rad_out_singleDe_Cd;Cs_H_out_noH]
Euclidian distance = 2.2360679775
family: intra_H_migration"""),
)
reaction(
label = 'reaction173',
reactants = ['C7H8(697)(696)'],
products = ['CC1C=C=CC=C1(1032)'],
transitionState = 'TS173',
kinetics = Arrhenius(A=(6.42e+09,'s^-1'), n=0.137, Ea=(111.839,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radExo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation
Ea raised from 110.4 to 111.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction174',
reactants = ['C7H8(697)(696)'],
products = ['C=C1C=CCC=C1(1006)'],
transitionState = 'TS174',
kinetics = Arrhenius(A=(3.21e+09,'s^-1'), n=0.137, Ea=(17.782,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_De;XH_Rrad_De] for rate rule [R5radEndo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction175',
reactants = ['C7H8(697)(696)'],
products = ['CC12C=CC1C=C2(1523)'],
transitionState = 'TS175',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(113.194,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SDS;C_rad_out_OneDe/Cs;Cpri_rad_out_H/OneDe]
Euclidian distance = 3.74165738677
family: Birad_recombination
Ea raised from 111.5 to 113.2 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction176',
reactants = ['CH3(15)(16)', 'C1=C[CH]C=CC=1(1509)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS176',
kinetics = Arrhenius(A=(3.87e-10,'cm^3/(molecule*s)'), n=-0.283, Ea=(0,'kcal/mol'), T0=(1,'K'), comment="""Matched reaction 51 C6H5 + CH3 <=> C7H8 in R_Recombination/training
This reaction matched rate rule [Cb_rad;C_methyl]
family: R_Recombination
Ea raised from -0.8 to 0 kJ/mol."""),
)
reaction(
label = 'reaction177',
reactants = ['H(3)(3)', 'C=C1[CH]C=CC=C1(1310)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS177',
kinetics = Arrhenius(A=(1.2e-10,'cm^3/(molecule*s)'), n=0.062, Ea=(0,'kcal/mol'), T0=(1,'K'), comment="""Matched reaction 52 C7H7 + H <=> C7H8-2 in R_Recombination/training
This reaction matched rate rule [C_rad/H2/Cb;H_rad]
family: R_Recombination
Ea raised from -0.2 to 0 kJ/mol."""),
)
reaction(
label = 'reaction178',
reactants = ['H(3)(3)', 'CC1=C=C[CH]C=C1(1515)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS178',
kinetics = Arrhenius(A=(2.2e+14,'cm^3/(mol*s)','+|-',8e+13), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1200,'K'), comment="""From training reaction 62 used for H_rad;Cb_rad
Exact match found for rate rule [Cb_rad;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction179',
reactants = ['H(3)(3)', 'CC1C=C=C[CH]C=1(1516)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS179',
kinetics = Arrhenius(A=(2.2e+14,'cm^3/(mol*s)','+|-',8e+13), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1200,'K'), comment="""From training reaction 62 used for H_rad;Cb_rad
Exact match found for rate rule [Cb_rad;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction180',
reactants = ['H(3)(3)', 'CC1[CH]C=C=CC=1(1508)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS180',
kinetics = Arrhenius(A=(2.2e+14,'cm^3/(mol*s)','+|-',8e+13), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1200,'K'), comment="""From training reaction 62 used for H_rad;Cb_rad
Exact match found for rate rule [Cb_rad;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction181',
reactants = ['[CH2]C1C=C[CH]C=C1(1011)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS181',
kinetics = Arrhenius(A=(1.4733e+10,'s^-1'), n=0.2847, Ea=(27.8529,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad;XH_Rrad_De] + [R2radExo;Y_rad;XH_Rrad] for rate rule [R2radExo;Y_rad;XH_Rrad_De]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction182',
reactants = ['CC1[C]=CC=C[CH]1(1039)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS182',
kinetics = Arrhenius(A=(1.4733e+10,'s^-1'), n=0.2847, Ea=(27.8529,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad_De;XH_Rrad_De] + [R2radExo;Y_rad_De;XH_Rrad] for rate rule [R2radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction183',
reactants = ['CC1=[C]C[CH]C=C1(1517)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS183',
kinetics = Arrhenius(A=(2.94659e+10,'s^-1'), n=0.2847, Ea=(27.8529,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad_De;XH_Rrad_De] + [R2radExo;Y_rad_De;XH_Rrad] for rate rule [R2radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction184',
reactants = ['C[C]1C=CC=[C]C1(1321)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS184',
kinetics = Arrhenius(A=(3.898e+11,'s^-1'), n=0.486, Ea=(22.8614,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 0 used for R2radExo;Y_rad_De;XH_Rrad_NDe
Exact match found for rate rule [R2radExo;Y_rad_De;XH_Rrad_NDe]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction185',
reactants = ['CC1C=[C]C[CH]C=1(1518)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS185',
kinetics = Arrhenius(A=(2.94659e+10,'s^-1'), n=0.2847, Ea=(27.8529,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad_De;XH_Rrad_De] + [R2radExo;Y_rad_De;XH_Rrad] for rate rule [R2radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction186',
reactants = ['CC1[CH]C[C]=CC=1(1510)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS186',
kinetics = Arrhenius(A=(2.94659e+10,'s^-1'), n=0.2847, Ea=(27.8529,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;Y_rad_De;XH_Rrad_De] + [R2radExo;Y_rad_De;XH_Rrad] for rate rule [R2radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction187',
reactants = ['C[C]1[C]=CC=CC1(1328)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS187',
kinetics = Arrhenius(A=(1.08e+10,'s^-1'), n=-0.305, Ea=(93.094,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R3;Y_rad_De;XH_Rrad_De] for rate rule [R3radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction188',
reactants = ['CC1[CH]C=C[C]=C1(1040)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS188',
kinetics = Arrhenius(A=(5.4e+09,'s^-1'), n=-0.305, Ea=(93.094,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R3;Y_rad_De;XH_Rrad_De] for rate rule [R3radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction189',
reactants = ['CC1=[C][CH]CC=C1(1519)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS189',
kinetics = Arrhenius(A=(1.08e+10,'s^-1'), n=-0.305, Ea=(93.094,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R3;Y_rad_De;XH_Rrad_De] for rate rule [R3radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction190',
reactants = ['C[C]1C=C[C]=CC1(1326)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS190',
kinetics = Arrhenius(A=(6.94203e+09,'s^-1'), n=0.37, Ea=(99.6901,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad_NDe] + [R3radExo;Y_rad;XH_Rrad_NDe] for rate rule [R3radExo;Y_rad_De;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction191',
reactants = ['CC1[CH]CC=[C]C=1(1511)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS191',
kinetics = Arrhenius(A=(1.08e+10,'s^-1'), n=-0.305, Ea=(93.094,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R3;Y_rad_De;XH_Rrad_De] for rate rule [R3radExo;Y_rad_De;XH_Rrad_De]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction192',
reactants = ['C=C1[CH]C=CC[CH]1(1095)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS192',
kinetics = Arrhenius(A=(1.02844e+09,'s^-1'), n=0.311, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad;XH_Rrad] for rate rule [R4radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction193',
reactants = ['CC1=[C]C=CC[CH]1(1512)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS193',
kinetics = Arrhenius(A=(8.96625e+08,'s^-1'), n=0.311, Ea=(39.225,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad_De;XH_Rrad] for rate rule [R4radEndo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction194',
reactants = ['C[C]1C=[C]C=CC1(1323)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS194',
kinetics = Arrhenius(A=(1.552e+09,'s^-1'), n=0.311, Ea=(60.668,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R4;Y_rad_De;XH_Rrad_NDe] for rate rule [R4radEndo;Y_rad_De;XH_Rrad_NDe]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction195',
reactants = ['CC1[CH]C=[C]C=C1(1041)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS195',
kinetics = Arrhenius(A=(4.48312e+08,'s^-1'), n=0.311, Ea=(39.225,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad_De;XH_Rrad] for rate rule [R4radEndo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction196',
reactants = ['[CH2][C]1C=CCC=C1(1010)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS196',
kinetics = Arrhenius(A=(4.25221e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction197',
reactants = ['CH2(S)(21)(22)', 'C6H6(468)(467)'],
products = ['C7H8(699)(698)'],
transitionState = 'TS197',
kinetics = Arrhenius(A=(431291,'m^3/(mol*s)'), n=0.444, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [carbene;R_H] for rate rule [carbene;Cb_H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: 1,2_Insertion_carbene
Ea raised from -5.1 to 0 kJ/mol."""),
)
network(
label = '137',
isomers = [
'C7H8(690)(689)',
'C7H8(693)(692)',
'C7H8(694)(693)',
'C7H8(697)(696)',
'C7H8(699)(698)',
],
reactants = [
('CH2(S)(21)(22)', 'C6H6(468)(467)'),
],
bathGas = {
'Ne': 0.333333,
'N2': 0.333333,
'Ar(8)': 0.333333,
},
)
pressureDependence(
label = '137',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
| [
"[email protected]"
] | |
6301e5ce5711488ed50bb66343401def93e6eb09 | d7320f2f599d1d81e14aec5f62e9d48ee4fddfa2 | /backend/mobile_23_dec_dev_17193/settings.py | de579c228368f904f01a84b5395bb4ed2917c868 | [] | no_license | crowdbotics-apps/mobile-23-dec-dev-17193 | be7f357b35147a9b4264f3b93482b18975e034ce | 632ed98d9fa87fab09c91f41eea01b001fb40dae | refs/heads/master | 2023-02-04T23:41:19.953146 | 2020-12-23T13:37:07 | 2020-12-23T13:37:07 | 323,806,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,057 | py | """
Django settings for mobile_23_dec_dev_17193 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
]
LOCAL_APPS = [
"home",
"modules",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
"storages",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "mobile_23_dec_dev_17193.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "mobile_23_dec_dev_17193.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
and AWS_STORAGE_BUCKET_NAME
and AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
5bc1b9c932355a1fa5a5500db6aa6d0f9ca3cc41 | ee82d5f781f9b2ad8d710e8b4ca723dd8b51fe22 | /cryptomate/strategy/persistence/__init__.py | 9205cb8a22c040162889979211d4230bff71b273 | [
"MIT"
] | permissive | solid-abstractions/cryptomate | 32538e9a65305cf1865452d29242a0b45e3a68ba | 49b4fea8f2f0f1ad61309a44167d1732ef2af0e4 | refs/heads/master | 2020-03-29T15:53:08.362898 | 2018-10-30T17:08:33 | 2018-10-30T17:15:04 | 150,085,416 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from cryptomate.strategy.persistence.base import Persister
__all__ = ('Persister',)
| [
"[email protected]"
] | |
3b0cb6bf6324296f08c74e0e0da7621c7fddbbad | 9ec6d7f7c98159c3c98fa8b107eb2e221d7dd37b | /graph_lm/models/__init__.py | 06c429f9750262ec8ed2e9f0657b799250503bf7 | [] | no_license | bstriner/graph-lm | 8f4d8df4ba5595e616e1fbe78da92acf0eac4403 | bc31caa1fed7d87bb57f0754fab1e1f96e410dbf | refs/heads/master | 2020-04-24T18:33:40.416780 | 2019-05-10T18:40:05 | 2019-05-10T18:40:05 | 172,183,417 | 0 | 0 | null | 2019-04-08T13:59:52 | 2019-02-23T07:07:24 | Python | UTF-8 | Python | false | false | 61 | py | AAE_RE = 'aae'
VAE = 'vae'
AAE_STOCH = 'aae_stoch'
AE = 'ae'
| [
"[email protected]"
] | |
ee319cd6d901830a3b33ecf66eb0941f348ecb2e | e0527bce5c53a196752d3a16adf50cb60754de5f | /02-Workshop/Workshop-Questions/6_pollock.py | b27d6fead14feaff42588a9ffef6d02b37a30352 | [] | no_license | ARWA-ALraddadi/python-tutorial-for-beginners | ddeb657f419fbc176bea273bc9fb6b88d1894191 | 21cedfc47871ca4d25c2382464c60ab0a2121205 | refs/heads/master | 2023-06-30T20:24:30.688800 | 2021-08-08T08:22:29 | 2021-08-08T08:22:29 | 193,094,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | ## Jackson Pollock's Final Masterpiece
##
## 20th century "artists" such as Jackson Pollock achieved fame
## by stumbling drunkenly around a canvas on the floor
## dribbling paint out of tins. (We're not being rude - he openly
## admitted to being drunk when creating his paintings.) However,
## today we can achieve the same effect without getting our hands
## dirty or taking a swig by using Python!
##
## Using Turtle graphics develop a program to draw many blobs
## (dots) of random colour, size and location on the screen.
## The blobs should be connected by lines of "paint" of
## various widths as if paint had been dribbled from one
## blob to the next. The "paint" should not go off the edge
## of the "canvas". Use the following solution strategy.
##
## 1. Set up the blank canvas of a known size
## 2. Ensure the pen is down
## 3. For each "blob" in a large range:
## a. Select a random pen colour
## b. Pick a random pen width
## c. Go to a random location on the screen
## (drawing as you go)
## d. Draw a blob (dot)
## 4. Exit the program gracefully
##
## Hint: Although you could select colours from a list of
## names, you can get a wider range of colours, by noting
## that Turtle's "color" function can accept three numbers
## as arguments, representing red-green-blue pixel densities.
## These numbers are floating point values between 0.0 and 1.0.
## Also note that the "random" module's "uniform" function
## produces a random floating point number.
##
## Hint: This exercise is actually very similar to the
## previous "Starry, Starry Night" one, so you can develop
## your solution as an extension of that.
# Import the functions required
from turtle import *
from random import uniform, randint
## DEVELOP YOUR PROGRAM HERE
| [
"[email protected]"
] | |
3b2bfe90e1713e4a3c44becca9465e02d8b99ecc | 53ccc4f5198d10102c8032e83f9af25244b179cf | /SoftUni Lessons/Python Development/Python Fundamentals September 2019/Problems And Files/05 EX. B. SYNTAX, CONDITIONAL STATE. AND LOOPS - Дата 20-ти септември, 1430 - 1730/More Exercises/02. Find The Capitals.py | 7eeea2620bed701edeaa70159043400ad4480ba4 | [] | no_license | SimeonTsvetanov/Coding-Lessons | aad32e0b4cc6f5f43206cd4a937fec5ebea64f2d | 8f70e54b5f95911d0bdbfda7d03940cb824dcd68 | refs/heads/master | 2023-06-09T21:29:17.790775 | 2023-05-24T22:58:48 | 2023-05-24T22:58:48 | 221,786,441 | 13 | 6 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | """
Basic Syntax, Conditional Statements and Loops - Exercise
Check your code: https://judge.softuni.bg/Contests/Practice/Index/1720#1
Video: https://www.youtube.com/watch?time_continue=4&v=7sHE4HEUqi8
SUPyF2 Basic Exercise More - 02. Find The Capitals
Problem:
Write a program that takes a single string and prints a list of all the indices of all the capital letters
Examples:
Input: Output:
pYtHoN [1, 3, 5]
CApiTAls [0, 1, 4, 5]
Hint:
If you don't know what lists are, search them in google, find out how to create them and add elements to them
"""
# text = [letter for letter in input()]
# list_capitals = []
#
# for letter in range(len(text)):
# if text[letter].isupper():
# list_capitals += [letter]
#
# print(list_capitals)
# input_string = input()
input_string = input()
n = [i for i in range(len(input_string)) if input_string[i].isupper()]
print(n)
| [
"[email protected]"
] | |
b2a3106a379c22fe0e7d4e763e29f6abfdc97c43 | 641fa8341d8c436ad24945bcbf8e7d7d1dd7dbb2 | /third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py | d6184838495e80aa3d5676b5ce34964c83c10ace | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | massnetwork/mass-browser | 7de0dfc541cbac00ffa7308541394bac1e945b76 | 67526da9358734698c067b7775be491423884339 | refs/heads/master | 2022-12-07T09:01:31.027715 | 2017-01-19T14:29:18 | 2017-01-19T14:29:18 | 73,799,690 | 4 | 4 | BSD-3-Clause | 2022-11-26T11:53:23 | 2016-11-15T09:49:29 | null | UTF-8 | Python | false | false | 12,119 | py | # Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This is an implementation of the Port interface that overrides other
ports and changes the Driver binary to "MockDRT".
The MockDRT objects emulate what a real DRT would do. In particular, they
return the output a real DRT would return for a given test, assuming that
test actually passes (except for reftests, which currently cause the
MockDRT to crash).
"""
import base64
import optparse
import os
import sys
import types
# Since we execute this script directly as part of the unit tests, we need to ensure
# that Tools/Scripts is in sys.path for the next imports to work correctly.
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
if script_dir not in sys.path:
sys.path.append(script_dir)
from webkitpy.common import read_checksum_from_png
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.port.factory import PortFactory
class MockDRTPort(object):
    """A Port wrapper that delegates to a real Port but swaps its Driver
    for a MockDRT subprocess (this very file, re-run as a script).

    Unknown attribute lookups fall through to the wrapped delegate port
    via __getattr__, so this behaves like the real port except for
    driver creation and the server/lock no-ops below.
    """
    port_name = 'mock'
    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        # The mock port keeps whatever name it was given.
        return port_name
    def __init__(self, host, port_name, **kwargs):
        # Wrap the real port named after the 'mock-' prefix, then patch its
        # _driver_class so any driver it builds is MockDRT-backed.
        self.__delegate = PortFactory(host).get(port_name.replace('mock-', ''), **kwargs)
        self.__delegate_driver_class = self.__delegate._driver_class
        self.__delegate._driver_class = types.MethodType(self._driver_class, self.__delegate)
    def __getattr__(self, name):
        # Forward everything not overridden here to the real port.
        return getattr(self.__delegate, name)
    def check_build(self, needs_http, printer):
        # Nothing to build for the mock; always report success.
        return test_run_results.OK_EXIT_STATUS
    def check_sys_deps(self, needs_http):
        # No system dependencies required by the mock.
        return test_run_results.OK_EXIT_STATUS
    def _driver_class(self, delegate):
        return self._mocked_driver_maker
    def _mocked_driver_maker(self, port, worker_number, pixel_tests, no_timeout=False):
        """Create a delegate driver whose cmd_line launches this file as MockDRT."""
        path_to_this_file = self.host.filesystem.abspath(__file__.replace('.pyc', '.py'))
        # __delegate_driver_class is the saved bound method: calling it
        # yields the driver class, which we then instantiate.
        driver = self.__delegate_driver_class()(self, worker_number, pixel_tests, no_timeout)
        driver.cmd_line = self._overriding_cmd_line(driver.cmd_line,
                                                    self.__delegate._path_to_driver(),
                                                    sys.executable,
                                                    path_to_this_file,
                                                    self.__delegate.name())
        return driver
    @staticmethod
    def _overriding_cmd_line(original_cmd_line, driver_path, python_exe, this_file, port_name):
        # Wrap the original cmd_line callable so the real driver binary is
        # replaced by "python this_file --platform port_name" in place.
        def new_cmd_line(pixel_tests, per_test_args):
            cmd_line = original_cmd_line(pixel_tests, per_test_args)
            index = cmd_line.index(driver_path)
            cmd_line[index:index + 1] = [python_exe, this_file, '--platform', port_name]
            return cmd_line
        return new_cmd_line
    def start_http_server(self, additional_dirs, number_of_servers):
        # Server and lock management are no-ops for the mock port: no
        # real tests run, so no supporting services are needed.
        pass
    def start_websocket_server(self):
        pass
    def acquire_http_lock(self):
        pass
    def stop_http_server(self):
        pass
    def stop_websocket_server(self):
        pass
    def release_http_lock(self):
        pass
    def _make_wdiff_available(self):
        # Pretend wdiff exists so text-diff code paths are exercised.
        self.__delegate._wdiff_available = True
    def setup_environ_for_server(self):
        env = self.__delegate.setup_environ_for_server()
        # We need to propagate PATH down so the python code can find the checkout.
        env['PATH'] = self.host.environ.get('PATH')
        return env
    def lookup_virtual_test_args(self, test_name):
        # Append flags identifying the virtual suite; MockDRT uses them to
        # map test names back to their virtual form (see output_for_test).
        suite = self.__delegate.lookup_virtual_suite(test_name)
        return suite.args + ['--virtual-test-suite-name', suite.name, '--virtual-test-suite-base', suite.base]
    def lookup_virtual_reference_args(self, test_name):
        suite = self.__delegate.lookup_virtual_suite(test_name)
        return suite.reference_args + ['--virtual-test-suite-name', suite.name, '--virtual-test-suite-base', suite.base]
def main(argv, host, stdin, stdout, stderr):
    """Parse the command line and run one MockDRT session until stdin EOF."""
    parsed_options, parsed_args = parse_options(argv)
    mock_drt = MockDRT(parsed_options, parsed_args, host, stdin, stdout, stderr)
    return mock_drt.run()
def parse_options(argv):
    """Extract the handful of flags MockDRT cares about from *argv*.

    We deliberately avoid optparse's parser: DRT accepts many flags we do
    not want to enumerate, and optparse would reject the unrecognized
    ones.  The full argv is returned unchanged as the positional args.
    """
    def value_after(flag):
        # Value following *flag*, or None when the flag is absent.
        try:
            return argv[argv.index(flag) + 1]
        except ValueError:
            return None
    options = optparse.Values({
        'actual_directory': value_after('--actual-directory'),
        'platform': value_after('--platform'),
        'virtual_test_suite_base': value_after('--virtual-test-suite-base'),
        'virtual_test_suite_name': value_after('--virtual-test-suite-name'),
    })
    return (options, argv)
class MockDRT(object):
    """Speaks DRT's stdin/stdout protocol, replaying *expected* results.

    For each test URI read from stdin, the checked-in expected
    text/audio/image is looked up via the real Port (or fabricated for
    reftests) and written back as if the test had just run and passed.
    Files under --actual-directory, when given, override the results.
    """
    def __init__(self, options, args, host, stdin, stdout, stderr):
        self._options = options
        self._args = args
        self._host = host
        self._stdout = stdout
        self._stdin = stdin
        self._stderr = stderr
        port_name = None
        if options.platform:
            port_name = options.platform
        # The real port supplies expected results and URI/name mapping.
        self._port = PortFactory(host).get(port_name=port_name, options=options)
        self._driver = self._port.create_driver(0)
    def run(self):
        """Main loop: one line of stdin per test, until EOF (returns 0)."""
        self._stdout.write("#READY\n")
        self._stdout.flush()
        while True:
            line = self._stdin.readline()
            if not line:
                return 0
            driver_input = self.input_from_line(line)
            dirname, basename = self._port.split_test(driver_input.test_name)
            is_reftest = (self._port.reference_files(driver_input.test_name) or
                          self._port.is_reference_html_file(self._port.host.filesystem, dirname, basename))
            output = self.output_for_test(driver_input, is_reftest)
            self.write_test_output(driver_input, output, is_reftest)
    def input_from_line(self, line):
        """Parse one command line of the form uri['--pixel-test['checksum]].

        Fields are separated by single quotes, matching DRT's input format.
        """
        vals = line.strip().split("'")
        uri = vals[0]
        checksum = None
        should_run_pixel_tests = False
        if len(vals) == 2 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
        elif len(vals) == 3 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
            checksum = vals[2]
        elif len(vals) != 1:
            # Any other shape is an unsupported protocol variant.
            raise NotImplementedError
        if uri.startswith('http://') or uri.startswith('https://'):
            test_name = self._driver.uri_to_test(uri)
        else:
            test_name = self._port.relative_test_filename(uri)
        return DriverInput(test_name, 0, checksum, should_run_pixel_tests, args=[])
    def output_for_test(self, test_input, is_reftest):
        """Build the DriverOutput a passing run of this test would produce."""
        port = self._port
        if self._options.virtual_test_suite_name:
            # Map the base test name back to its virtual-suite name so
            # expected-result lookups resolve against the right files.
            test_input.test_name = test_input.test_name.replace(
                self._options.virtual_test_suite_base, self._options.virtual_test_suite_name)
        actual_text = port.expected_text(test_input.test_name)
        actual_audio = port.expected_audio(test_input.test_name)
        actual_image = None
        actual_checksum = None
        if is_reftest:
            # Make up some output for reftests.
            actual_text = 'reference text\n'
            actual_checksum = 'mock-checksum'
            actual_image = 'blank'
            if test_input.test_name.endswith('-mismatch.html'):
                # Mismatch reftests must *differ* from their reference.
                actual_text = 'not reference text\n'
                actual_checksum = 'not-mock-checksum'
                actual_image = 'not blank'
        elif test_input.should_run_pixel_test and test_input.image_hash:
            actual_checksum = port.expected_checksum(test_input.test_name)
            actual_image = port.expected_image(test_input.test_name)
        if self._options.actual_directory:
            # -actual.{txt,wav,png} files on disk override the above.
            actual_path = port.host.filesystem.join(self._options.actual_directory, test_input.test_name)
            root, _ = port.host.filesystem.splitext(actual_path)
            text_path = root + '-actual.txt'
            if port.host.filesystem.exists(text_path):
                actual_text = port.host.filesystem.read_binary_file(text_path)
            audio_path = root + '-actual.wav'
            if port.host.filesystem.exists(audio_path):
                actual_audio = port.host.filesystem.read_binary_file(audio_path)
            image_path = root + '-actual.png'
            if port.host.filesystem.exists(image_path):
                actual_image = port.host.filesystem.read_binary_file(image_path)
                with port.host.filesystem.open_binary_file_for_reading(image_path) as filehandle:
                    actual_checksum = read_checksum_from_png.read_checksum(filehandle)
        return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)
    def write_test_output(self, test_input, output, is_reftest):
        """Serialize *output* to stdout/stderr in DRT's wire format.

        The text/audio section and the pixel section are each terminated
        by a '#EOF' marker; stderr gets its own '#EOF' at the end.
        """
        if output.audio:
            self._stdout.write('Content-Type: audio/wav\n')
            self._stdout.write('Content-Transfer-Encoding: base64\n')
            self._stdout.write(base64.b64encode(output.audio))
            self._stdout.write('\n')
        else:
            self._stdout.write('Content-Type: text/plain\n')
            # FIXME: Note that we don't ensure there is a trailing newline!
            # This mirrors actual (Mac) DRT behavior but is a bug.
            if output.text:
                self._stdout.write(output.text)
        self._stdout.write('#EOF\n')
        if test_input.should_run_pixel_test and output.image_hash:
            self._stdout.write('\n')
            self._stdout.write('ActualHash: %s\n' % output.image_hash)
            self._stdout.write('ExpectedHash: %s\n' % test_input.image_hash)
            if output.image_hash != test_input.image_hash:
                # Only ship the PNG bytes when the hashes disagree.
                self._stdout.write('Content-Type: image/png\n')
                self._stdout.write('Content-Length: %s\n' % len(output.image))
                self._stdout.write(output.image)
        self._stdout.write('#EOF\n')
        self._stdout.flush()
        self._stderr.write('#EOF\n')
        self._stderr.flush()
if __name__ == '__main__':
# Note that the Mock in MockDRT refers to the fact that it is emulating a
# real DRT, and as such, it needs access to a real SystemHost, not a MockSystemHost.
sys.exit(main(sys.argv[1:], SystemHost(), sys.stdin, sys.stdout, sys.stderr))
| [
"[email protected]"
] | |
1ec9ba794ac9baee1df67a506f295e337a25d7fb | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ESAWnF3ySrFusHhYF_17.py | e5d41b17cbf9e0e12f8f85c729de22a0dd160c66 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py |
def edit_words(lst):
    """Return each word hyphenated at its midpoint, reversed and uppercased.

    >>> edit_words(['hello'])
    ['OLL-EH']
    """
    # The original wrapped the string in ''.join(...), which is a no-op on
    # a str; dropped for clarity.  Behavior is unchanged.
    return [add_hyphen(word)[::-1].upper() for word in lst]


def add_hyphen(string):
    """Insert a '-' at the midpoint of *string* (left half may be shorter)."""
    half = len(string) // 2
    return string[:half] + '-' + string[half:]
| [
"[email protected]"
] | |
d66a3005c423dd2c9ca316b67c11a1b9299da949 | 3777658387aa9e78d7c04202d7fd47d59b9e1271 | /datavisualization/candlestick.py | a58d3bacc7e90462ace43536b3decbf25c7a586e | [] | no_license | jocoder22/PythonDataScience | 709363ada65b6db61ee73c27d8be60587a74f072 | c5a9af42e41a52a7484db0732ac93b5945ade8bb | refs/heads/master | 2022-11-08T17:21:08.548942 | 2022-10-27T03:21:53 | 2022-10-27T03:21:53 | 148,178,242 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,956 | py | #!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.ticker as mticker
import mplfinance as mpf
import yfinance as yf
import matplotlib.pyplot as plt
import matplotlib.dates as mpdates
import finplot as fplt
import pandas_datareader as pdr
from datetime import datetime, date
# aapl = yf.download('BABA', '2020-1-1','2021-2-18')
sp = {"end":"\n\n", "sep":"\n\n"}
symbol = 'AAPL' # "VZ" # "CMCSA" #"VZ" #'BABA' # 'AAPL' #'AMZN'
starttime = datetime(2021, 1, 1)
endtime = date.today()
aapl = pdr.get_data_yahoo(symbol, starttime, endtime)
print(aapl.head(), aapl.tail(), **sp)
d = [12,26,9]
w = [5,35,5]
dayy = False
if dayy == True:
dd = d
else: dd = w
# computer MACD and signal
macd = aapl.Close.ewm(span=dd[0]).mean() - aapl.Close.ewm(span=dd[1]).mean()
signal = macd.ewm(span=dd[2]).mean()
aapl['macd_diff'] = macd - signal
# form dataframe
aapl['MACD'] = macd
aapl['MACDsig'] = signal
# compute period mean volume
aapl['numb'] = np.arange(1, aapl.shape[0]+1)
aapl['CUMSUM_C'] = aapl['Volume'].cumsum()
aapl["aveg"] = aapl['CUMSUM_C']/aapl['numb']
# print dataset head and tail
print(aapl.head(), aapl.tail(), **sp)
# display graph period average volume and daily volume
fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize =(24,10))
ax1.grid(alpha=0.7); ax2.grid(alpha=0.7)
ax1.plot(aapl.aveg, color="black")
ax2.plot(aapl.Volume, color="red")
plt.show()
# plot candlesticks and MACD
fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize =(24,10))
ax1.grid(alpha=0.7); ax2.grid(alpha=0.7)
ax1.set_title("Candlestick"); ax2.set_title("MACD")
#### plot candlestick
color = ["green" if close_price > open_price else "red" for close_price, open_price in zip(aapl.Close, aapl.Open)]
ax1.bar(x=aapl.index, height=np.abs(aapl.Open-aapl.Close), bottom=np.min((aapl.Open,aapl.Close), axis=0), width=0.6, color=color)
ax1.bar(x=aapl.index, height=aapl.High - aapl.Low, bottom=aapl.Low, width=0.1, color=color)
# ax3 = ax2.twinx()
# ax3.plot(aapl.Volume, color="black")
# plt.title(f'MACD chart {symbol}')
### plot MACD
color2 = ["green" if close_price > open_price else "red" for close_price, open_price in zip(aapl.MACD, aapl.MACDsig)]
ax2.plot( aapl['MACD'], label='MACD')
ax2.plot( aapl['MACDsig'], label='MACDsig')
# ax2.plot( aapl['macd_diff'], label='MACDhist')
ax2.bar( aapl.index, aapl['macd_diff'], snap=False, color = color2, width=0.6, label='MACDhist')
ax2.legend()
plt.show()
def candlestick(t, o, h, l, c):
    """Draw a candlestick chart onto a new matplotlib figure.

    t, o, h, l, c: aligned sequences of x positions and open/high/low/close
    prices.  Candles that closed above their open are green, others red.
    The caller is expected to call plt.show() afterwards.
    """
    plt.figure(figsize=(12,4))
    color = ["green" if close_price > open_price else "red" for close_price, open_price in zip(c, o)]
    # Thick bars: the candle bodies, spanning open-to-close.
    plt.bar(x=t, height=np.abs(o-c), bottom=np.min((o,c), axis=0), width=0.6, color=color)
    # Thin bars: the high-low wicks.
    plt.bar(x=t, height=h-l, bottom=l, width=0.1, color=color)
candlestick(
aapl.index,
aapl.Open,
aapl.High,
aapl.Low,
aapl.Close
)
plt.grid(alpha=0.9)
plt.show()
"""
# mpf.plot(aapl)
### plot using the mplfinance module
mpf.plot(aapl, type='candle')
mpf.plot(aapl, type='candle', mav=(12,26,9))
# plot using finplot module
fplt.background = '#B0E0E6'
fplt.candlestick_ochl(aapl[['Open', 'Close', 'High', 'Low']])
fplt.show()
fplt.background = '#F5F5F5'
fplt.candlestick_ochl(aapl[['Open', 'Close', 'High', 'Low']])
fplt.show()
fplt.background = "#BDB76B"
fplt.odd_plot_background = '#f0f' # purple
fplt.plot(aapl.Close)
fplt.show()
fplt.background = "#B0C4DE"
fplt.candlestick_ochl(aapl[['Open', 'Close', 'High', 'Low']])
fplt.show()
fplt.background = "#fff"
ax, ax2 = fplt.create_plot('Apple MACD', rows=2)
fplt.background = "#fff"
# plot macd with standard colors first
fplt.background = "#fff"
fplt.volume_ocv(aapl[['Open','Close','macd_diff']], ax=ax2, colorfunc=fplt.strength_colorfilter)
fplt.background = "#fff"
fplt.plot(macd, ax=ax2, legend='MACD')
fplt.plot(signal, ax=ax2, legend='Signal')
# change to b/w coloring templates for next plots
fplt.candle_bull_color = fplt.candle_bear_color = '#000'
fplt.volume_bull_color = fplt.volume_bear_color = '#333'
fplt.candle_bull_body_color = fplt.volume_bull_body_color = '#fff'
# plot price and volume
fplt.background = "#fff"
fplt.candlestick_ochl(aapl[['Open','Close','High','Low']], ax=ax)
hover_label = fplt.add_legend('', ax=ax)
axo = ax.overlay()
fplt.volume_ocv(aapl[['Open','Close','Volume']], ax=axo)
fplt.plot(aapl.Volume.ewm(span=24).mean(), ax=axo, color=1)
fplt.show()
# https://pypi.org/project/finplot/
aapl['MACD'] = macd
aapl['MACDsig'] = signal
plt.title(f'MACD chart {symbol}')
plt.plot( aapl['MACD'].fillna(0), label='MACD')
plt.plot( aapl['MACDsig'].fillna(0), label='MACDsig')
plt.plot( aapl['macd_diff'].fillna(0), label='MACDhist')
plt.bar( aapl.index, aapl['macd_diff'].fillna(0), width=0.1, snap=False, label='MACDhist')
plt.legend()
plt.show()
"""
# from numpy import mean, absolute
def mad2(data):
    """Mean absolute deviation of *data* about its arithmetic mean."""
    center = np.mean(data)
    deviations = np.abs(data - center)
    return np.mean(deviations)
mad4 = lambda x : np.mean(np.abs(x-np.mean(x)))
# Compute CCI
def cci(Data, lookback=20, wherett="Tprice", constant=0.015):
    """Compute the Commodity Channel Index (CCI) on *Data* in place.

    Adds these columns and returns *Data*:
      wherett        typical price: per-row mean of Close, High, Low
      'where1'       rolling mean absolute deviation of the typical price
      'where1b'      kept for backward compatibility; identical to 'where1'
      'where2'       rolling mean of the typical price
      wherett + '3'  the CCI: (tp - sma) / (constant * mad)

    Fixes over the original: the two ``for i in range(len(Data))`` loops
    overwrote the whole column on every iteration, so 'where1' ended up
    as the MAD of only the *last* window broadcast to every row; the
    rolling windows hard-coded 20 instead of honoring *lookback*; and
    the deprecated ``Series.mad()`` is replaced with an explicit
    rolling-apply.
    """
    # Typical price: per-row mean of Close, High and Low.
    Data[wherett] = Data[['Close', 'High', 'Low']].mean(axis=1)
    # Rolling mean absolute deviation over *lookback* rows.
    mean_abs_dev = lambda window: np.mean(np.abs(window - np.mean(window)))
    Data['where1'] = Data[wherett].rolling(window=lookback).apply(mean_abs_dev, raw=True)
    Data['where1b'] = Data['where1']
    # Rolling simple moving average of the typical price.
    Data['where2'] = Data[wherett].rolling(window=lookback).mean()
    # CCI: distance of the typical price from its moving average, scaled
    # by Lambert's 0.015 constant times the mean absolute deviation.
    Data[wherett + "3"] = (Data[wherett] - Data['where2']) / (constant * Data['where1'])
    return Data
# # Calculating Mean of Typical Price
# Data = ma(Data, lookback, where, where + 2)
ddata = aapl.loc[:,['Open', 'Close', 'High', 'Low']]
ccc = cci(ddata)
print(ccc.iloc[:,4:9].tail(), **sp)
ddata["man"] = np.mean(aapl[['Close', 'High', 'Low']], axis=1)
ddata['CCI']= (ddata["man"]-ddata["man"].rolling(20).mean())/(0.015 * ddata["man"].rolling(20).apply(mad4,True))
# ddata["sma20"] = ddata.man.rolling(window=20).mean()
print(ddata.tail())
upper_barrier = 250.0
lower_barrier = -250.0
def signal33(Data, what=8, lower_barrier=-250.0, upper_barrier=250.0):
    """Mark oscillator barrier crossings with 'buy' / 'sell' columns.

    Data: DataFrame whose column at positional index *what* (default 8,
        matching the original call site) holds the oscillator values.
    lower_barrier / upper_barrier: crossing thresholds; the defaults
        mirror the module-level globals the original read, so existing
        callers see identical behavior.

    A buy (1) fires when the oscillator drops below lower_barrier after
    two rows above it; a sell (-1) fires when it rises above
    upper_barrier after two rows below it.  Returns *Data* with the two
    new columns added.
    """
    Data['buy'], Data['sell'] = np.zeros(Data.shape[0]), np.zeros(Data.shape[0])
    # Write by column label instead of the fragile hard-coded positions
    # 11/12 the original used (those only lined up for one specific frame).
    buy_col = Data.columns.get_loc('buy')
    sell_col = Data.columns.get_loc('sell')
    for i in range(2, len(Data)):
        below_now = Data.iloc[i, what] < lower_barrier
        above_before = (Data.iloc[i - 1, what] > lower_barrier
                        and Data.iloc[i - 2, what] > lower_barrier)
        if below_now and above_before:
            Data.iat[i, buy_col] = 1
        above_now = Data.iloc[i, what] > upper_barrier
        below_before = (Data.iloc[i - 1, what] < upper_barrier
                        and Data.iloc[i - 2, what] < upper_barrier)
        if above_now and below_before:
            Data.iat[i, sell_col] = -1
    return Data
print(ddata.info())
signal33(ddata)
print(ddata.iloc[:,4:].tail(), ddata.shape, **sp)
print(ddata.describe())
plt.plot(ddata.Tprice3)
plt.grid(alpha=0.9)
plt.show()
print(ddata.sum(axis=0)) | [
"[email protected]"
] | |
47cf462efc28a2d3cf998b03dac3d81ebc268a70 | 20b9e875d2701ad198635c495625b49530338b46 | /tzgx_tzxw/tzgx_tzxw/items.py | 7e80ad77f4311a154dd8ed56a02f7c78d8381e56 | [] | no_license | leading/crawler-scrapy | 816339483447fb9c59db4327e5e65e83bde383fb | 06b37be4ce34252c4f3f23b22d9b3634cac57fad | refs/heads/master | 2023-01-03T17:48:34.233613 | 2020-11-03T14:01:49 | 2020-11-03T14:01:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import re
from urllib import parse
from scrapy.item import Field
import scrapy
from scrapy.loader.processors import TakeFirst, MapCompose, Join
class tzgx_tzxwItem(scrapy.Item):
    """Container for one scraped record from the tzgx/tzxw crawler.

    All fields are untyped scrapy Fields populated by the spider and
    serialized downstream.  NOTE(review): the field semantics below are
    inferred from their names (title, source, date, content, website,
    link, txt, spider_name, module_name) — confirm against the spider
    that fills them.
    """
    title = scrapy.Field()
    source = scrapy.Field()
    date = scrapy.Field()
    content = scrapy.Field()
    website = scrapy.Field()
    link = scrapy.Field()
    txt = scrapy.Field()
    spider_name = scrapy.Field()
    module_name = scrapy.Field()
"[email protected]"
] | |
f687ff7c7c299afcc80daf17f334f6524c99faf4 | d6a2d13d5a62c19b4072f26c649aacc8bc0d3309 | /pages/urls.py | 7f7d702a5aca4d0be76f883c50c64cab14328662 | [] | no_license | rifqirosyidi/django-polls | d4894add29063136592f0a788c043f687f416237 | deeb46483344543163bb469d9df9642757025f83 | refs/heads/master | 2023-04-29T09:13:31.524308 | 2021-04-12T09:42:23 | 2021-04-12T09:42:23 | 210,758,484 | 1 | 0 | null | 2023-04-21T20:37:50 | 2019-09-25T04:55:44 | Python | UTF-8 | Python | false | false | 127 | py | from django.urls import path
from . import views
app_name = 'pages'
urlpatterns = [
path('', views.index, name='index')
]
| [
"[email protected]"
] | |
ef5846c17626f8661bc8939b16710bf0b4dbc461 | e8bf00dba3e81081adb37f53a0192bb0ea2ca309 | /domains/explore/problems/training/problem107_EE.py | c88780c7450b5b528861aa509adde0683f99158f | [
"BSD-3-Clause"
] | permissive | patras91/rae_release | 1e6585ee34fe7dbb117b084df982ca8a8aed6795 | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | refs/heads/master | 2023-07-13T20:09:41.762982 | 2021-08-11T17:02:58 | 2021-08-11T17:02:58 | 394,797,515 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,826 | py | __author__ = 'patras'
from domain_exploreEnv import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
DURATION.COUNTER = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'}
rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4', 'z5', 'z6', 'z7']
rv.EDGES = {'base': {'z1': 15, 'z4': 15, 'z5': 35, 'z6': 35, 'z7': 35}, 'z1': {'base': 15, 'z2': 30}, 'z2': {'z1': 30, 'z3': 30}, 'z3': {'z2': 30, 'z4': 30}, 'z4': {'z3': 30, 'base': 15}, 'z5': {'base': 35}, 'z6': {'base': 35}, 'z7': {'base': 35}}
def ResetState():
    """Re-initialize the shared world state for this exploration problem.

    Mutates the module-level ``state`` record in place: agent locations,
    battery charge, collected data counts, object positions, carried
    loads, and the storm flag.  NOTE(review): presumably invoked by the
    planner before each run, as in sibling problem files — confirm.
    """
    state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'}
    state.charge = { 'UAV': 50, 'r1': 80, 'r2': 50}
    state.data = { 'UAV': 3, 'r1': 1, 'r2': 3}
    state.pos = {'c1': 'base', 'e1': 'r2', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base', 'o1': 'UAV'}
    # NIL comes from the domain import: r1 carries nothing, r2 carries
    # equipment e1, the UAV carries object o1.
    state.load = {'r1': NIL, 'r2': 'e1', 'UAV': 'o1'}
    state.storm = {'active': True}
tasks = {
3: [['doActivities', 'UAV', [['survey', 'z3'], ['survey', 'base']]]],
5: [['doActivities', 'r1', [['survey', 'z1'], ['process', 'z2'], ['process', 'base']]]],
}
eventsEnv = {
} | [
"[email protected]"
] | |
7781ecc296df81052241d659b46eacbafd2217d6 | 23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6 | /rootfs/usr/share/pyshared/checkbox/message.py | f81e761880abaeabd4e31698b0696aeda026851d | [] | no_license | xinligg/trainmonitor | 07ed0fa99e54e2857b49ad3435546d13cc0eb17a | 938a8d8f56dc267fceeb65ef7b867f1cac343923 | refs/heads/master | 2021-09-24T15:52:43.195053 | 2018-10-11T07:12:25 | 2018-10-11T07:12:25 | 116,164,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,966 | py | #
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
import os
import logging
import itertools
import posixpath
from checkbox.contrib import bpickle
HELD = "h"
BROKEN = "b"
ANCIENT = 1
class Message(dict):
    """A message payload (a dict) tagged with the file it was read from."""
    def __init__(self, message, filename):
        # Copy the payload into ourselves and remember the backing file so
        # the store can write updates back to the same path.
        dict.__init__(self, message)
        self.filename = filename
class MessageStore(object):
    """A message store which stores its messages in a file system hierarchy.

    Layout: message files live under *directory* in numbered
    subdirectories of at most *directory_size* files each; each file is a
    bpickled dict.  Delivery flags are encoded in the filename after an
    underscore: 'h' (HELD) and/or 'b' (BROKEN).  Exchange metadata — the
    peer-acknowledged sequence number and the pending offset into the
    queue — is kept in *persist* under the "message-store" root.

    NOTE(review): this module is Python 2 (``except ValueError, e``
    below); keep that in mind before modernizing.
    """
    def __init__(self, persist, directory, directory_size=1000):
        self._directory = directory
        self._directory_size = directory_size
        self._original_persist = persist
        self._persist = persist.root_at("message-store")
        message_dir = self._message_dir()
        if not posixpath.isdir(message_dir):
            os.makedirs(message_dir)
    def commit(self):
        """Save metadata to disk."""
        self._original_persist.save()
    def get_sequence(self):
        """
        Get the sequence number of the message that the server expects us to
        send on the next exchange.
        """
        return self._persist.get("sequence", 0)
    def set_sequence(self, number):
        """
        Set the sequence number of the message that the server expects us to
        send on the next exchange.
        """
        self._persist.set("sequence", number)
    def get_pending_offset(self):
        # Offset into the stored (non-held, non-broken) messages marking
        # where the current sequence number starts.
        return self._persist.get("pending_offset", 0)
    def set_pending_offset(self, val):
        """
        Set the offset into the message pool to consider assigned to the
        current sequence number as returned by l{get_sequence}.
        """
        self._persist.set("pending_offset", val)
    def add_pending_offset(self, val=1):
        self.set_pending_offset(self.get_pending_offset() + val)
    def remove_pending_offset(self, val=1):
        """Rewind the pending offset by *val*; False if it would go negative."""
        pending_offset = self.get_pending_offset()
        if pending_offset - val < 0:
            return False
        self.set_pending_offset(pending_offset - val)
        return True
    def count_pending_messages(self):
        """Return the number of pending messages."""
        return sum(1 for x in self._walk_pending_messages())
    def get_pending_messages(self, max=None):
        """Get any pending messages that aren't being held, up to max."""
        messages = []
        for filename in self._walk_pending_messages():
            if max is not None and len(messages) >= max:
                break
            try:
                message = self._read_message(filename)
            except ValueError, e:
                # Unreadable payload: flag the file BROKEN so it is
                # excluded from future walks instead of crashing them.
                logging.exception(e)
                self._add_flags(filename, BROKEN)
            else:
                messages.append(message)
        return messages
    def set_pending_flags(self, flags):
        # Only the first pending message is affected (note the break).
        for filename in self._walk_pending_messages():
            self._set_flags(filename, flags)
            break
    def add_pending_flags(self, flags):
        # Only the first pending message is affected (note the break).
        for filename in self._walk_pending_messages():
            self._add_flags(filename, flags)
            break
    def delete_old_messages(self):
        """Delete messages which are unlikely to be needed in the future."""
        # NOTE(review): 'filenames' is unused; kept for fidelity.
        filenames = self._get_sorted_filenames()
        # Everything before the pending offset has been acknowledged by
        # the peer, so the first pending_offset non-held, non-broken
        # files can go; empty subdirectories are pruned as we go.
        for fn in itertools.islice(self._walk_messages(exclude=HELD+BROKEN),
                                   self.get_pending_offset()):
            os.unlink(fn)
            containing_dir = posixpath.split(fn)[0]
            if not os.listdir(containing_dir):
                os.rmdir(containing_dir)
    def delete_all_messages(self):
        """Remove ALL stored messages."""
        self.set_pending_offset(0)
        for filename in self._walk_messages():
            os.unlink(filename)
    def is_pending(self, message_id):
        """Return bool indicating if C{message_id} still hasn't been delivered.
        @param message_id: Identifier returned by the L{add()} method.
        """
        # message_id is the file's inode (see _write_message); a message
        # is pending if it is held, or sits at/after the pending offset.
        i = 0
        pending_offset = self.get_pending_offset()
        for filename in self._walk_messages(exclude=BROKEN):
            flags = self._get_flags(filename)
            if ((HELD in flags or i >= pending_offset) and
                os.stat(filename).st_ino == message_id):
                return True
            if BROKEN not in flags and HELD not in flags:
                i += 1
        return False
    def add(self, message):
        """Queue a message for delivery.
        @return: message_id, which is an identifier for the added message.
        """
        filename = self._get_next_message_filename()
        return self._write_message(message, filename)
    def update(self, message):
        """Rewrite an existing L{Message} in place (same backing file)."""
        return self._write_message(message)
    def _get_next_message_filename(self):
        """Pick the path for the next message, rolling to a new numbered
        subdirectory once the newest one holds _directory_size files."""
        message_dirs = self._get_sorted_filenames()
        if message_dirs:
            newest_dir = message_dirs[-1]
        else:
            os.makedirs(self._message_dir("0"))
            newest_dir = "0"
        message_filenames = self._get_sorted_filenames(newest_dir)
        if not message_filenames:
            filename = self._message_dir(newest_dir, "0")
        elif len(message_filenames) < self._directory_size:
            # Next numeric name after the current highest (flags after
            # '_' are stripped before incrementing).
            filename = str(int(message_filenames[-1].split("_")[0]) + 1)
            filename = self._message_dir(newest_dir, filename)
        else:
            newest_dir = self._message_dir(str(int(newest_dir) + 1))
            os.makedirs(newest_dir)
            filename = posixpath.join(newest_dir, "0")
        return filename
    def _walk_pending_messages(self):
        """Walk the files which are definitely pending."""
        pending_offset = self.get_pending_offset()
        for i, filename in enumerate(self._walk_messages(exclude=HELD+BROKEN)):
            if i >= pending_offset:
                yield filename
    def _walk_messages(self, exclude=None):
        # Yield message paths in numeric order, skipping any whose flag
        # set intersects *exclude* (a string of flag characters).
        if exclude:
            exclude = set(exclude)
        message_dirs = self._get_sorted_filenames()
        for message_dir in message_dirs:
            for filename in self._get_sorted_filenames(message_dir):
                flags = set(self._get_flags(filename))
                if (not exclude or not exclude & flags):
                    yield self._message_dir(message_dir, filename)
    def _get_sorted_filenames(self, dir=""):
        # Numeric sort on the part before '_' so "10" follows "9";
        # in-progress ".tmp" files are ignored.
        message_files = [x for x in os.listdir(self._message_dir(dir))
                         if not x.endswith(".tmp")]
        message_files = sorted(message_files,
                               key=lambda x: int(x.split("_")[0]))
        return message_files
    def _message_dir(self, *args):
        return posixpath.join(self._directory, *args)
    def _get_content(self, filename):
        file = open(filename)
        try:
            return file.read()
        finally:
            file.close()
    def _get_flags(self, path):
        # Flags are the filename suffix after '_', e.g. "3_hb" -> "hb".
        basename = posixpath.basename(path)
        if "_" in basename:
            return basename.split("_")[1]
        return ""
    def _set_flags(self, path, flags):
        # Encode *flags* by renaming the file; duplicates are collapsed
        # and the characters sorted for a canonical name.
        dirname, basename = posixpath.split(path)
        new_path = posixpath.join(dirname, basename.split("_")[0])
        if flags:
            new_path += "_"+"".join(sorted(set(flags)))
        os.rename(path, new_path)
        return new_path
    def _add_flags(self, path, flags):
        self._set_flags(path, self._get_flags(path)+flags)
    def _load_message(self, data):
        return bpickle.loads(data)
    def _dump_message(self, message):
        return bpickle.dumps(message)
    def _read_message(self, filename):
        data = self._get_content(filename)
        message = self._load_message(data)
        return Message(message, filename)
    def _write_message(self, message, filename=None):
        if filename is None:
            filename = message.filename
        message_data = self._dump_message(message)
        # Write-then-rename so readers never see a half-written message.
        file = open(filename + ".tmp", "w")
        file.write(message_data)
        file.close()
        os.rename(filename + ".tmp", filename)
        # For now we use the inode as the message id, as it will work
        # correctly even faced with holding/unholding. It will break
        # if the store is copied over for some reason, but this shouldn't
        # present an issue given the current uses. In the future we
        # should have a nice transactional storage (e.g. sqlite) which
        # will offer a more strong primary key.
        return os.stat(filename).st_ino
def got_next_sequence(message_store, next_sequence):
    """Reconcile our outbound queue with the sequence the peer expects next.

    Three cases, depending on where *next_sequence* falls relative to the
    sequence we last recorded:

    1. Ahead of us (the common case, generally
       last_sent_sequence + len(messages_sent) + 1): the peer received
       what we sent, so delivered messages are discarded and the pending
       offset advances by the gap.
    2. Older than anything we still have cached: messages were probably
       lost; reset the offset so we resend everything we have, and
       report L{ANCIENT}.
    3. Within the range we still hold: rewind the pending offset so the
       next exchange resends starting from the requested message.

    Returns ANCIENT for case 2, otherwise None.
    """
    result = None
    last_sent = message_store.get_sequence()
    oldest_available = last_sent - message_store.get_pending_offset()
    if next_sequence > last_sent:
        # Case 1: peer is ahead; drop messages it has acknowledged.
        message_store.delete_old_messages()
        new_offset = next_sequence - last_sent
    elif next_sequence < oldest_available:
        # Case 2: peer wants messages we no longer have.
        new_offset = 0
        result = ANCIENT
    else:
        # Case 3: rewind (or stay put) within our cached messages.
        new_offset = message_store.get_pending_offset() + next_sequence - last_sent
    message_store.set_pending_offset(new_offset)
    message_store.set_sequence(next_sequence)
    return result
| [
"[email protected]"
] | |
32204a3385fce81b05f66e16dfef23f0a4512653 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-cloudpipeline/huaweicloudsdkcloudpipeline/v2/cloudpipeline_client.py | daa42b91636e7d7f61ee082bc79f05b30fa09bb9 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,026 | py | # coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class CloudPipelineClient(Client):
"""
:param configuration: .Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self):
super(CloudPipelineClient, self).__init__()
self.model_package = importlib.import_module("huaweicloudsdkcloudpipeline.v2.model")
self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls)
if clazz.__name__ != "CloudPipelineClient":
raise TypeError("client type error, support client type is CloudPipelineClient")
return ClientBuilder(clazz)
def batch_show_pipelines_status(self, request):
"""批量获取流水线状态
批量获取流水线状态和阶段信息
:param BatchShowPipelinesStatusRequest request
:return: BatchShowPipelinesStatusResponse
"""
return self.batch_show_pipelines_status_with_http_info(request)
    def batch_show_pipelines_status_with_http_info(self, request):
        """Batch-query pipeline status.

        Retrieves status and stage information for multiple pipelines.

        :param BatchShowPipelinesStatusRequest request
        :return: BatchShowPipelinesStatusResponse
        """
        all_params = ['pipeline_ids', 'x_language']
        local_var_params = {}
        # Copy every populated request attribute into a flat dict.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'pipeline_ids' in local_var_params:
            query_params.append(('pipeline_ids', local_var_params['pipeline_ids']))

        header_params = {}
        if 'x_language' in local_var_params:
            header_params['X-Language'] = local_var_params['x_language']

        form_params = {}

        body_params = None
        # Streaming requests send their file stream as the body.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])

        auth_settings = ['apig-auth-iam']

        return self.call_api(
            resource_path='/v3/pipelines/status',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='BatchShowPipelinesStatusResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
def create_pipeline_by_template(self, request):
"""基于模板快速创建流水线及流水线内任务
基于模板快速创建流水线及流水线内任务
:param CreatePipelineByTemplateRequest request
:return: CreatePipelineByTemplateResponse
"""
return self.create_pipeline_by_template_with_http_info(request)
def create_pipeline_by_template_with_http_info(self, request):
"""基于模板快速创建流水线及流水线内任务
基于模板快速创建流水线及流水线内任务
:param CreatePipelineByTemplateRequest request
:return: CreatePipelineByTemplateResponse
"""
all_params = ['create_pipeline_by_template_request_body', 'x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/v3/templates/task',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreatePipelineByTemplateResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_pipleine_build_result(self, request):
"""获取项目下流水线执行状况
获取项目下流水线执行状况
:param ListPipleineBuildResultRequest request
:return: ListPipleineBuildResultResponse
"""
return self.list_pipleine_build_result_with_http_info(request)
def list_pipleine_build_result_with_http_info(self, request):
"""获取项目下流水线执行状况
获取项目下流水线执行状况
:param ListPipleineBuildResultRequest request
:return: ListPipleineBuildResultResponse
"""
all_params = ['project_id', 'start_date', 'end_date', 'offset', 'limit', 'x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'project_id' in local_var_params:
query_params.append(('project_id', local_var_params['project_id']))
if 'start_date' in local_var_params:
query_params.append(('start_date', local_var_params['start_date']))
if 'end_date' in local_var_params:
query_params.append(('end_date', local_var_params['end_date']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/v3/pipelines/build-result',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListPipleineBuildResultResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_templates(self, request):
"""查询模板列表
查询模板列表,支持分页查询,支持模板名字模糊查询
:param ListTemplatesRequest request
:return: ListTemplatesResponse
"""
return self.list_templates_with_http_info(request)
def list_templates_with_http_info(self, request):
"""查询模板列表
查询模板列表,支持分页查询,支持模板名字模糊查询
:param ListTemplatesRequest request
:return: ListTemplatesResponse
"""
all_params = ['template_type', 'is_build_in', 'x_language', 'offset', 'limit', 'name', 'sort', 'asc']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'template_type' in local_var_params:
query_params.append(('template_type', local_var_params['template_type']))
if 'is_build_in' in local_var_params:
query_params.append(('is_build_in', local_var_params['is_build_in']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'sort' in local_var_params:
query_params.append(('sort', local_var_params['sort']))
if 'asc' in local_var_params:
query_params.append(('asc', local_var_params['asc']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/v3/templates',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListTemplatesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def register_agent(self, request):
"""register注册Slave接口
注册创建Slave接口
:param RegisterAgentRequest request
:return: RegisterAgentResponse
"""
return self.register_agent_with_http_info(request)
def register_agent_with_http_info(self, request):
"""register注册Slave接口
注册创建Slave接口
:param RegisterAgentRequest request
:return: RegisterAgentResponse
"""
all_params = ['register_agent_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/agentregister/v1/agent/register',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RegisterAgentResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def remove_pipeline(self, request):
"""删除流水线
根据id删除流水线
:param RemovePipelineRequest request
:return: RemovePipelineResponse
"""
return self.remove_pipeline_with_http_info(request)
def remove_pipeline_with_http_info(self, request):
"""删除流水线
根据id删除流水线
:param RemovePipelineRequest request
:return: RemovePipelineResponse
"""
all_params = ['pipeline_id', 'x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'pipeline_id' in local_var_params:
path_params['pipeline_id'] = local_var_params['pipeline_id']
query_params = []
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/v3/pipelines/{pipeline_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RemovePipelineResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_agent_status(self, request):
"""Agent状态查询
Agent状态查询
:param ShowAgentStatusRequest request
:return: ShowAgentStatusResponse
"""
return self.show_agent_status_with_http_info(request)
def show_agent_status_with_http_info(self, request):
"""Agent状态查询
Agent状态查询
:param ShowAgentStatusRequest request
:return: ShowAgentStatusResponse
"""
all_params = ['agent_id', 'x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'agent_id' in local_var_params:
path_params['agent_id'] = local_var_params['agent_id']
query_params = []
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/v1/agents/{agent_id}/status',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowAgentStatusResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_instance_status(self, request):
"""检查流水线创建状态
检查流水线创建状态
:param ShowInstanceStatusRequest request
:return: ShowInstanceStatusResponse
"""
return self.show_instance_status_with_http_info(request)
def show_instance_status_with_http_info(self, request):
"""检查流水线创建状态
检查流水线创建状态
:param ShowInstanceStatusRequest request
:return: ShowInstanceStatusResponse
"""
all_params = ['task_id', 'x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'task_id' in local_var_params:
path_params['task_id'] = local_var_params['task_id']
query_params = []
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/v3/templates/{task_id}/status',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowInstanceStatusResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_pipleine_status(self, request):
"""获取流水线状态
获取流水线状态,阶段及任务信息
:param ShowPipleineStatusRequest request
:return: ShowPipleineStatusResponse
"""
return self.show_pipleine_status_with_http_info(request)
def show_pipleine_status_with_http_info(self, request):
"""获取流水线状态
获取流水线状态,阶段及任务信息
:param ShowPipleineStatusRequest request
:return: ShowPipleineStatusResponse
"""
all_params = ['pipeline_id', 'x_language', 'build_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'pipeline_id' in local_var_params:
path_params['pipeline_id'] = local_var_params['pipeline_id']
query_params = []
if 'build_id' in local_var_params:
query_params.append(('build_id', local_var_params['build_id']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/v3/pipelines/{pipeline_id}/status',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowPipleineStatusResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_template_detail(self, request):
"""查询模板详情
查询模板详情
:param ShowTemplateDetailRequest request
:return: ShowTemplateDetailResponse
"""
return self.show_template_detail_with_http_info(request)
def show_template_detail_with_http_info(self, request):
"""查询模板详情
查询模板详情
:param ShowTemplateDetailRequest request
:return: ShowTemplateDetailResponse
"""
all_params = ['template_id', 'template_type', 'x_language', 'source']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'template_id' in local_var_params:
path_params['template_id'] = local_var_params['template_id']
query_params = []
if 'template_type' in local_var_params:
query_params.append(('template_type', local_var_params['template_type']))
if 'source' in local_var_params:
query_params.append(('source', local_var_params['source']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/v3/templates/{template_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowTemplateDetailResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def start_new_pipeline(self, request):
"""启动流水线
启动流水线
:param StartNewPipelineRequest request
:return: StartNewPipelineResponse
"""
return self.start_new_pipeline_with_http_info(request)
def start_new_pipeline_with_http_info(self, request):
"""启动流水线
启动流水线
:param StartNewPipelineRequest request
:return: StartNewPipelineResponse
"""
all_params = ['pipeline_id', 'x_language', 'start_new_pipeline_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'pipeline_id' in local_var_params:
path_params['pipeline_id'] = local_var_params['pipeline_id']
query_params = []
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/v3/pipelines/{pipeline_id}/start',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='StartNewPipelineResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def stop_pipeline_new(self, request):
"""停止流水线
停止流水线
:param StopPipelineNewRequest request
:return: StopPipelineNewResponse
"""
return self.stop_pipeline_new_with_http_info(request)
def stop_pipeline_new_with_http_info(self, request):
"""停止流水线
停止流水线
:param StopPipelineNewRequest request
:return: StopPipelineNewResponse
"""
all_params = ['pipeline_id', 'build_id', 'x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'pipeline_id' in local_var_params:
path_params['pipeline_id'] = local_var_params['pipeline_id']
query_params = []
if 'build_id' in local_var_params:
query_params.append(('build_id', local_var_params['build_id']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = ['apig-auth-iam']
return self.call_api(
resource_path='/v3/pipelines/{pipeline_id}/stop',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='StopPipelineNewResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
                 post_params=None, response_type=None, response_headers=None, auth_settings=None,
                 collection_formats=None, request_type=None):
        """Makes the HTTP request and returns deserialized data.

        :param resource_path: Path to method endpoint.
        :param method: Method to call.
        :param path_params: Path parameters in the url.
        :param query_params: Query parameters in the url.
        :param header_params: Header parameters to be placed in the request header.
        :param body: Request body.
        :param post_params dict: Request post form parameters,
            for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth Settings names for the request.
        :param response_type: Response data type.
        :param response_headers: Header should be added to response data.
        :param collection_formats: dict of collection formats for path, query,
            header, and post parameters.
        :param request_type: Request data type.
        :return:
            Return the response directly.
        """
        # Thin delegation to the shared core transport in huaweicloudsdkcore.
        return self.do_http_request(
            method=method,
            resource_path=resource_path,
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body,
            post_params=post_params,
            response_type=response_type,
            response_headers=response_headers,
            collection_formats=collection_formats,
            request_type=request_type)
| [
"[email protected]"
] | |
44100cd8a56941562abc28b6cc540426d38ee2b7 | 7faec297f7dc533e883ba10e930a8b322db0069c | /src/mercury/log_service/service.py | 4f552ac953476dd9df5ca6d0987ad9a435bde444 | [
"Apache-2.0"
] | permissive | SovietPanda/mercury | 50c8bc1411a15a2c4d6f1d0373c072f034895192 | 3a6a16e5b176246f1df077df46249463e20736f2 | refs/heads/master | 2020-03-27T00:19:27.909490 | 2018-08-10T15:36:40 | 2018-08-10T15:36:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,947 | py | import logging
import time
from mercury.common.configuration import MercuryConfiguration
from mercury.common.exceptions import MercuryClientException
from mercury.common.mongo import get_collection, get_connection
from mercury.common.transport import SimpleRouterReqService
LOG = logging.getLogger(__name__)
MERCURY_LOG_CONFIG = 'mercury-log.yaml'
def options():
    """Register and parse configuration options for the mercury-log service.

    :return: the options object produced by
        ``MercuryConfiguration.scan_options()``.
    """
    configuration = MercuryConfiguration(
        'mercury-log',
        MERCURY_LOG_CONFIG,
        description='The mercury logging backend')

    configuration.add_option('log_service.bind_address',
                             default='tcp://127.0.0.1:9006',
                             help_string='The address to bind to'
                             )

    configuration.add_option('log_service.db.servers',
                             default='127.0.0.1:27017',
                             special_type=list,
                             # Typo fix in user-facing help text:
                             # "coma separated" -> "comma separated".
                             help_string='Server or comma separated list of '
                                         'servers to connect to')

    configuration.add_option('log_service.db.name',
                             config_address='log_service.db.name',
                             default='test',
                             help_string='The database for our collections')

    configuration.add_option('log_service.db.collection',
                             default='log',
                             help_string='The collection for our documents')

    configuration.add_option('log_service.db.replica_name',
                             help_string='An optional replica')

    return configuration.scan_options()
class AgentLogService(SimpleRouterReqService):
    """
    Logging aggregation end point for MercuryAgents.

    Validates incoming log records, timestamps them, tags records produced
    by task-runner threads with their job/task ids, and persists them to a
    MongoDB collection.
    """

    def __init__(self, bind_address, log_collection):
        """
        :param bind_address: ZeroMQ address the router socket binds to.
        :param log_collection: MongoDB collection receiving log documents.
        """
        super(AgentLogService, self).__init__(bind_address)
        self.log_collection = log_collection

    @staticmethod
    def validate_message(message):
        """Return True if *message* carries every required log-record field."""
        LOG.debug(message)
        required = [
            'levelno',
            'pathname',
            'message',
            'name'
        ]
        # all() short-circuits on the first missing field.
        return all(field in message for field in required)

    @staticmethod
    def set_job_info_from_thread(message):
        """
        The task runner thread (agent.task_runner) has the following naming
        convention:

            _<job_id>_<task_id>

        This lets us associate logging messages to jobs/tasks from within the
        execution thread.

        :param message: reference to the log message
        :return: None
        """
        thread_name = message.get('threadName')
        if thread_name and thread_name.startswith('_'):
            # Split on at most the first two underscores so a thread name
            # with extra (or missing) segments cannot raise ValueError and
            # kill the request; the original unconditional unpack did.
            parts = thread_name.split('_', 2)[1:]
            if len(parts) == 2:
                message['job_id'], message['task_id'] = parts

    def process(self, message):
        """Validate, timestamp, tag and persist one log record.

        :raises MercuryClientException: if required fields are missing.
        :return: an ok/error acknowledgement dict for the agent.
        """
        if not self.validate_message(message):
            raise MercuryClientException('Invalid message')

        message.update({'time_created': time.time()})
        self.set_job_info_from_thread(message)
        # NOTE(review): Collection.insert is deprecated in pymongo 3.x in
        # favor of insert_one — confirm the pinned driver version before
        # switching.
        self.log_collection.insert(message)
        return {'message': 'ok', 'error': False}
def main():
    """Entry point: load config, set up logging and Mongo, run the service."""
    config = options()

    # Root logging is configured from the shared mercury logging options.
    logging.basicConfig(level=logging.getLevelName(config.logging.level),
                        format=config.logging.format)

    db_connection = get_connection(config.log_service.db.servers,
                                   config.log_service.db.replica_name)

    collection = get_collection(config.log_service.db.name,
                                config.log_service.db.collection,
                                db_connection)

    agent_log_service = AgentLogService(config.log_service.bind_address,
                                        collection)

    LOG.info('Starting logging backend on {}'.format(
        config.log_service.bind_address))

    # Blocks serving requests until the process is interrupted.
    agent_log_service.start()


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
01c8bff9cacd76f8143b112377aaff0552c81f94 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/73/usersdata/174/37158/submittedfiles/triangulo.py | f7f57035688a7678ea328f2c23e7fbde5c853f11 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
import math

# Side lengths; the exercise supplies them largest-first (a >= b >= c) —
# TODO confirm against the problem statement.
a = float(input('Comprimento A:'))
b = float(input('Comprimento B:'))
c = float(input('Comprimento C:'))

# Triangle inequality: with a as the largest side it suffices that a < b + c.
if a >= b >= c > 0 and a < (b + c):
    print('S')
    # Classify by largest angle (sign of the law-of-cosines comparison).
    if (a**2) == (b**2) + (c**2):
        print('Re')   # right triangle
    elif (a**2) > (b**2) + (c**2):
        print('Ob')   # obtuse
    else:
        print('Ac')   # acute
    # Classify by sides.  In the original code these branches were chained
    # as elif's of the outer triangle test, so they were unreachable
    # whenever 'S' printed, and '(b == c) != a' compared a bool to a float
    # (always True for positive sides).
    if a == b == c:
        print('Eq')   # equilateral
    elif a == b or b == c:
        print('Is')   # isosceles (a >= b >= c, so only adjacent pairs match)
    else:
        print('Es')   # scalene
else:
    print('N')
| [
"[email protected]"
] | |
3a42de5559607d37d3040ba6ae5a378abcc45257 | 3591ab22e1cc0fc1362f909017a8aa5c2b53bd92 | /FundNavSpiders/LeiGenFund.py | 61a013e9b179634dde51908a984a1dffba1e555d | [] | no_license | Wnltc/ggscrapy | ef7e9559ce6140e7147f539778e25fc7f6cbee4c | bf929112e14b875a583803fe92980fe67129bdac | refs/heads/master | 2023-03-15T22:00:45.377540 | 2018-06-06T02:19:14 | 2018-06-06T02:19:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,050 | py | from FundNavSpiders import GGFundNavItem
from FundNavSpiders import GGFundNavSpider
import json
from datetime import date, datetime
class LeiGenFundSpider(GGFundNavSpider):
    """Spider for fund NAV data published by 上海雷根资产 (Regan Fund).

    Fetches the fund list from the mobile/WeChat JSON API, then requests the
    full NAV history per fund and yields one GGFundNavItem per trading day.
    """

    name = 'FundNav_LeiGenFund'
    sitename = '上海雷根资产'
    channel = '投顾净值'

    # Seed request: POST the fund-list query (top 100, no tag filter).
    fps = [
        {
            'url': 'http://m.reganfund.com/weChat/fundQueryNew',
            'body': "{'fundInfo': '', 'top100': '1', 'tagIds': ''}",
            'headers': {
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Content-Type': 'application/json; charset=UTF-8'
            }
        }
    ]

    def parse_fund(self, response):
        """Queue one data-overview request per fund in the list response.

        Appends request dicts to ``self.ips`` (consumed by the base spider).
        """
        funds = json.loads(response.text)['resultList']
        for fund in funds:
            # One fund is looked up via a hard-coded code instead of the
            # code returned by the API.
            if fund['fundName'] == '雷根9号基金':
                fund_code = 'S33704'
            else:
                fund_code = fund['fundCode']
            # Request the full history: 2001-01-01 through today.
            body = json.dumps(
                {"fundCode": fund_code, "startDate": "2001-01-01", "endDate": date.isoformat(datetime.now())})
            self.ips.append({
                'url': 'http://m.reganfund.com/weChat/dataOverview',
                'body': body,
                'headers': {'Content-Type': 'application/json; charset=UTF-8'}
            })

    def parse_item(self, response):
        """Yield one GGFundNavItem per NAV row of the overview response."""
        rows = json.loads(response.text)['result']['value']
        fund_name = json.loads(response.text)['fundName']
        for row in rows:
            item = GGFundNavItem()
            item['sitename'] = self.sitename
            item['channel'] = self.channel
            item['url'] = 'http://www.reganfund.com/product.html'
            item['fund_name'] = fund_name
            statistic_date = row['tradingDate']
            item['statistic_date'] = datetime.strptime(statistic_date, '%Y-%m-%d')
            nav = row['nav']
            item['nav'] = float(nav) if nav is not None else None
            # NOTE(review): added_nav is read from row['nav'], the same key
            # as nav above — looks like a copy/paste slip (a cumulative-NAV
            # field was probably intended); confirm against the API payload
            # before changing.
            added_nav = row['nav']
            item['added_nav'] = float(added_nav) if added_nav is not None else None
            yield item
| [
"[email protected]"
] | |
a13241bdf8a7f6673b13b51d99ce11ac7f5815e0 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/cloud/memcache/v1beta2/memcache-v1beta2-py/tests/unit/gapic/memcache_v1beta2/test_cloud_memcache.py | 6dae41f8cdc2469b105c660bd19d5e0700e7fedf | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120,536 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.memcache_v1beta2.services.cloud_memcache import CloudMemcacheAsyncClient
from google.cloud.memcache_v1beta2.services.cloud_memcache import CloudMemcacheClient
from google.cloud.memcache_v1beta2.services.cloud_memcache import pagers
from google.cloud.memcache_v1beta2.services.cloud_memcache import transports
from google.cloud.memcache_v1beta2.services.cloud_memcache.transports.base import _API_CORE_VERSION
from google.cloud.memcache_v1beta2.services.cloud_memcache.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.memcache_v1beta2.types import cloud_memcache
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-api-core >= 1.26.0 is required:
# - Delete all the api-core and auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
# Skip markers gating tests on the installed google-auth version.
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
# Skip markers gating tests on the installed google-api-core version.
requires_api_core_lt_1_26_0 = pytest.mark.skipif(
    packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"),
    reason="This test requires google-api-core < 1.26.0",
)
requires_api_core_gte_1_26_0 = pytest.mark.skipif(
    packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"),
    reason="This test requires google-api-core >= 1.26.0",
)
def client_cert_source_callback():
    """Stub mTLS client-certificate source returning fixed cert/key bytes."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return the client's default endpoint, swapped to "foo.googleapis.com"
    when it points at localhost, so mTLS endpoint derivation is observable."""
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts ".mtls" after the first label of
    *.googleapis.com hosts, is idempotent, and passes other hosts through."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    # None passes through unchanged.
    assert CloudMemcacheClient._get_default_mtls_endpoint(None) is None
    assert CloudMemcacheClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    # Already-mtls endpoints are left as-is (idempotent).
    assert CloudMemcacheClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert CloudMemcacheClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert CloudMemcacheClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    # Non-Google hosts are returned unchanged.
    assert CloudMemcacheClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
    CloudMemcacheClient,
    CloudMemcacheAsyncClient,
])
def test_cloud_memcache_client_from_service_account_info(client_class):
    """from_service_account_info builds a client carrying the factory's
    credentials and targeting the default endpoint (both sync and async)."""
    creds = ga_credentials.AnonymousCredentials()
    # Patch the google-auth factory so no real key material is needed.
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'memcache.googleapis.com:443'
@pytest.mark.parametrize("client_class", [
    CloudMemcacheClient,
    CloudMemcacheAsyncClient,
])
def test_cloud_memcache_client_from_service_account_file(client_class):
    """from_service_account_file and its from_service_account_json alias both use the factory credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == 'memcache.googleapis.com:443'
def test_cloud_memcache_client_get_transport_class():
    """get_transport_class returns the gRPC transport both by default and by the "grpc" label."""
    default_transport = CloudMemcacheClient.get_transport_class()
    known_transports = [
        transports.CloudMemcacheGrpcTransport,
    ]
    assert default_transport in known_transports
    named_transport = CloudMemcacheClient.get_transport_class("grpc")
    assert named_transport == transports.CloudMemcacheGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (CloudMemcacheClient, transports.CloudMemcacheGrpcTransport, "grpc"),
    (CloudMemcacheAsyncClient, transports.CloudMemcacheGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(CloudMemcacheClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudMemcacheClient))
@mock.patch.object(CloudMemcacheAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudMemcacheAsyncClient))
def test_cloud_memcache_client_client_options(client_class, transport_class, transport_name):
    """Exercise client_options handling: transport instance vs name, explicit
    api_endpoint, each GOOGLE_API_USE_MTLS_ENDPOINT value (never/always/invalid),
    an invalid GOOGLE_API_USE_CLIENT_CERTIFICATE, and quota_project_id.
    """
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(CloudMemcacheClient, 'get_transport_class') as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(CloudMemcacheClient, 'get_transport_class') as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (CloudMemcacheClient, transports.CloudMemcacheGrpcTransport, "grpc", "true"),
    (CloudMemcacheAsyncClient, transports.CloudMemcacheGrpcAsyncIOTransport, "grpc_asyncio", "true"),
    (CloudMemcacheClient, transports.CloudMemcacheGrpcTransport, "grpc", "false"),
    (CloudMemcacheAsyncClient, transports.CloudMemcacheGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(CloudMemcacheClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudMemcacheClient))
@mock.patch.object(CloudMemcacheAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudMemcacheAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_cloud_memcache_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the endpoint switches to the mTLS
    host only when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a client cert
    is available (explicit client_cert_source, ADC cert, or neither).
    """
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (CloudMemcacheClient, transports.CloudMemcacheGrpcTransport, "grpc"),
    (CloudMemcacheAsyncClient, transports.CloudMemcacheGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_cloud_memcache_client_client_options_scopes(client_class, transport_class, transport_name):
    """Scopes given in client_options are forwarded verbatim to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (CloudMemcacheClient, transports.CloudMemcacheGrpcTransport, "grpc"),
    (CloudMemcacheAsyncClient, transports.CloudMemcacheGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_cloud_memcache_client_client_options_credentials_file(client_class, transport_class, transport_name):
    """A credentials_file in client_options is forwarded verbatim to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_cloud_memcache_client_client_options_from_dict():
    """client_options may be a plain dict; api_endpoint still reaches the transport."""
    with mock.patch('google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = CloudMemcacheClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_list_instances(transport: str = 'grpc', request_type=cloud_memcache.ListInstancesRequest):
    """list_instances sends a ListInstancesRequest and wraps the response in a pager."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_memcache.ListInstancesResponse(
            next_page_token='next_page_token_value',
            unreachable=['unreachable_value'],
        )
        response = client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.ListInstancesRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, pagers.ListInstancesPager)
        assert response.next_page_token == 'next_page_token_value'
        assert response.unreachable == ['unreachable_value']
def test_list_instances_from_dict():
    """Re-run the happy-path test with the request supplied as a plain dict."""
    test_list_instances(request_type=dict)
def test_list_instances_empty_call():
    """Calling list_instances with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__') as call:
        client.list_instances()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.ListInstancesRequest()
@pytest.mark.asyncio
async def test_list_instances_async(transport: str = 'grpc_asyncio', request_type=cloud_memcache.ListInstancesRequest):
    """Async list_instances sends the request and returns an async pager."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cloud_memcache.ListInstancesResponse(
            next_page_token='next_page_token_value',
            unreachable=['unreachable_value'],
        ))
        response = await client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.ListInstancesRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, pagers.ListInstancesAsyncPager)
        assert response.next_page_token == 'next_page_token_value'
        assert response.unreachable == ['unreachable_value']
@pytest.mark.asyncio
async def test_list_instances_async_from_dict():
    """Re-run the async happy-path test with the request supplied as a dict."""
    await test_list_instances_async(request_type=dict)
def test_list_instances_field_headers():
    """The request's parent field is propagated as x-goog-request-params metadata."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.ListInstancesRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__') as call:
        call.return_value = cloud_memcache.ListInstancesResponse()
        client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_list_instances_field_headers_async():
    """Async variant: parent is propagated as x-goog-request-params metadata."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.ListInstancesRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_memcache.ListInstancesResponse())
        await client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
def test_list_instances_flattened():
    """A flattened `parent` kwarg is copied onto the outgoing request message."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_memcache.ListInstancesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_instances(
            parent='parent_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
def test_list_instances_flattened_error():
    """Mixing a request message with flattened fields raises ValueError."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = cloud_memcache.ListInstancesRequest()
    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        client.list_instances(request, parent='parent_value')
@pytest.mark.asyncio
async def test_list_instances_flattened_async():
    """Async variant: a flattened `parent` kwarg is copied onto the request.

    Fix: the generated code assigned `call.return_value` twice, first a bare
    response that was immediately overwritten by the FakeUnaryUnaryCall wrapper;
    the dead first assignment is removed.
    """
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__') as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_memcache.ListInstancesResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_instances(
            parent='parent_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_instances_flattened_error_async():
    """Async variant: mixing a request message with flattened fields raises ValueError."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = cloud_memcache.ListInstancesRequest()
    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        await client.list_instances(request, parent='parent_value')
def test_list_instances_pager():
    """The sync pager fetches every page, carries routing metadata, and yields all resources.

    Fixes: `AnonymousCredentials` was passed as a class instead of an instance;
    the redundant `metadata = ()` pre-assignment is folded into one expression;
    the manual list comprehension over the pager is replaced by `list(pager)`.
    """
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__') as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager fetching past the last page.
        call.side_effect = (
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                ],
                next_page_token='abc',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[],
                next_page_token='def',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                ],
                next_page_token='ghi',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                ],
            ),
            RuntimeError,
        )

        # Routing metadata the pager must attach to every page fetch.
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((
                ('parent', ''),
            )),
        )
        pager = client.list_instances(request={})
        assert pager._metadata == metadata

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, cloud_memcache.Instance)
                   for i in results)
def test_list_instances_pages():
    """Iterating `.pages` yields each raw page with its next_page_token.

    Fix: `AnonymousCredentials` was passed as a class instead of an instance.
    """
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                ],
                next_page_token='abc',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[],
                next_page_token='def',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                ],
                next_page_token='ghi',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_instances(request={}).pages)
        # The last page has no next_page_token, hence the trailing ''.
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_instances_async_pager():
    """The async pager iterates all pages and yields every resource.

    Fix: `AnonymousCredentials` was passed as a class instead of an instance.
    """
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                ],
                next_page_token='abc',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[],
                next_page_token='def',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                ],
                next_page_token='ghi',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_instances(request={},)
        assert async_pager.next_page_token == 'abc'
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, cloud_memcache.Instance)
                   for i in responses)
@pytest.mark.asyncio
async def test_list_instances_async_pages():
    """Async `.pages` iteration yields each raw page with its next_page_token.

    Fix: `AnonymousCredentials` was passed as a class instead of an instance.
    """
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_instances),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                ],
                next_page_token='abc',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[],
                next_page_token='def',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                ],
                next_page_token='ghi',
            ),
            cloud_memcache.ListInstancesResponse(
                resources=[
                    cloud_memcache.Instance(),
                    cloud_memcache.Instance(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_instances(request={})).pages:
            pages.append(page_)
        # The last page has no next_page_token, hence the trailing ''.
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
            assert page_.raw_page.next_page_token == token
def test_get_instance(transport: str = 'grpc', request_type=cloud_memcache.GetInstanceRequest):
    """get_instance sends a GetInstanceRequest and returns the Instance unchanged."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_memcache.Instance(
            name='name_value',
            display_name='display_name_value',
            authorized_network='authorized_network_value',
            zones=['zones_value'],
            node_count=1070,
            memcache_version=cloud_memcache.MemcacheVersion.MEMCACHE_1_5,
            state=cloud_memcache.Instance.State.CREATING,
            memcache_full_version='memcache_full_version_value',
            discovery_endpoint='discovery_endpoint_value',
            update_available=True,
        )
        response = client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.GetInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_memcache.Instance)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.authorized_network == 'authorized_network_value'
    assert response.zones == ['zones_value']
    assert response.node_count == 1070
    assert response.memcache_version == cloud_memcache.MemcacheVersion.MEMCACHE_1_5
    assert response.state == cloud_memcache.Instance.State.CREATING
    assert response.memcache_full_version == 'memcache_full_version_value'
    assert response.discovery_endpoint == 'discovery_endpoint_value'
    assert response.update_available is True
def test_get_instance_from_dict():
    """Re-run the happy-path test with the request supplied as a plain dict."""
    test_get_instance(request_type=dict)
def test_get_instance_empty_call():
    """Calling get_instance with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_instance),
            '__call__') as call:
        client.get_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.GetInstanceRequest()
@pytest.mark.asyncio
async def test_get_instance_async(transport: str = 'grpc_asyncio', request_type=cloud_memcache.GetInstanceRequest):
    """Async get_instance sends the request and returns the Instance unchanged."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cloud_memcache.Instance(
            name='name_value',
            display_name='display_name_value',
            authorized_network='authorized_network_value',
            zones=['zones_value'],
            node_count=1070,
            memcache_version=cloud_memcache.MemcacheVersion.MEMCACHE_1_5,
            state=cloud_memcache.Instance.State.CREATING,
            memcache_full_version='memcache_full_version_value',
            discovery_endpoint='discovery_endpoint_value',
            update_available=True,
        ))
        response = await client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.GetInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_memcache.Instance)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.authorized_network == 'authorized_network_value'
    assert response.zones == ['zones_value']
    assert response.node_count == 1070
    assert response.memcache_version == cloud_memcache.MemcacheVersion.MEMCACHE_1_5
    assert response.state == cloud_memcache.Instance.State.CREATING
    assert response.memcache_full_version == 'memcache_full_version_value'
    assert response.discovery_endpoint == 'discovery_endpoint_value'
    assert response.update_available is True
@pytest.mark.asyncio
async def test_get_instance_async_from_dict():
    """Re-run the async happy-path test with the request supplied as a dict."""
    await test_get_instance_async(request_type=dict)
def test_get_instance_field_headers():
    """The request's name field is propagated as x-goog-request-params metadata."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.GetInstanceRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_instance),
            '__call__') as call:
        call.return_value = cloud_memcache.Instance()
        client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_get_instance_field_headers_async():
    """Async variant: name is propagated as x-goog-request-params metadata."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.GetInstanceRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_instance),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_memcache.Instance())
        await client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_get_instance_flattened():
    """A flattened `name` kwarg is copied onto the outgoing request message."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_memcache.Instance()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_instance(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
def test_get_instance_flattened_error():
    """Mixing a request message with flattened fields raises ValueError."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = cloud_memcache.GetInstanceRequest()
    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        client.get_instance(request, name='name_value')
@pytest.mark.asyncio
async def test_get_instance_flattened_async():
    """Async variant: a flattened `name` kwarg is copied onto the request.

    Fix: the generated code assigned `call.return_value` twice, first a bare
    Instance that was immediately overwritten by the FakeUnaryUnaryCall wrapper;
    the dead first assignment is removed.
    """
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_instance),
            '__call__') as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_memcache.Instance())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_instance(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_instance_flattened_error_async():
    """Async variant: mixing a request message with flattened fields raises ValueError."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = cloud_memcache.GetInstanceRequest()
    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        await client.get_instance(request, name='name_value')
def test_create_instance(transport: str = 'grpc', request_type=cloud_memcache.CreateInstanceRequest):
    """create_instance sends a CreateInstanceRequest and returns a long-running operation future."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/spam')
        response = client.create_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.CreateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_create_instance_from_dict():
    """Re-run the happy-path test with the request supplied as a plain dict."""
    test_create_instance(request_type=dict)
def test_create_instance_empty_call():
    """Calling create_instance with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_instance),
            '__call__') as call:
        client.create_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.CreateInstanceRequest()
@pytest.mark.asyncio
async def test_create_instance_async(transport: str = 'grpc_asyncio', request_type=cloud_memcache.CreateInstanceRequest):
    """Async create_instance forwards the request over mocked gRPC and returns an LRO future."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        response = await client.create_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.CreateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_instance_async_from_dict():
    """Async create_instance also accepts a plain dict as the request."""
    await test_create_instance_async(request_type=dict)
def test_create_instance_field_headers():
    """create_instance propagates the request's parent into x-goog-request-params metadata."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.CreateInstanceRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_instance),
            '__call__') as call:
        call.return_value = operations_pb2.Operation(name='operations/op')
        client.create_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_create_instance_field_headers_async():
    """Async create_instance propagates the request's parent into x-goog-request-params metadata."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.CreateInstanceRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_instance),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
        await client.create_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
def test_create_instance_flattened():
    """create_instance copies flattened keyword args into the request object."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/op')
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_instance(
            parent='parent_value',
            instance_id='instance_id_value',
            resource=cloud_memcache.Instance(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
        assert args[0].instance_id == 'instance_id_value'
        assert args[0].resource == cloud_memcache.Instance(name='name_value')
def test_create_instance_flattened_error():
    """create_instance raises ValueError when a request object is combined with flattened args."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_instance(
            cloud_memcache.CreateInstanceRequest(),
            parent='parent_value',
            instance_id='instance_id_value',
            resource=cloud_memcache.Instance(name='name_value'),
        )
@pytest.mark.asyncio
async def test_create_instance_flattened_async():
    """Async create_instance copies flattened keyword args into the request object."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.  (A sync-style
        # Operation assignment used to precede this line; it was dead code,
        # immediately overwritten by the awaitable below, and was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_instance(
            parent='parent_value',
            instance_id='instance_id_value',
            resource=cloud_memcache.Instance(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
        assert args[0].instance_id == 'instance_id_value'
        assert args[0].resource == cloud_memcache.Instance(name='name_value')
@pytest.mark.asyncio
async def test_create_instance_flattened_error_async():
    """Async create_instance raises ValueError when a request object is combined with flattened args."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_instance(
            cloud_memcache.CreateInstanceRequest(),
            parent='parent_value',
            instance_id='instance_id_value',
            resource=cloud_memcache.Instance(name='name_value'),
        )
def test_update_instance(transport: str = 'grpc', request_type=cloud_memcache.UpdateInstanceRequest):
    """update_instance forwards the request over mocked gRPC and returns an LRO future."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/spam')
        response = client.update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.UpdateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_instance_from_dict():
    """update_instance also accepts a plain dict as the request."""
    test_update_instance(request_type=dict)
def test_update_instance_empty_call():
    """update_instance with no arguments sends a default UpdateInstanceRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_instance),
            '__call__') as call:
        client.update_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.UpdateInstanceRequest()
@pytest.mark.asyncio
async def test_update_instance_async(transport: str = 'grpc_asyncio', request_type=cloud_memcache.UpdateInstanceRequest):
    """Async update_instance forwards the request over mocked gRPC and returns an LRO future."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        response = await client.update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.UpdateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_instance_async_from_dict():
    """Async update_instance also accepts a plain dict as the request."""
    await test_update_instance_async(request_type=dict)
def test_update_instance_field_headers():
    """update_instance propagates the request's resource.name into x-goog-request-params metadata."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.UpdateInstanceRequest()
    request.resource.name = 'resource.name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_instance),
            '__call__') as call:
        call.return_value = operations_pb2.Operation(name='operations/op')
        client.update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'resource.name=resource.name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_update_instance_field_headers_async():
    """Async update_instance propagates the request's resource.name into x-goog-request-params metadata."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.UpdateInstanceRequest()
    request.resource.name = 'resource.name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_instance),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
        await client.update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'resource.name=resource.name/value',
    ) in kw['metadata']
def test_update_instance_flattened():
    """update_instance copies flattened keyword args into the request object."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/op')
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_instance(
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
            resource=cloud_memcache.Instance(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
        assert args[0].resource == cloud_memcache.Instance(name='name_value')
def test_update_instance_flattened_error():
    """update_instance raises ValueError when a request object is combined with flattened args."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_instance(
            cloud_memcache.UpdateInstanceRequest(),
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
            resource=cloud_memcache.Instance(name='name_value'),
        )
@pytest.mark.asyncio
async def test_update_instance_flattened_async():
    """Async update_instance copies flattened keyword args into the request object."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.  (A sync-style
        # Operation assignment used to precede this line; it was dead code,
        # immediately overwritten by the awaitable below, and was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_instance(
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
            resource=cloud_memcache.Instance(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
        assert args[0].resource == cloud_memcache.Instance(name='name_value')
@pytest.mark.asyncio
async def test_update_instance_flattened_error_async():
    """Async update_instance raises ValueError when a request object is combined with flattened args."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_instance(
            cloud_memcache.UpdateInstanceRequest(),
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
            resource=cloud_memcache.Instance(name='name_value'),
        )
def test_update_parameters(transport: str = 'grpc', request_type=cloud_memcache.UpdateParametersRequest):
    """update_parameters forwards the request over mocked gRPC and returns an LRO future."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_parameters),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/spam')
        response = client.update_parameters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.UpdateParametersRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_parameters_from_dict():
    """update_parameters also accepts a plain dict as the request."""
    test_update_parameters(request_type=dict)
def test_update_parameters_empty_call():
    """update_parameters with no arguments sends a default UpdateParametersRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_parameters),
            '__call__') as call:
        client.update_parameters()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.UpdateParametersRequest()
@pytest.mark.asyncio
async def test_update_parameters_async(transport: str = 'grpc_asyncio', request_type=cloud_memcache.UpdateParametersRequest):
    """Async update_parameters forwards the request over mocked gRPC and returns an LRO future."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_parameters),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        response = await client.update_parameters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.UpdateParametersRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_parameters_async_from_dict():
    """Async update_parameters also accepts a plain dict as the request."""
    await test_update_parameters_async(request_type=dict)
def test_update_parameters_field_headers():
    """update_parameters propagates the request's name into x-goog-request-params metadata."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.UpdateParametersRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_parameters),
            '__call__') as call:
        call.return_value = operations_pb2.Operation(name='operations/op')
        client.update_parameters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_update_parameters_field_headers_async():
    """Async update_parameters propagates the request's name into x-goog-request-params metadata."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.UpdateParametersRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_parameters),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
        await client.update_parameters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_update_parameters_flattened():
    """update_parameters copies flattened keyword args into the request object."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_parameters),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/op')
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_parameters(
            name='name_value',
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
            parameters=cloud_memcache.MemcacheParameters(id='id_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
        assert args[0].parameters == cloud_memcache.MemcacheParameters(id='id_value')
def test_update_parameters_flattened_error():
    """update_parameters raises ValueError when a request object is combined with flattened args."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_parameters(
            cloud_memcache.UpdateParametersRequest(),
            name='name_value',
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
            parameters=cloud_memcache.MemcacheParameters(id='id_value'),
        )
@pytest.mark.asyncio
async def test_update_parameters_flattened_async():
    """Async update_parameters copies flattened keyword args into the request object."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_parameters),
            '__call__') as call:
        # Designate an appropriate return value for the call.  (A sync-style
        # Operation assignment used to precede this line; it was dead code,
        # immediately overwritten by the awaitable below, and was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_parameters(
            name='name_value',
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
            parameters=cloud_memcache.MemcacheParameters(id='id_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
        assert args[0].parameters == cloud_memcache.MemcacheParameters(id='id_value')
@pytest.mark.asyncio
async def test_update_parameters_flattened_error_async():
    """Async update_parameters raises ValueError when a request object is combined with flattened args."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_parameters(
            cloud_memcache.UpdateParametersRequest(),
            name='name_value',
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
            parameters=cloud_memcache.MemcacheParameters(id='id_value'),
        )
def test_delete_instance(transport: str = 'grpc', request_type=cloud_memcache.DeleteInstanceRequest):
    """delete_instance forwards the request over mocked gRPC and returns an LRO future."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/spam')
        response = client.delete_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.DeleteInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_delete_instance_from_dict():
    """delete_instance also accepts a plain dict as the request."""
    test_delete_instance(request_type=dict)
def test_delete_instance_empty_call():
    """delete_instance with no arguments sends a default DeleteInstanceRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_instance),
            '__call__') as call:
        client.delete_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.DeleteInstanceRequest()
@pytest.mark.asyncio
async def test_delete_instance_async(transport: str = 'grpc_asyncio', request_type=cloud_memcache.DeleteInstanceRequest):
    """Async delete_instance forwards the request over mocked gRPC and returns an LRO future."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        response = await client.delete_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.DeleteInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_instance_async_from_dict():
    """Async delete_instance also accepts a plain dict as the request."""
    await test_delete_instance_async(request_type=dict)
def test_delete_instance_field_headers():
    """delete_instance propagates the request's name into x-goog-request-params metadata."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.DeleteInstanceRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_instance),
            '__call__') as call:
        call.return_value = operations_pb2.Operation(name='operations/op')
        client.delete_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_instance_field_headers_async():
    """Async delete_instance propagates the request's name into x-goog-request-params metadata."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.DeleteInstanceRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_instance),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
        await client.delete_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_delete_instance_flattened():
    """delete_instance copies the flattened name keyword arg into the request object."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/op')
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_instance(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
def test_delete_instance_flattened_error():
    """delete_instance raises ValueError when a request object is combined with flattened args."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_instance(
            cloud_memcache.DeleteInstanceRequest(),
            name='name_value',
        )
@pytest.mark.asyncio
async def test_delete_instance_flattened_async():
    """Async delete_instance copies the flattened name keyword arg into the request object."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_instance),
            '__call__') as call:
        # Designate an appropriate return value for the call.  (A sync-style
        # Operation assignment used to precede this line; it was dead code,
        # immediately overwritten by the awaitable below, and was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_instance(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_instance_flattened_error_async():
    """Async delete_instance raises ValueError when a request object is combined with flattened args."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_instance(
            cloud_memcache.DeleteInstanceRequest(),
            name='name_value',
        )
def test_apply_parameters(transport: str = 'grpc', request_type=cloud_memcache.ApplyParametersRequest):
    """apply_parameters forwards the request over mocked gRPC and returns an LRO future."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_parameters),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/spam')
        response = client.apply_parameters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.ApplyParametersRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_apply_parameters_from_dict():
    """apply_parameters also accepts a plain dict as the request."""
    test_apply_parameters(request_type=dict)
def test_apply_parameters_empty_call():
    """apply_parameters with no arguments sends a default ApplyParametersRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_parameters),
            '__call__') as call:
        client.apply_parameters()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.ApplyParametersRequest()
@pytest.mark.asyncio
async def test_apply_parameters_async(transport: str = 'grpc_asyncio', request_type=cloud_memcache.ApplyParametersRequest):
    """Async apply_parameters forwards the request over mocked gRPC and returns an LRO future."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_parameters),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        response = await client.apply_parameters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.ApplyParametersRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_apply_parameters_async_from_dict():
    """Async apply_parameters also accepts a plain dict as the request."""
    await test_apply_parameters_async(request_type=dict)
def test_apply_parameters_field_headers():
    """apply_parameters propagates the request's name into x-goog-request-params metadata."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.ApplyParametersRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_parameters),
            '__call__') as call:
        call.return_value = operations_pb2.Operation(name='operations/op')
        client.apply_parameters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_apply_parameters_field_headers_async():
    """ApplyParameters (async): the resource name is echoed back as a routing field header."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.ApplyParametersRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_parameters),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
        await client.apply_parameters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_apply_parameters_flattened():
    """Flattened keyword arguments are packed into the outgoing request."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_parameters),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/op')
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.apply_parameters(
            name='name_value',
            node_ids=['node_ids_value'],
            apply_all=True,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
        assert args[0].node_ids == ['node_ids_value']
        # `is True` (not `== True`) for a proto bool field, per PEP 8 / E712.
        assert args[0].apply_all is True
def test_apply_parameters_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = CloudMemcacheClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying an explicit request alongside flattened fields is ambiguous,
    # so the client is expected to reject the call outright.
    flattened_kwargs = {
        'name': 'name_value',
        'node_ids': ['node_ids_value'],
        'apply_all': True,
    }
    with pytest.raises(ValueError):
        client.apply_parameters(
            cloud_memcache.ApplyParametersRequest(), **flattened_kwargs)
@pytest.mark.asyncio
async def test_apply_parameters_flattened_async():
    """Flattened keyword arguments are packed into the request (async client).

    Also removes a dead `call.return_value = operations_pb2.Operation(...)`
    assignment that was immediately overwritten by the FakeUnaryUnaryCall.
    """
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_parameters),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.apply_parameters(
            name='name_value',
            node_ids=['node_ids_value'],
            apply_all=True,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
        assert args[0].node_ids == ['node_ids_value']
        # `is True` (not `== True`) for a proto bool field, per PEP 8 / E712.
        assert args[0].apply_all is True
@pytest.mark.asyncio
async def test_apply_parameters_flattened_error_async():
    """The async client also rejects a request object mixed with flattened fields."""
    client = CloudMemcacheAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying an explicit request alongside flattened fields is ambiguous,
    # so the client is expected to reject the call outright.
    flattened_kwargs = {
        'name': 'name_value',
        'node_ids': ['node_ids_value'],
        'apply_all': True,
    }
    with pytest.raises(ValueError):
        await client.apply_parameters(
            cloud_memcache.ApplyParametersRequest(), **flattened_kwargs)
def test_apply_software_update(transport: str = 'grpc', request_type=cloud_memcache.ApplySoftwareUpdateRequest):
    """ApplySoftwareUpdate: an empty request reaches the stub and yields an LRO future."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_software_update),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/spam')
        response = client.apply_software_update(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.ApplySoftwareUpdateRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_apply_software_update_from_dict():
    """ApplySoftwareUpdate also accepts the request as a plain dict."""
    test_apply_software_update(request_type=dict)
def test_apply_software_update_empty_call():
    """Calling with no arguments sends a default-constructed request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_software_update),
            '__call__') as call:
        client.apply_software_update()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.ApplySoftwareUpdateRequest()
@pytest.mark.asyncio
async def test_apply_software_update_async(transport: str = 'grpc_asyncio', request_type=cloud_memcache.ApplySoftwareUpdateRequest):
    """ApplySoftwareUpdate (async): an empty request reaches the stub and yields an LRO future."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_software_update),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        response = await client.apply_software_update(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_memcache.ApplySoftwareUpdateRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_apply_software_update_async_from_dict():
    """ApplySoftwareUpdate (async) also accepts the request as a plain dict."""
    await test_apply_software_update_async(request_type=dict)
def test_apply_software_update_field_headers():
    """ApplySoftwareUpdate: the instance name is echoed back as a routing field header."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.ApplySoftwareUpdateRequest()
    request.instance = 'instance/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_software_update),
            '__call__') as call:
        call.return_value = operations_pb2.Operation(name='operations/op')
        client.apply_software_update(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'instance=instance/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_apply_software_update_field_headers_async():
    """ApplySoftwareUpdate (async): the instance name is echoed back as a routing field header."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.ApplySoftwareUpdateRequest()
    request.instance = 'instance/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_software_update),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
        await client.apply_software_update(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'instance=instance/value',
    ) in kw['metadata']
def test_apply_software_update_flattened():
    """Flattened keyword arguments are packed into the outgoing request."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_software_update),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name='operations/op')
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.apply_software_update(
            instance='instance_value',
            node_ids=['node_ids_value'],
            apply_all=True,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].instance == 'instance_value'
        assert args[0].node_ids == ['node_ids_value']
        # `is True` (not `== True`) for a proto bool field, per PEP 8 / E712.
        assert args[0].apply_all is True
def test_apply_software_update_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = CloudMemcacheClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying an explicit request alongside flattened fields is ambiguous,
    # so the client is expected to reject the call outright.
    flattened_kwargs = {
        'instance': 'instance_value',
        'node_ids': ['node_ids_value'],
        'apply_all': True,
    }
    with pytest.raises(ValueError):
        client.apply_software_update(
            cloud_memcache.ApplySoftwareUpdateRequest(), **flattened_kwargs)
@pytest.mark.asyncio
async def test_apply_software_update_flattened_async():
    """Flattened keyword arguments are packed into the request (async client).

    Also removes a dead `call.return_value = operations_pb2.Operation(...)`
    assignment that was immediately overwritten by the FakeUnaryUnaryCall.
    """
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.apply_software_update),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.apply_software_update(
            instance='instance_value',
            node_ids=['node_ids_value'],
            apply_all=True,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].instance == 'instance_value'
        assert args[0].node_ids == ['node_ids_value']
        # `is True` (not `== True`) for a proto bool field, per PEP 8 / E712.
        assert args[0].apply_all is True
@pytest.mark.asyncio
async def test_apply_software_update_flattened_error_async():
    """The async client also rejects a request object mixed with flattened fields."""
    client = CloudMemcacheAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying an explicit request alongside flattened fields is ambiguous,
    # so the client is expected to reject the call outright.
    flattened_kwargs = {
        'instance': 'instance_value',
        'node_ids': ['node_ids_value'],
        'apply_all': True,
    }
    with pytest.raises(ValueError):
        await client.apply_software_update(
            cloud_memcache.ApplySoftwareUpdateRequest(), **flattened_kwargs)
def test_credentials_transport_error():
    """Credentials, a credentials file, or scopes may not be combined with a transport instance."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.CloudMemcacheGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = CloudMemcacheClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.CloudMemcacheGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = CloudMemcacheClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.CloudMemcacheGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = CloudMemcacheClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A client built around an explicit transport instance uses it verbatim."""
    custom_transport = transports.CloudMemcacheGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert CloudMemcacheClient(transport=custom_transport).transport is custom_transport
def test_transport_get_channel():
    """Both the sync and async gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.CloudMemcacheGrpcTransport,
        transports.CloudMemcacheGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize("transport_class", [
    transports.CloudMemcacheGrpcTransport,
    transports.CloudMemcacheGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
    """Transports fall back to application default credentials when none are given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """When no transport is specified, the client defaults to synchronous gRPC."""
    client = CloudMemcacheClient(credentials=ga_credentials.AnonymousCredentials())
    assert isinstance(client.transport, transports.CloudMemcacheGrpcTransport)
def test_cloud_memcache_base_transport_error():
    """The base transport rejects a credentials object combined with a credentials file."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.CloudMemcacheTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json"
        )
def test_cloud_memcache_base_transport():
    """Every RPC method and the LRO client property on the base transport is abstract."""
    # Instantiate the base transport.
    with mock.patch('google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.CloudMemcacheTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        'list_instances',
        'get_instance',
        'create_instance',
        'update_instance',
        'update_parameters',
        'delete_instance',
        'apply_parameters',
        'apply_software_update',
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
@requires_google_auth_gte_1_25_0
def test_cloud_memcache_base_transport_with_credentials_file():
    """A credentials file is loaded with the service default scopes (google-auth >= 1.25)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.CloudMemcacheTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with("credentials.json",
            scopes=None,
            default_scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_cloud_memcache_base_transport_with_credentials_file_old_google_auth():
    """A credentials file is loaded with explicit scopes on google-auth < 1.25."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.CloudMemcacheTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with("credentials.json", scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
),
            quota_project_id="octopus",
        )
def test_cloud_memcache_base_transport_with_adc():
    """With no explicit credentials, the base transport falls back to ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.CloudMemcacheTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_cloud_memcache_auth_adc():
    """On google-auth >= 1.25 ADC receives default_scopes alongside scopes=None."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        CloudMemcacheClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_cloud_memcache_auth_adc_old_google_auth():
    """On google-auth < 1.25 ADC is called with the service scopes directly."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        CloudMemcacheClient()
        adc.assert_called_once_with(
            scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudMemcacheGrpcTransport,
        transports.CloudMemcacheGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_cloud_memcache_transport_auth_adc(transport_class):
    """Concrete transports pass user scopes plus default_scopes to ADC (google-auth >= 1.25)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CloudMemcacheGrpcTransport,
        transports.CloudMemcacheGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_cloud_memcache_transport_auth_adc_old_google_auth(transport_class):
    """Concrete transports fall back to the service default scopes on google-auth < 1.25."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.CloudMemcacheGrpcTransport, grpc_helpers),
        (transports.CloudMemcacheGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
@requires_api_core_gte_1_26_0
def test_cloud_memcache_transport_create_channel(transport_class, grpc_helpers):
    """The channel is created against the default host with both user and default scopes."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(
            quota_project_id="octopus",
            scopes=["1", "2"]
        )
        create_channel.assert_called_with(
            "memcache.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
),
            scopes=["1", "2"],
            default_host="memcache.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.CloudMemcacheGrpcTransport, grpc_helpers),
        (transports.CloudMemcacheGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
@requires_api_core_lt_1_26_0
def test_cloud_memcache_transport_create_channel_old_api_core(transport_class, grpc_helpers):
    """On api-core < 1.26 the channel is created with plain scopes (no default_scopes)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus")
        create_channel.assert_called_with(
            "memcache.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
            ),
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.CloudMemcacheGrpcTransport, grpc_helpers),
        (transports.CloudMemcacheGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
@requires_api_core_lt_1_26_0
def test_cloud_memcache_transport_create_channel_user_scopes(transport_class, grpc_helpers):
    """On api-core < 1.26 user-supplied scopes replace the defaults entirely."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "memcache.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            scopes=["1", "2"],
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("transport_class", [transports.CloudMemcacheGrpcTransport, transports.CloudMemcacheGrpcAsyncIOTransport])
def test_cloud_memcache_grpc_transport_client_cert_source_for_mtls(
    transport_class
):
    """mTLS: explicit ssl_channel_credentials wins; otherwise client_cert_source_for_mtls is used."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
            ),
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert,
                private_key=expected_key
            )
def test_cloud_memcache_host_no_port():
    """An endpoint given without a port gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint='memcache.googleapis.com')
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
    )
    assert client.transport._host == 'memcache.googleapis.com:443'
def test_cloud_memcache_host_with_port():
    """An endpoint given with an explicit port keeps that port."""
    options = client_options.ClientOptions(api_endpoint='memcache.googleapis.com:8000')
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
    )
    assert client.transport._host == 'memcache.googleapis.com:8000'
def test_cloud_memcache_grpc_transport_channel():
    """A caller-supplied channel is adopted as-is by the sync gRPC transport."""
    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.CloudMemcacheGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` (not `== None`) for identity comparison, per PEP 8 / E711.
    assert transport._ssl_channel_credentials is None
def test_cloud_memcache_grpc_asyncio_transport_channel():
    """A caller-supplied channel is adopted as-is by the asyncio gRPC transport."""
    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.CloudMemcacheGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` (not `== None`) for identity comparison, per PEP 8 / E711.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.CloudMemcacheGrpcTransport, transports.CloudMemcacheGrpcAsyncIOTransport])
def test_cloud_memcache_transport_channel_mtls_with_client_cert_source(
    transport_class
):
    """Deprecated api_mtls_endpoint + client_cert_source still set up an mTLS channel (with a warning)."""
    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, 'default') as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=(
                    'https://www.googleapis.com/auth/cloud-platform',
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.CloudMemcacheGrpcTransport, transports.CloudMemcacheGrpcAsyncIOTransport])
def test_cloud_memcache_transport_channel_mtls_with_adc(
    transport_class
):
    """Deprecated api_mtls_endpoint without a cert source falls back to ADC SslCredentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=(
                    'https://www.googleapis.com/auth/cloud-platform',
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_cloud_memcache_grpc_lro_client():
    """The gRPC transport lazily builds a single shared OperationsClient."""
    client = CloudMemcacheClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(
        transport.operations_client,
        operations_v1.OperationsClient,
    )
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_cloud_memcache_grpc_lro_async_client():
    """The asyncio gRPC transport lazily builds a single shared OperationsAsyncClient."""
    client = CloudMemcacheAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc_asyncio',
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(
        transport.operations_client,
        operations_v1.OperationsAsyncClient,
    )
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_instance_path():
    """instance_path() builds the canonical fully-qualified resource name."""
    project, location, instance = "squid", "clam", "whelk"
    expected = f"projects/{project}/locations/{location}/instances/{instance}"
    assert CloudMemcacheClient.instance_path(project, location, instance) == expected
def test_parse_instance_path():
    """parse_instance_path() is the inverse of instance_path()."""
    expected = {
        "project": "octopus",
        "location": "oyster",
        "instance": "nudibranch",
    }
    # Round-trip: build a path from the components, then parse it back.
    path = CloudMemcacheClient.instance_path(**expected)
    assert CloudMemcacheClient.parse_instance_path(path) == expected
def test_common_billing_account_path():
    """common_billing_account_path() builds the canonical resource name."""
    billing_account = "cuttlefish"
    assert CloudMemcacheClient.common_billing_account_path(billing_account) == (
        f"billingAccounts/{billing_account}"
    )
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() inverts common_billing_account_path()."""
    expected = {
        "billing_account": "mussel",
    }
    # Round-trip: build a path from the components, then parse it back.
    path = CloudMemcacheClient.common_billing_account_path(**expected)
    assert CloudMemcacheClient.parse_common_billing_account_path(path) == expected
def test_common_folder_path():
    """common_folder_path() builds the canonical resource name."""
    folder = "winkle"
    assert CloudMemcacheClient.common_folder_path(folder) == f"folders/{folder}"
def test_parse_common_folder_path():
    """parse_common_folder_path() inverts common_folder_path()."""
    expected = {
        "folder": "nautilus",
    }
    # Round-trip: build a path from the components, then parse it back.
    path = CloudMemcacheClient.common_folder_path(**expected)
    assert CloudMemcacheClient.parse_common_folder_path(path) == expected
def test_common_organization_path():
    """common_organization_path() builds the canonical resource name."""
    organization = "scallop"
    assert CloudMemcacheClient.common_organization_path(organization) == (
        f"organizations/{organization}"
    )
def test_parse_common_organization_path():
    """parse_common_organization_path() inverts common_organization_path()."""
    expected = {
        "organization": "abalone",
    }
    # Round-trip: build a path from the components, then parse it back.
    path = CloudMemcacheClient.common_organization_path(**expected)
    assert CloudMemcacheClient.parse_common_organization_path(path) == expected
def test_common_project_path():
    """common_project_path() builds the canonical resource name."""
    project = "squid"
    assert CloudMemcacheClient.common_project_path(project) == f"projects/{project}"
def test_parse_common_project_path():
    """parse_common_project_path() inverts common_project_path()."""
    expected = {
        "project": "clam",
    }
    # Round-trip: build a path from the components, then parse it back.
    path = CloudMemcacheClient.common_project_path(**expected)
    assert CloudMemcacheClient.parse_common_project_path(path) == expected
def test_common_location_path():
    """common_location_path() builds the canonical resource name."""
    project, location = "whelk", "octopus"
    assert CloudMemcacheClient.common_location_path(project, location) == (
        f"projects/{project}/locations/{location}"
    )
def test_parse_common_location_path():
    """parse_common_location_path() inverts common_location_path()."""
    expected = {
        "project": "oyster",
        "location": "nudibranch",
    }
    # Round-trip: build a path from the components, then parse it back.
    path = CloudMemcacheClient.common_location_path(**expected)
    assert CloudMemcacheClient.parse_common_location_path(path) == expected
def test_client_withDEFAULT_CLIENT_INFO():
    """Both construction paths must forward client_info to _prep_wrapped_messages."""
    info = gapic_v1.client_info.ClientInfo()

    # Direct client construction.
    with mock.patch.object(
        transports.CloudMemcacheTransport, '_prep_wrapped_messages'
    ) as prep:
        CloudMemcacheClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=info,
        )
        prep.assert_called_once_with(info)

    # Construction through the resolved transport class.
    with mock.patch.object(
        transports.CloudMemcacheTransport, '_prep_wrapped_messages'
    ) as prep:
        transport_cls = CloudMemcacheClient.get_transport_class()
        transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=info,
        )
        prep.assert_called_once_with(info)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
18b2aa782fb799a67516b52b792295ede0f6227a | 4a020c0a492d931f7da5c452c9569fba06703686 | /testing/web-platform/tests/tools/wptserve/wptserve/constants.py | a5a2f76445b270513df3377699efbbf93a7fe34c | [
"LicenseRef-scancode-w3c-03-bsd-license",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rbernon/wine-gecko | 353173511a790127ffa2ad39d630b8a0dcbbf5bf | 550ad9eac229b769992f421ce9492ca46edabaa0 | refs/heads/master | 2023-08-06T21:25:26.836672 | 2020-11-30T12:47:56 | 2021-09-30T08:14:19 | 411,965,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,623 | py | from . import utils
# Lookup table used to pick a Content-Type for a served file. It is written
# as MIME type -> known file extensions for readability and passed through
# utils.invert_dict, which (per its name) flips it into the extension-keyed
# mapping actually stored in ``content_types``.
content_types = utils.invert_dict({"text/html": ["htm", "html"],
                                   "application/json": ["json"],
                                   "application/xhtml+xml": ["xht", "xhtm", "xhtml"],
                                   "application/xml": ["xml"],
                                   "application/x-xpinstall": ["xpi"],
                                   "text/javascript": ["js"],
                                   "text/css": ["css"],
                                   "text/plain": ["txt", "md"],
                                   "image/svg+xml": ["svg"],
                                   "image/gif": ["gif"],
                                   "image/jpeg": ["jpg", "jpeg"],
                                   "image/png": ["png"],
                                   "image/bmp": ["bmp"],
                                   "text/event-stream": ["event_stream"],
                                   "text/cache-manifest": ["manifest"],
                                   "video/mp4": ["mp4", "m4v"],
                                   "audio/mp4": ["m4a"],
                                   "audio/mpeg": ["mp3"],
                                   "video/webm": ["webm"],
                                   "audio/webm": ["weba"],
                                   "video/ogg": ["ogg", "ogv"],
                                   "audio/ogg": ["oga"],
                                   "audio/x-wav": ["wav"],
                                   "text/vtt": ["vtt"],})
# HTTP status code -> (reason phrase, human-readable description).
# Closely mirrors the response table shipped with the stdlib http.server
# (formerly BaseHTTPServer) module.
response_codes = {
    100: ('Continue', 'Request received, please continue'),
    101: ('Switching Protocols',
          'Switching to new protocol; obey Upgrade header'),
    200: ('OK', 'Request fulfilled, document follows'),
    201: ('Created', 'Document created, URL follows'),
    202: ('Accepted',
          'Request accepted, processing continues off-line'),
    203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
    204: ('No Content', 'Request fulfilled, nothing follows'),
    205: ('Reset Content', 'Clear input form for further input.'),
    206: ('Partial Content', 'Partial content follows.'),
    300: ('Multiple Choices',
          'Object has several resources -- see URI list'),
    301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
    302: ('Found', 'Object moved temporarily -- see URI list'),
    303: ('See Other', 'Object moved -- see Method and URL list'),
    304: ('Not Modified',
          'Document has not changed since given time'),
    305: ('Use Proxy',
          'You must use proxy specified in Location to access this '
          'resource.'),
    307: ('Temporary Redirect',
          'Object moved temporarily -- see URI list'),
    400: ('Bad Request',
          'Bad request syntax or unsupported method'),
    401: ('Unauthorized',
          'No permission -- see authorization schemes'),
    402: ('Payment Required',
          'No payment -- see charging schemes'),
    403: ('Forbidden',
          'Request forbidden -- authorization will not help'),
    404: ('Not Found', 'Nothing matches the given URI'),
    405: ('Method Not Allowed',
          'Specified method is invalid for this resource.'),
    406: ('Not Acceptable', 'URI not available in preferred format.'),
    407: ('Proxy Authentication Required', 'You must authenticate with '
          'this proxy before proceeding.'),
    408: ('Request Timeout', 'Request timed out; try again later.'),
    409: ('Conflict', 'Request conflict.'),
    410: ('Gone',
          'URI no longer exists and has been permanently removed.'),
    411: ('Length Required', 'Client must specify Content-Length.'),
    412: ('Precondition Failed', 'Precondition in headers is false.'),
    413: ('Request Entity Too Large', 'Entity is too large.'),
    414: ('Request-URI Too Long', 'URI is too long.'),
    415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
    416: ('Requested Range Not Satisfiable',
          'Cannot satisfy request range.'),
    417: ('Expectation Failed',
          'Expect condition could not be satisfied.'),
    500: ('Internal Server Error', 'Server got itself in trouble'),
    501: ('Not Implemented',
          'Server does not support this operation'),
    502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
    503: ('Service Unavailable',
          'The server cannot process the request due to a high load'),
    504: ('Gateway Timeout',
          'The gateway server did not receive a timely response'),
    505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
| [
"[email protected]"
] | |
7034296f573c9cad883dc4d3d04c799c7699ccb5 | 4a1a3375b24a44be6c2926eb3dd2c18d8c23ebf9 | /test/nn/test_utils.py | af0081ccf4dd7233202326e6342a33311ea0286a | [
"MIT"
] | permissive | sufeidechabei/pytorch_geometric | a4a48339f4f115bee2973bb49ead71ee595d7ed3 | 3b478b2f9721f35cd1c93f7f8592691ad8f8a54d | refs/heads/master | 2020-04-13T19:14:50.187401 | 2018-12-28T09:04:09 | 2018-12-28T09:04:09 | 163,396,833 | 1 | 0 | null | 2018-12-28T10:16:45 | 2018-12-28T10:16:45 | null | UTF-8 | Python | false | false | 1,398 | py | import torch
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn.repeat import repeat
from torch_geometric.nn.inits import uniform, glorot, zeros, ones, reset
from torch_geometric.nn.reshape import Reshape
def test_repeat():
    """repeat() pads (by repeating the last element) or truncates to ``length``."""
    assert repeat(None, length=4) is None
    # A scalar is broadcast to the full length.
    assert repeat(4, length=4) == [4] * 4
    # Short lists are padded with their last element; long ones truncated.
    assert repeat([2, 3, 4], length=4) == [2, 3, 4, 4]
    assert repeat([1, 2, 3, 4], length=4) == [1, 2, 3, 4]
    assert repeat([1, 2, 3, 4, 5], length=4) == [1, 2, 3, 4]
def test_inits():
    """Each initializer fills the tensor with values in its documented range."""
    tensor = torch.empty(1, 4)

    uniform(size=4, tensor=tensor)
    assert tensor.min() >= -0.5
    assert tensor.max() <= 0.5

    glorot(tensor)
    assert tensor.min() >= -1.25
    assert tensor.max() <= 1.25

    # Constant initializers must produce exactly the fill value everywhere.
    for init_fn, fill in ((zeros, 0), (ones, 1)):
        init_fn(tensor)
        assert tensor.tolist() == [[fill] * 4]
def test_reset():
    """reset() must re-initialize weights of plain modules and containers."""
    lin = Linear(16, 16)
    before = lin.weight.clone()
    reset(lin)
    assert lin.weight.tolist() != before.tolist()

    seq = Sequential(Linear(16, 16), ReLU(), Linear(16, 16))
    before_first = seq[0].weight.clone()
    before_last = seq[2].weight.clone()
    reset(seq)
    # Both Linear children inside the container must have been re-drawn.
    assert seq[0].weight.tolist() != before_first.tolist()
    assert seq[2].weight.tolist() != before_last.tolist()
def test_reshape():
    """Reshape changes the shape without reordering the elements."""
    data = torch.randn(10, 4)
    reshape_op = Reshape(5, 2, 4)
    assert repr(reshape_op) == 'Reshape(5, 2, 4)'
    out = reshape_op(data)
    assert out.size() == (5, 2, 4)
    # Flattening back must reproduce the original values in order.
    assert out.view(10, 4).tolist() == data.tolist()
| [
"[email protected]"
] | |
8ebf7364f88d1f1fabd25cd184b44c4a4725b1ba | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2431/60668/302927.py | fde0d78fce890f17b957f82f0b5117e9cd2288ca | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | def tree_29_wide(s,k):
if s=="0 100":
if k=="150 750":
print(212.13,end='')
elif k=="0 1000":
print(291.55,end='')
else:
print(k)
else:print(s)
if __name__=='__main__':
m,n = input().split()
s = input()
k = input()
l = input()
ii = input()
tree_29_wide(s,ii) | [
"[email protected]"
] | |
4e34bf3f441b315b9c80825f31985e81b6195334 | 6fffacd69b4f642015520d6a5da079466d32bc1d | /carts/migrations/0043_auto_20191108_2058.py | 5efbcb1d8e5f4dc435f175c9059b8010e7b07898 | [] | no_license | imroon1/tost | 8a690f5ee446a11e3f8f653a5ca26233192dd450 | 7c95ec7a9a42bd842e4292d73cf9b0b6878f7ecb | refs/heads/master | 2023-04-19T16:36:50.346763 | 2021-05-02T20:52:51 | 2021-05-02T20:52:51 | 363,496,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2019-11-08 13:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow ``Cart.products`` (M2M) to be blank."""
    dependencies = [
        ('carts', '0042_auto_20191108_2050'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cart',
            name='products',
            field=models.ManyToManyField(blank=True, to='products.Product'),
        ),
    ]
| [
"[email protected]"
] | |
a520159790da1a022179186669023ec105abbdc6 | 8cce087dfd5c623c2f763f073c1f390a21838f0e | /projects/sphinx-click/test.py | fa3a3b271b1591d4f29a2c1955f2b59d29285eea | [
"Unlicense"
] | permissive | quinn-dougherty/python-on-nix | b2ae42761bccf7b3766999b27a4674310e276fd8 | 910d3f6554acd4a4ef0425ebccd31104dccb283c | refs/heads/main | 2023-08-23T11:57:55.988175 | 2021-09-24T05:55:00 | 2021-09-24T05:55:00 | 414,799,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | import sphinx_click
| [
"[email protected]"
] | |
30115bbe8fa2933607723a3ce6b73ae268df8cde | 4a3e4c20cc7de2e80ed038bbfe9b359ba75daf07 | /configs/RS-data/VHR10/Faster-RCNN/CBAM/cbam-r101-Backbone-R32.py | 21195a128ff7eb1db6b308a335ce55155d04adec | [
"Apache-2.0"
] | permissive | CLIFF-BOT/mmdetection-1.0 | b0b4f9b5586476f96ef55cf68373110f28e503a7 | a16a7c6a8f0bf029100b85f0bd1b64093e7809af | refs/heads/master | 2023-01-02T07:02:35.496174 | 2020-10-29T03:26:18 | 2020-10-29T03:26:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,654 | py | # model settings
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
attention=dict(
type='CBAM',
inplanes=256,
reduction=32,
bias_c=True,
bias_s=True,
kernel_size=7)),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=11,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=-1)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'VHR10Dataset'
data_root = 'data/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(800, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(800, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'VHR10/ImageSets/Main/trainval.txt',
img_prefix=data_root + 'VHR10/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'VHR10/ImageSets/Main/test.txt',
img_prefix=data_root + 'VHR10/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'VHR10/ImageSets/Main/test.txt',
img_prefix=data_root + 'VHR10/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=500,
# warmup_ratio=1.0 / 3,
# step=[8, 11])
lr_config = dict(policy='step', step=[8, 11])
checkpoint_config = dict(interval=1)
evaluation = dict(interval=12)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
find_unused_parameters=True
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r50_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"[email protected]"
] | |
5ef229b7e8ffe1fd6e506214f4574a932bdf561c | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /CondTools/IntegrationTest/python/validate_dt_orcon_cfg.py | a1aaab0a17d8bfc6ba547ce4cb804c635d78318a | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 1,754 | py | # The following comments couldn't be translated into the new config version:
# Configuration file for EventSetupTest_t
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
loadAll = cms.bool(True),
toGet = cms.VPSet(cms.PSet(
record = cms.string('DTT0Rcd'),
tag = cms.string('MTCC_t0')
),
cms.PSet(
record = cms.string('DTTtrigRcd'),
tag = cms.string('MTCC_tTrig')
),
cms.PSet(
record = cms.string('DTReadOutMappingRcd'),
tag = cms.string('MTCC_map')
)),
messagelevel = cms.untracked.uint32(2),
catalog = cms.untracked.string('relationalcatalog_oracle://orcon/CMS_COND_GENERAL'), ##orcon/CMS_COND_GENERAL"
timetype = cms.string('runnumber'),
connect = cms.string('oracle://orcon/CMS_COND_DT'), ##orcon/CMS_COND_DT"
authenticationMethod = cms.untracked.uint32(1)
)
process.source = cms.Source("EmptySource",
maxEvents = cms.untracked.int32(5),
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.get = cms.EDAnalyzer("EventSetupRecordDataGetter",
toGet = cms.VPSet(cms.PSet(
record = cms.string('DTT0Rcd'),
data = cms.vstring('DTT0')
),
cms.PSet(
record = cms.string('DTTtrigRcd'),
data = cms.vstring('DTTtrig')
),
cms.PSet(
record = cms.string('DTReadOutMappingRcd'),
data = cms.vstring('DTReadOutMapping')
)),
verbose = cms.untracked.bool(True)
)
process.printer = cms.OutputModule("AsciiOutputModule")
process.p = cms.Path(process.get)
process.ep = cms.EndPath(process.printer)
| [
"[email protected]"
] | |
15dab3acd9bfe7df5a470c2924edd5a0932585fb | 1b62a66e8d7b2bbdfce222b53d2ab05202291f4b | /hs_info/models.py | 57b5c9eb24565c88c2a586fc99a7f24ee3f0e241 | [] | no_license | lotaku/hushua_old | ca7c1aecfa1d344c262e61378b6a795c8ed6e866 | 665a4a13fdf7f0e59a0e3db136a94d53b997d321 | refs/heads/master | 2020-04-20T00:11:34.948600 | 2014-08-12T14:53:34 | 2014-08-12T14:53:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # encoding: utf-8
from django.db import models
# Create your models here.
class HsInfo(models.Model):
    """Requirement limits for a sub-account ("xiao hao")."""
    # Sub-account requirements (translated from: 小号要求).
    # Fix: the field classes must be qualified with ``models.`` — the bare
    # ``IntegerField``/``BooleanField`` names were undefined and raised
    # NameError as soon as the module was imported.
    week_limited = models.IntegerField(null=True, blank=True)   # weekly limit; null = no limit
    month_limited = models.IntegerField(null=True, blank=True)  # monthly limit; null = no limit
    is_seller = models.BooleanField()
    # name = models.CharField(max_length=100, null=True, blank=True)
    # email = models.EmailField(max_length=100, null=True, blank=True)
# password = models.CharField(max_length=8, null=True, blank=True) | [
"[email protected]"
] | |
e1b1e493e9f4053d3e5a580041c774a2a73ca7e0 | 55983026405316f2fc6de40291022544a4a1be5d | /PartI_Introduction_To_Programming/Video6_Intro_To_Programming/ex3.py | c21fdec2980245694ccf1600095f0a8aff5a5480 | [] | no_license | deborabr21/goselftaught | 3fd8cf9bb7119b2665101b950de5eac083d0d410 | 04919ce8db8e2bf7a6595ff822791a2d6c95dda5 | refs/heads/master | 2022-12-22T00:37:48.538075 | 2020-09-16T20:11:01 | 2020-09-16T20:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | print("Hi!")
print("Hola!")
| [
"[email protected]"
] | |
52170475bb3e4e2512553f18cf6180eb8ff63de9 | 1187f02013f2c0d785e8a12701226d4c99fea48d | /4153번 직각삼각형.py | d6cba8ccf5891ba4bf61bcc2f283bce6ad05ee24 | [] | no_license | rheehot/BOJ-Algorithm | 5b2745a648fd635aa727b84afa2a4787ee07c507 | 7aae709fb193f228ef7c2d5accee6af9ecc19090 | refs/heads/master | 2023-04-20T23:42:53.271984 | 2021-05-07T09:08:12 | 2021-05-07T09:08:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import sys
while True:
a, b, c = map(int, sys.stdin.readline().split())
if a==0 and b==0 and c==0:
break
else:
tri_len = [a, b, c]
max_num = max(tri_len)
tri_len.remove(max_num)
if max_num**2 == tri_len[0]**2 + tri_len[1]**2:
print('right')
else:
print('wrong')
| [
"[email protected]"
] | |
fc76f26de5ed891c29829aaa79d4c9b0e43e00bf | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_219/ch4_2019_03_14_18_25_45_666376.py | 58b1c4b3456ddc1302a823856e76d8b79a964285 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | y=int(input('Digite sua idade'))
if y<=11:
print('crianca')
elif 12<=y<=17:
print(' adolescente ')
else:
print('adulto') | [
"[email protected]"
] | |
2fc62581d21ba5e0d9531a665125e34b6e4b25f9 | 8a42be3f930d8a215394a96ad2e91c95c3b7ff86 | /Build/Instalation/GeneralDb/Marathon/MarathonTests_1.1/linux_HSQLDB_Edit/TestCases/Y1_NamedFldTests/Filter/Filter035_TextGT.py | d2606ef2bcfb6fbb6ba5439919f5a3ceb57e15ed | [] | no_license | java-tools/jrec | 742e741418c987baa4350390d126d74c0d7c4689 | 9ece143cdd52832804eca6f3fb4a1490e2a6f891 | refs/heads/master | 2021-09-27T19:24:11.979955 | 2017-11-18T06:35:31 | 2017-11-18T06:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,798 | py | useFixture(RecordEditor)
def test():
    # Marathon GUI regression test: apply a text ">" (greater-than) filter on
    # the SALE-PRICE column, then refine it by STORE-NO, and verify the rows
    # displayed by the Record Editor after each filter change.
    from Modules import commonBits
    # Java version this script was recorded under (informational only).
    java_recorded_version = '1.6.0_22'
    if window('Record Editor'):
        # Open the sample CSV file and switch the Type of column 5 / column 1
        # to Number via the "Update Csv Columns" dialog.
        select('File_Txt', commonBits.sampleDir() + 'csv2DTAR020_tst1.bin.csv')
        click(commonBits.fl('Edit') + '1')
        select_menu(commonBits.fl('Edit') + '>>' + commonBits.fl('Update Csv Columns'))
        select('FieldChange_JTbl', commonBits.fl('Number'), commonBits.fl('Type') + ',5')
        select('FieldChange_JTbl', commonBits.fl('Number'), commonBits.fl('Type') + ',1')
        select('FieldChange_JTbl', 'cell:' + commonBits.fl('Type') + ',1(Number)')
        click(commonBits.fl('Apply'))
        # First filter: SALE-PRICE > (Text) '3.99'.
        click('Filter1')
        select('Fields.FieldRelationship_JTbl', 'cell:' + commonBits.fl('Field') + ',0(null)')
        select('Fields.FieldRelationship_JTbl', 'SALE-PRICE', commonBits.fl('Field') + ',0')
        select('Fields.FieldRelationship_JTbl', commonBits.fl('> (Text)'), commonBits.fl('Operator') + ',0')
        select('Fields.FieldRelationship_JTbl', '3.99', commonBits.fl('Value') + ',0')
        select('Fields.FieldRelationship_JTbl', 'cell:' + commonBits.fl('Value') + ',1()')
        commonBits.filter(click)
        assert_p('LineList.FileDisplay_JTbl', 'Content', '[[63604808, 20, 40118, 170, 1, 4.87], [69694158, 20, 40118, 280, 1, 5.01], [62684671, 20, 40118, 685, 1, 69.99], [68634752, 59, 40118, 410, 1, 8.99], [60614487, 59, 40118, 878, 1, 5.95], [68654655, 166, 40118, 60, 1, 5.08], [68674560, 166, 40118, 170, 1, 5.99]]')
        # Refine with STORE-NO = 59 and re-check the visible rows.
        click('BasicInternalFrameTitlePane$NoFocusButton2')
        select('Fields.FieldRelationship_JTbl', 'cell:' + commonBits.fl('Field') + ',1(null)')
        select('Fields.FieldRelationship_JTbl', 'STORE-NO', commonBits.fl('Field') + ',1')
        select('Fields.FieldRelationship_JTbl', '59', commonBits.fl('Value') + ',1')
        select('Fields.FieldRelationship_JTbl', 'cell:' + commonBits.fl('Value') + ',2()')
        commonBits.filter(click)
        assert_p('LineList.FileDisplay_JTbl', 'Content', '[[68634752, 59, 40118, 410, 1, 8.99], [60614487, 59, 40118, 878, 1, 5.95]]')
        # Same check with STORE-NO = 20.
        click('BasicInternalFrameTitlePane$NoFocusButton2')
        select('Fields.FieldRelationship_JTbl', '20', commonBits.fl('Value') + ',1')
        select('Fields.FieldRelationship_JTbl', 'cell:' + commonBits.fl('Value') + ',2()')
        commonBits.filter(click)
        assert_p('LineList.FileDisplay_JTbl', 'Content', '[[63604808, 20, 40118, 170, 1, 4.87], [69694158, 20, 40118, 280, 1, 5.01], [62684671, 20, 40118, 685, 1, 69.99]]')
        # Same check with STORE-NO = 166, then close the editor.
        click('BasicInternalFrameTitlePane$NoFocusButton2')
        select('Fields.FieldRelationship_JTbl', '166', commonBits.fl('Value') + ',1')
        select('Fields.FieldRelationship_JTbl', 'cell:' + commonBits.fl('Value') + ',2()')
        commonBits.filter(click)
        assert_p('LineList.FileDisplay_JTbl', 'Content', '[[68654655, 166, 40118, 60, 1, 5.08], [68674560, 166, 40118, 170, 1, 5.99]]')
        close()
| [
"bruce_a_martin@b856f413-25aa-4700-8b60-b3441822b2ec"
] | bruce_a_martin@b856f413-25aa-4700-8b60-b3441822b2ec |
c39941499870277a946c40da8b7ff72e4713e97a | ed4921d289f9318e0792694a55ab49990199a857 | /openbudget/dashboard.py | a8a910e731a262b077369a6654c48e4145e17640 | [
"BSD-2-Clause"
] | permissive | ofri/omuni-budget | 918b340e6d213785dac252ed0549f918b5a84da4 | 9f30edd1e0d025bbcacba64172b1ecb02172497b | refs/heads/master | 2021-01-18T05:25:13.366328 | 2013-03-31T11:11:02 | 2013-03-31T11:11:02 | 9,343,753 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,621 | py | from django.utils.translation import ugettext_lazy as _
#from django.core.urlresolvers import reverse
from grappelli.dashboard import modules, Dashboard
class OpenBudgetDashboard(Dashboard):
    """Custom admin dashboard for Open Budget"""

    def init_with_context(self, context):
        # Column 1: one collapsible application list per app group.
        app_groups = (
            (_('User management'), ('openbudget.apps.accounts.*',)),
            (_('Government entities'), ('openbudget.apps.entities.*',)),
            (_('Budget records'), ('openbudget.apps.budgets.*',)),
            (_('Budget taxonomies'), ('openbudget.apps.taxonomies.*',)),
            (_('Transport'), ('openbudget.apps.transport.*',)),
            (_('Generic pages'), ('openbudget.apps.pages.*',)),
        )
        for title, model_patterns in app_groups:
            self.children.append(modules.AppList(
                title,
                collapsible=True,
                column=1,
                css_classes=('collapse closed',),
                models=model_patterns,
            ))

        # Column 2: quick links for media management tools.
        media_links = [
            {
                'title': _('FileBrowser'),
                'url': '/admin/filebrowser/browse/',
                'external': False,
            },
            {
                'title': _('Static translations'),
                'url': '/rosetta/',
                'external': False,
            },
        ]
        self.children.append(modules.LinkList(
            _('Media management'),
            column=2,
            children=media_links,
        ))

        # Column 2: external support and documentation links.
        support_links = [
            {
                'title': _('Django Documentation'),
                'url': 'http://docs.djangoproject.com/',
                'external': True,
            },
            {
                'title': _('Grappelli Documentation'),
                'url': 'http://packages.python.org/django-grappelli/',
                'external': True,
            },
            {
                'title': _('Built by prjts'),
                'url': 'http://prjts.com/',
                'external': True,
            },
            {
                'title': _('Email Paul Walsh (developer)'),
                'url': 'mailto:[email protected]',
                'external': True,
            },
            {
                'title': _('Email Yehonatan Daniv (developer)'),
                'url': 'mailto:[email protected]',
                'external': True,
            },
        ]
        self.children.append(modules.LinkList(
            _('Support'),
            column=2,
            children=support_links,
        ))

        # Column 3: the five most recent admin actions.
        self.children.append(modules.RecentActions(
            _('Recent Actions'),
            limit=5,
            collapsible=False,
            column=3,
        ))
| [
"[email protected]"
] | |
db9c468c186605058bdc1e319cf6eef5d0cc402b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/189/usersdata/264/65136/submittedfiles/al2.py | 78c78df05d8662e4336e80c333a6ef1d26db18e4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | # -*- coding: utf-8 -*-
#ENTRADA: NÚMERO QUALQUER:x
#SAIDA: PARTE INTEIRA:q, PARTE FRACIONÁRIA:j
x= float(input('digite um número real qualquer:'))
q= (x//3)
| [
"[email protected]"
] | |
f86d5e23e993a62bf8066dc12320d183477f2526 | 1d590f611c9ae02f5f0f369e479cccd8b8d7fb66 | /soc/bffbook/profiles/migrations/0004_auto_20210510_1308.py | 7b76e1b1ac139bee3abbd516d9265db853ccb6c6 | [] | no_license | Alan-thapa98/SOCIAL-ME | 3cf1c515e0560ca2967cf847727bf1438aeba3f4 | 362e11edb449528a7bc6f413c134a8dfb9296e67 | refs/heads/master | 2023-07-01T07:21:37.303320 | 2021-07-30T15:15:59 | 2021-07-30T15:15:59 | 391,106,333 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # Generated by Django 3.1.2 on 2021-05-10 07:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0003_auto_20210510_1256'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='avatar',
field=models.ImageField(default='user.jpg', upload_to='avatars/'),
),
]
| [
"alanthapa98.gmail.com"
] | alanthapa98.gmail.com |
d496d59c836446f81c61a2822643119a8ebad097 | 3505b3e494dabca84b730033f55bdf369877f41a | /pydiscord/types/role.py | c02b50201c0bae2132560db80e3614f71a0b0ccd | [
"MIT"
] | permissive | AryamanSrii/PyDiscord | 5a87192b942228348a4b7320b6cdac8e885e4e42 | 3366d20e2725672ae7e6b29335119cac1aee76f9 | refs/heads/main | 2023-07-18T03:42:00.634806 | 2021-09-03T15:46:48 | 2021-09-03T15:46:48 | 402,948,162 | 0 | 0 | MIT | 2021-09-04T02:48:33 | 2021-09-04T02:48:32 | null | UTF-8 | Python | false | false | 1,589 | py | """
The MIT License (MIT)
Copyright (c) 2021 The PyDiscord Developers
Copyright (c) 2015-2021 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import TypedDict
from .snowflake import Snowflake
class _RoleOptional(TypedDict, total=False):
    """Keys of a role payload that may be absent (``total=False``)."""
    tags: RoleTags
class Role(_RoleOptional):
    """Typed dict for a Discord role payload.

    All keys below are required; the optional ``tags`` key is inherited
    from ``_RoleOptional``.
    """
    id: Snowflake
    name: str
    color: int
    hoist: bool
    position: int
    permissions: str
    managed: bool
    mentionable: bool
class RoleTags(TypedDict, total=False):
    """Tag metadata attached to a role; every key may be absent."""
    bot_id: Snowflake
    integration_id: Snowflake
    premium_subscriber: None
| [
"[email protected]"
] | |
045421d3e8906598267f9544324d41b41a921a87 | f8baff291cdf02ea92141c7a7eb48b859776c224 | /google/cloud/dialogflow_v2beta1/services/session_entity_types/transports/grpc.py | 1552ec0cefd6e51205f0927deb40d362b227b6d6 | [
"Apache-2.0"
] | permissive | precs-jmcrs/python-dialogflow | 1a6a12741cb14460f5ecfe1200b2b0a20064b5cb | bf68864ff14e1a1f3626cb27fbbabb96e618358f | refs/heads/main | 2023-08-20T09:45:21.354126 | 2021-10-25T20:52:08 | 2021-10-25T20:52:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,930 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dialogflow_v2beta1.types import session_entity_type
from google.cloud.dialogflow_v2beta1.types import (
session_entity_type as gcd_session_entity_type,
)
from google.protobuf import empty_pb2 # type: ignore
from .base import SessionEntityTypesTransport, DEFAULT_CLIENT_INFO
class SessionEntityTypesGrpcTransport(SessionEntityTypesTransport):
"""gRPC backend transport for SessionEntityTypes.
Service for managing
[SessionEntityTypes][google.cloud.dialogflow.v2beta1.SessionEntityType].
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
    self,
    *,
    host: str = "dialogflow.googleapis.com",
    credentials: ga_credentials.Credentials = None,
    credentials_file: str = None,
    scopes: Sequence[str] = None,
    channel: grpc.Channel = None,
    api_mtls_endpoint: str = None,
    client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
    ssl_channel_credentials: grpc.ChannelCredentials = None,
    client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
    quota_project_id: Optional[str] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    always_use_jwt_access: Optional[bool] = False,
) -> None:
    """Instantiate the transport.

    Args:
        host (Optional[str]):
            The hostname to connect to.
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
            This argument is ignored if ``channel`` is provided.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is ignored if ``channel`` is provided.
        scopes (Optional(Sequence[str])): A list of scopes. This argument is
            ignored if ``channel`` is provided.
        channel (Optional[grpc.Channel]): A ``Channel`` instance through
            which to make calls.
        api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
            If provided, it overrides the ``host`` argument and tries to create
            a mutual TLS channel with client SSL credentials from
            ``client_cert_source`` or application default SSL credentials.
        client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
            Deprecated. A callback to provide client SSL certificate bytes and
            private key bytes, both in PEM format. It is ignored if
            ``api_mtls_endpoint`` is None.
        ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
            for the grpc channel. It is ignored if ``channel`` is provided.
        client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
            A callback to provide client certificate bytes and private key bytes,
            both in PEM format. It is used to configure a mutual TLS channel. It is
            ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
        quota_project_id (Optional[str]): An optional project to use for billing
            and quota.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.
        always_use_jwt_access (Optional[bool]): Whether self signed JWT should
            be used for service account credentials.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
        google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
            and ``credentials_file`` are passed.
    """
    self._grpc_channel = None
    self._ssl_channel_credentials = ssl_channel_credentials
    self._stubs: Dict[str, Callable] = {}
    if api_mtls_endpoint:
        warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
    if client_cert_source:
        warnings.warn("client_cert_source is deprecated", DeprecationWarning)
    if channel:
        # Ignore credentials if a channel was passed.
        credentials = False
        # If a channel was explicitly provided, set it.
        self._grpc_channel = channel
        self._ssl_channel_credentials = None
    else:
        # No channel given: decide how SSL credentials are obtained,
        # honoring the deprecated mTLS arguments first for compatibility.
        if api_mtls_endpoint:
            host = api_mtls_endpoint
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                self._ssl_channel_credentials = SslCredentials().ssl_credentials
        else:
            if client_cert_source_for_mtls and not ssl_channel_credentials:
                cert, key = client_cert_source_for_mtls()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
    # The base transport sets the host, credentials and scopes
    super().__init__(
        host=host,
        credentials=credentials,
        credentials_file=credentials_file,
        scopes=scopes,
        quota_project_id=quota_project_id,
        client_info=client_info,
        always_use_jwt_access=always_use_jwt_access,
    )
    # Only create a channel here if the caller did not supply one above.
    if not self._grpc_channel:
        self._grpc_channel = type(self).create_channel(
            self._host,
            credentials=self._credentials,
            credentials_file=credentials_file,
            scopes=self._scopes,
            ssl_credentials=self._ssl_channel_credentials,
            quota_project_id=quota_project_id,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Wrap messages. This must be done after self._grpc_channel exists
    self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
    cls,
    host: str = "dialogflow.googleapis.com",
    credentials: ga_credentials.Credentials = None,
    credentials_file: str = None,
    scopes: Optional[Sequence[str]] = None,
    quota_project_id: Optional[str] = None,
    **kwargs,
) -> grpc.Channel:
    """Build the gRPC channel used by this transport.

    Args:
        host (Optional[str]): Hostname the channel should connect to.
        credentials (Optional[~.Credentials]): Credentials attached to
            every request made over the channel; when omitted, they are
            resolved from the environment.
        credentials_file (Optional[str]): Path to a credentials file
            loadable via :func:`google.auth.load_credentials_from_file`.
            Mutually exclusive with ``credentials``.
        scopes (Optional[Sequence[str]]): Scopes forwarded to
            :func:`google.auth.default` when credentials are resolved
            from the environment.
        quota_project_id (Optional[str]): Project used for billing and
            quota attribution.
        kwargs (Optional[dict]): Extra keyword arguments forwarded to
            channel creation.

    Returns:
        grpc.Channel: The newly created channel.

    Raises:
        google.api_core.exceptions.DuplicateCredentialArgs: If both
            ``credentials`` and ``credentials_file`` are passed.
    """
    # Delegate to the shared helper; the class-level scope/host defaults
    # supply the service-specific values.
    channel = grpc_helpers.create_channel(
        host,
        credentials=credentials,
        credentials_file=credentials_file,
        quota_project_id=quota_project_id,
        default_scopes=cls.AUTH_SCOPES,
        scopes=scopes,
        default_host=cls.DEFAULT_HOST,
        **kwargs,
    )
    return channel
@property
def grpc_channel(self) -> grpc.Channel:
    """The channel this transport uses to reach the service."""
    channel = self._grpc_channel
    return channel
@property
def list_session_entity_types(
    self,
) -> Callable[
    [session_entity_type.ListSessionEntityTypesRequest],
    session_entity_type.ListSessionEntityTypesResponse,
]:
    r"""Return a callable for the list session entity types method over gRPC.

    Lists every session entity type in the specified session.

    This method doesn't work with Google Assistant integration;
    contact Dialogflow support if you need to use session entities
    with Google Assistant integration.

    Returns:
        Callable[[~.ListSessionEntityTypesRequest],
                ~.ListSessionEntityTypesResponse]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Build the stub lazily on first access and cache it; gRPC performs
    # serialization/deserialization via the supplied message hooks.
    stub_name = "list_session_entity_types"
    if stub_name not in self._stubs:
        self._stubs[stub_name] = self.grpc_channel.unary_unary(
            "/google.cloud.dialogflow.v2beta1.SessionEntityTypes/ListSessionEntityTypes",
            request_serializer=session_entity_type.ListSessionEntityTypesRequest.serialize,
            response_deserializer=session_entity_type.ListSessionEntityTypesResponse.deserialize,
        )
    return self._stubs[stub_name]
@property
def get_session_entity_type(
    self,
) -> Callable[
    [session_entity_type.GetSessionEntityTypeRequest],
    session_entity_type.SessionEntityType,
]:
    r"""Return a callable for the get session entity type method over gRPC.

    Retrieves the specified session entity type.

    This method doesn't work with Google Assistant integration;
    contact Dialogflow support if you need to use session entities
    with Google Assistant integration.

    Returns:
        Callable[[~.GetSessionEntityTypeRequest],
                ~.SessionEntityType]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Build the stub lazily on first access and cache it; gRPC performs
    # serialization/deserialization via the supplied message hooks.
    stub_name = "get_session_entity_type"
    if stub_name not in self._stubs:
        self._stubs[stub_name] = self.grpc_channel.unary_unary(
            "/google.cloud.dialogflow.v2beta1.SessionEntityTypes/GetSessionEntityType",
            request_serializer=session_entity_type.GetSessionEntityTypeRequest.serialize,
            response_deserializer=session_entity_type.SessionEntityType.deserialize,
        )
    return self._stubs[stub_name]
@property
def create_session_entity_type(
    self,
) -> Callable[
    [gcd_session_entity_type.CreateSessionEntityTypeRequest],
    gcd_session_entity_type.SessionEntityType,
]:
    r"""Return a callable for the create session entity type method over gRPC.

    Creates a session entity type. If the specified session entity
    type already exists, it is overridden.

    This method doesn't work with Google Assistant integration;
    contact Dialogflow support if you need to use session entities
    with Google Assistant integration.

    Returns:
        Callable[[~.CreateSessionEntityTypeRequest],
                ~.SessionEntityType]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Build the stub lazily on first access and cache it; gRPC performs
    # serialization/deserialization via the supplied message hooks.
    stub_name = "create_session_entity_type"
    if stub_name not in self._stubs:
        self._stubs[stub_name] = self.grpc_channel.unary_unary(
            "/google.cloud.dialogflow.v2beta1.SessionEntityTypes/CreateSessionEntityType",
            request_serializer=gcd_session_entity_type.CreateSessionEntityTypeRequest.serialize,
            response_deserializer=gcd_session_entity_type.SessionEntityType.deserialize,
        )
    return self._stubs[stub_name]
@property
def update_session_entity_type(
    self,
) -> Callable[
    [gcd_session_entity_type.UpdateSessionEntityTypeRequest],
    gcd_session_entity_type.SessionEntityType,
]:
    r"""Return a callable for the update session entity type method over gRPC.

    Updates the specified session entity type.

    This method doesn't work with Google Assistant integration;
    contact Dialogflow support if you need to use session entities
    with Google Assistant integration.

    Returns:
        Callable[[~.UpdateSessionEntityTypeRequest],
                ~.SessionEntityType]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Build the stub lazily on first access and cache it; gRPC performs
    # serialization/deserialization via the supplied message hooks.
    stub_name = "update_session_entity_type"
    if stub_name not in self._stubs:
        self._stubs[stub_name] = self.grpc_channel.unary_unary(
            "/google.cloud.dialogflow.v2beta1.SessionEntityTypes/UpdateSessionEntityType",
            request_serializer=gcd_session_entity_type.UpdateSessionEntityTypeRequest.serialize,
            response_deserializer=gcd_session_entity_type.SessionEntityType.deserialize,
        )
    return self._stubs[stub_name]
@property
def delete_session_entity_type(
    self,
) -> Callable[
    [session_entity_type.DeleteSessionEntityTypeRequest], empty_pb2.Empty
]:
    r"""Return a callable for the delete session entity type method over gRPC.

    Deletes the specified session entity type.

    This method doesn't work with Google Assistant integration;
    contact Dialogflow support if you need to use session entities
    with Google Assistant integration.

    Returns:
        Callable[[~.DeleteSessionEntityTypeRequest],
                ~.Empty]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Build the stub lazily on first access and cache it. Note the
    # deserializer: the RPC returns google.protobuf.Empty, so the raw
    # protobuf FromString hook is used instead of a .deserialize helper.
    stub_name = "delete_session_entity_type"
    if stub_name not in self._stubs:
        self._stubs[stub_name] = self.grpc_channel.unary_unary(
            "/google.cloud.dialogflow.v2beta1.SessionEntityTypes/DeleteSessionEntityType",
            request_serializer=session_entity_type.DeleteSessionEntityTypeRequest.serialize,
            response_deserializer=empty_pb2.Empty.FromString,
        )
    return self._stubs[stub_name]
def close(self):
    """Shut down the transport's underlying gRPC channel."""
    channel = self.grpc_channel
    channel.close()
# Public API of this module: only the transport class is re-exported.
__all__ = ("SessionEntityTypesGrpcTransport",)
| [
"[email protected]"
] | |
319aaf36620144c77e9f4382cdc002dd7d12372a | cbc5e26bb47ae69e80a3649c90275becf25ce404 | /xlsxwriter/test/vml/test_write_size_with_cells.py | 7d062d86032316bfb6cb293c6f39e4586d4653f4 | [
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] | permissive | mst-solar-car/kicad-bom-generator | c3549409c3139f787ad28391372b5cb03791694a | 2aae905056d06f3d25343a8d784049c141d05640 | refs/heads/master | 2021-09-07T14:00:40.759486 | 2018-02-23T23:21:13 | 2018-02-23T23:21:13 | 107,868,801 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ...vml import Vml
class TestWriteXSizeWithCells(unittest.TestCase):
    """Unit tests for the Vml _write_size_with_cells() method."""

    def setUp(self):
        # Route the Vml writer's output into an in-memory buffer so the
        # generated XML can be inspected without touching the filesystem.
        self.fh = StringIO()
        self.vml = Vml()
        self.vml._set_filehandle(self.fh)

    def test_write_size_with_cells(self):
        """The method should emit a single self-closing SizeWithCells tag."""
        self.vml._write_size_with_cells()

        expected = """<x:SizeWithCells/>"""
        actual = self.fh.getvalue()

        self.assertEqual(actual, expected)
| [
"[email protected]"
] | |
144ad152b5e9dc1c38e2a29bb9072569a0c6fdd2 | 45df508e4c99f453ca114053a92deb65939f18c9 | /tfx/utils/logging_utils.py | 8d5e5b0ce8ce081ed22a0a7b494b338c20b32a4d | [
"Apache-2.0"
] | permissive | VonRosenchild/tfx | 604eaf9a3de3a45d4084b36a478011d9b7441fc1 | 1c670e92143c7856f67a866f721b8a9368ede385 | refs/heads/master | 2020-08-09T13:45:07.067267 | 2019-10-10T03:07:20 | 2019-10-10T03:07:48 | 214,100,022 | 1 | 0 | Apache-2.0 | 2019-10-10T06:06:11 | 2019-10-10T06:06:09 | null | UTF-8 | Python | false | false | 3,124 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for TFX-specific logger."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import os
import tensorflow as tf
from typing import Any, Dict, Optional, Text
class LoggerConfig(object):
  """Configuration container for the TFX logger.

  Attributes:
    log_root: directory path under which log files are written.
    log_level: logger severity threshold (defaults to logging.INFO).
    pipeline_name: name of the active pipeline.
    worker_name: name of the component/object doing the logging.
  """

  def __init__(self,
               log_root: Optional[Text] = '/var/tmp/tfx/logs',
               log_level: Optional[int] = logging.INFO,
               pipeline_name: Optional[Text] = '',
               worker_name: Optional[Text] = ''):
    self.log_root = log_root
    self.log_level = log_level
    self.pipeline_name = pipeline_name
    self.worker_name = worker_name

  def update(self, config: Optional[Dict[Text, Any]] = None):
    """Updates the log config parameters via elements in a dict.

    Args:
      config: Dict of parameter tuples to assign to the logging config.

    Raises:
      ValueError if key is not a supported logging parameter.
    """
    if not config:
      return
    known_params = ('log_root', 'log_level', 'pipeline_name', 'worker_name')
    for key, value in config.items():
      if key not in known_params:
        raise ValueError('%s not expected in logger config.' % key)
      setattr(self, key, value)

  def copy(self):
    """Returns a shallow copy of this config."""
    return copy.copy(self)
def get_logger(config):
  """Create and configure a TFX-specific logger.

  Args:
    config: LoggerConfig instance used to configure the logger.

  Returns:
    A logger that outputs to <config.log_root>/tfx.log.

  Raises:
    RuntimeError: if log dir exists as a file.
  """
  log_path = os.path.join(config.log_root, 'tfx.log')
  logger = logging.getLogger(log_path)
  logger.setLevel(config.log_level)

  # Use the TF2-compatible tf.io.gfile API consistently; the previous code
  # mixed the deprecated tf.gfile.* aliases with tf.io.gfile.*.
  if not tf.io.gfile.exists(config.log_root):
    tf.io.gfile.makedirs(config.log_root)
  if not tf.io.gfile.isdir(config.log_root):
    raise RuntimeError('Log dir exists as a file: {}'.format(config.log_root))

  # logging.getLogger returns the same logger object for a given name, so
  # calling get_logger twice with the same config would previously attach a
  # second FileHandler and duplicate every record. Only attach a handler for
  # this file once.
  abs_log_path = os.path.abspath(log_path)
  already_attached = any(
      isinstance(h, logging.FileHandler) and h.baseFilename == abs_log_path
      for h in logger.handlers)
  if not already_attached:
    # Create logfile handler.
    fh = logging.FileHandler(log_path)
    # Define logmsg format.
    formatter = logging.Formatter(
        '%(asctime)s - {}:{} (%(filename)s:%(lineno)s) - %(levelname)s: %(message)s'
        .format(config.pipeline_name, config.worker_name))
    fh.setFormatter(formatter)
    # Add handler to logger.
    logger.addHandler(fh)
  return logger
| [
"[email protected]"
] | |
51a247af73a7e20f4aaa919360ae9ea26d676f40 | f8bdc46409c9f5eaf3d85ef157260589462d941a | /jsk_2016_01_baxter_apc/node_scripts/check_sanity_setup_for_pick | 1ab7ba1ac9271856cae0085c0e78a81cb65fadb9 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | start-jsk/jsk_apc | 2e268f8b65e9d7f4f9cc4416dc8383fd0a7b9750 | c4e349f45ef38457dc774e33f6902acf1a1540a6 | refs/heads/master | 2023-09-05T09:06:24.855510 | 2023-09-01T17:10:12 | 2023-09-01T17:10:12 | 25,620,908 | 36 | 25 | NOASSERTION | 2023-09-01T17:10:14 | 2014-10-23T05:28:31 | Common Lisp | UTF-8 | Python | false | false | 462 | #!/usr/bin/env python
import rospy
from jsk_tools.sanity_lib import checkTopicIsPublished
def main():
    """Sanity-check that both hand-camera target-bin topics are publishing."""
    rospy.init_node('check_setup_sanity')
    topics = [
        '/left_hand_camera/extract_indices_target_bin/output',
        '/right_hand_camera/extract_indices_target_bin/output',
    ]
    for topic in topics:
        checkTopicIsPublished(topic, timeout=5, echo=True, echo_noarr=True)


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | ||
0827f989319c72b436014e11b875370bc3478be2 | 142f00440b73ae6c8c140bac00fe8cd026b62caf | /src/inputs/cs_event_query/sampler.py | 9e09a45966427d54b4ec5863c3028604ed467bdf | [] | no_license | Wisc-HCI/interaction-transformation | e81bfdb21b61b884f69af4912f8bca91933d1c43 | 45e275e5426023b19d9b587cf5520ad74b1c909b | refs/heads/master | 2020-04-10T20:29:28.628040 | 2020-04-02T18:59:08 | 2020-04-02T18:59:08 | 161,269,365 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,044 | py | from z3 import *
import time
import math
class Sampler:
    '''
    Sampler takes the existing trajectories as input,
    and comes up with a tree-like interaction whose
    branches are optimized to be (a) less-seen bad tra-
    jectories, (b) unseen good trajectories, and (c)
    unseen combinations of states
    '''
    def __init__(self, trajs, num_branches, inputs, outputs):
        """Store the trajectories and build index maps for inputs/outputs.

        Args:
            trajs: iterable of trajectory objects; each is assumed to expose
                .vect, .reward and .comparable_string() -- TODO confirm against
                the trajectory class definition.
            num_branches: number of branches in the synthesized tree.
            inputs: mapping from input name to integer id.
            outputs: iterable of output names; integer ids are assigned here.
        """
        self.trajs = trajs
        self.num_branches = num_branches
        self.inputs = inputs
        self.outputs_orig = outputs
        # Bidirectional maps between output names and small integer ids used
        # inside the z3 encoding.
        self.outputs = {}
        self.outputs_rev = {}
        counter = 0
        for out in self.outputs_orig:
            self.outputs[out] = counter
            self.outputs_rev[counter] = out
            counter += 1
        self.min_branch_len = 2
        self.max_branch_len = max([len(t.vect) for t in self.trajs])
        # place the trajs in a dict w/ number of matching trajs as values
        self.traj_dict = {}
        for traj in self.trajs:
            cs = traj.comparable_string()
            print(cs)
            if cs not in self.traj_dict:
                self.traj_dict[cs] = [1,traj]
            else:
                self.traj_dict[cs][0] += 1
        # weight the benefit negative vs positive trajectories
        self.neg_reward_benefit_weight = 2
        self.positive_reward_benefit_weight = 1
        self.unseen_traj_reward_benefit = 1
        # set up the normal distribution function
        self.mu = 1 # should always be 1 -- we can't weight unseen trajectories
        self.sigma = 1 # can change this to be whatever

    def solve(self):
        """Find the highest-scoring satisfiable model.

        Repeatedly calls solve_helper with an increasing score threshold and
        keeps the last satisfiable model; stops at the first UNSAT threshold.
        """
        # this may change later on
        max_possible_score = self.num_branches * 2
        prev_m = None
        prev_f_T = None
        prev_f_M = None
        start_time = time.time()
        for i in range(0,max_possible_score+1):
            m, f_T, f_M = self.solve_helper(i)
            if m is None:
                break
            else:
                prev_m = m
                prev_f_T = f_T
                prev_f_M = f_M
                print("found solution with score>={}".format(i))
        end_time = time.time()
        print("total time: {}".format(end_time - start_time))
        #if prev_m is not None:
        #    exp = Exporter(prev_m, self.num_branches*self.max_branch_len, self.inputs, 0, prev_f_T, prev_f_M)

    def solve_helper(self, thresh):
        """Build and solve the constraint system for one score threshold.

        Args:
            thresh: minimum required value of the objective sum.

        Returns:
            (model, f_T, f_M) on SAT; (None, None, None) on UNSAT.
        """
        # which nodes each node points to
        f_T = Function("f_T", BitVecSort(8), BitVecSort(8), BitVecSort(8))
        # mapping nodes to tree levels
        #f_L = Function("f_L", IntSort(), IntSort())
        # mapping nodes to their parents
        f_P = Function("f_P", BitVecSort(8), BitVecSort(8))
        # mapping outputs onto states
        f_M = Function("f_M", BitVecSort(8), BitVecSort(8))
        consts = self.make_constraints(f_T, f_M, f_P)
        paths, path_consts = self.make_paths(f_T, f_M)
        prop_constraints = self.check_paths(paths, f_M)
        # One 0/1 indicator per known trajectory plus one per branch.
        objective = [BitVec("obj_{}".format(i), 8) for i in range(len(self.traj_dict)+self.num_branches)] #Int("obj")
        obj_const = self.setup_objective(objective, paths, f_T, f_M)
        #for traj in self.trajs:
            #objective += B[traj] * self.score[traj]
        #    objective = 0
        print("SOLVER --> setting up optimization problem")
        o = Solver()
        o.add(consts, path_consts, prop_constraints)
        o.add(obj_const)
        # Threshold check instead of z3 Optimize: solve() sweeps thresholds.
        objective_func=(Sum(objective)>=thresh)
        #h = o.maximize(Sum(objective))
        o.add(objective_func)
        print("SOLVER --> solving")
        start_time = time.time()
        satisfaction = o.check()
        curr_time = time.time()
        print("SOLVER --> done solving -- {} seconds".format(curr_time - start_time))
        objective_val = None
        if satisfaction == sat:
            #o.upper(h)
            m = o.model()
            print(m)
            for p in paths:
                print("new path")
                for step in p:
                    print(" {} -- {}".format(m.evaluate(f_M(step)), self.outputs_rev[int(str(m.evaluate(f_M(step))))]))
                print(" ~~~")
                for step in p:
                    print(" {}".format(m.evaluate(step)))
            #print(m.evaluate(f_L))
            for obj in objective:
                print(m.evaluate(obj))
            #counter = 0
            #for traj in self.traj_dict:
            #    if int(str(m.evaluate(objective[counter]))) == 1:
            #        print(traj)
            #    counter += 1
            print("SOLVER --> entire process took {} seconds".format(curr_time - start_time))
            print("SOLVER --> returning solution")
            return m, f_T, f_M
        else:
            print("SOLVER --> entire process took {} seconds".format(curr_time - start_time))
            print("SOLVER --> returning solution")
            print("ERROR: no solution")
            return None, None, None
        #solution = self.package_results(m, f_T, f_M, n)
        #curr_time = time.time()
        #return solution,objective_val

    def setup_objective(self, obj, paths, f_T, f_M):
        """Constrain each obj[i] to indicate whether trajectory i is embedded.

        obj[i] is forced to 1 iff trajectory i (in traj_dict order) can be
        traced through the synthesized tree via f_T/f_M, else 0.
        """
        obj_const = And(True)
        for o in obj:
            obj_const = And(obj_const, Or(o==0,o==1))
        traj_strings = list(self.traj_dict)
        for i in range(len(traj_strings)):
            traj_string = traj_strings[i]
            traj = self.traj_dict[traj_string][1].vect
            # NOTE(review): weight and reward are computed but currently
            # unused in the objective encoding below.
            weight = self.traj_dict[traj_string][0]
            reward = self.traj_dict[traj_string][1].reward
            # test if it is inside
            # start with the initial state
            exists_within = And(True)
            exists_within = And(exists_within, f_M(0)==self.outputs[traj[0][1].type])
            prev_output = 0
            # do all other states
            for j in range(1, len(traj)):
                #prev_output = self.outputs[traj[j-1][1].type]
                correct_input = self.inputs[traj[j][0].type]
                if traj[j][1].type != "END":
                    correct_output = self.outputs[traj[j][1].type]
                    exists_within = And(exists_within, f_M(f_T(prev_output, correct_input))==correct_output)
                    prev_output = f_T(prev_output, correct_input)
                #else:
                #    exists_within = And(exists_within, f_T(prev_output, correct_input)==-1)
            obj_const = And(obj_const, Implies(exists_within,obj[i]==1))
            obj_const = And(obj_const, Implies(Not(exists_within),obj[i]==0))
            #obj_const = And(obj_const, obj[i]==1)
        return obj_const

    def make_constraints(self, f_T, f_M, f_P):
        """Encode the tree-shape constraints over f_T/f_M.

        Covers: valid output ids per node, level-respecting transitions,
        uniqueness of children, orphan ("hanger") accounting, and the total
        leaf count.
        """
        constraints = And(True)
        # make the tree nodes
        num_nodes = self.num_branches * self.max_branch_len
        #sts = [Int("st_{}".format(i)) for i in range(num_nodes)]
        # bc = a binary flag array showing which nodes are children
        bc = [BitVec("bc_{}".format(i), 8) for i in range(num_nodes)]
        # Partition node ids into levels: node 0 is the root; each following
        # level holds num_branches consecutive ids (last level may be short).
        levels = [[0]]
        curr_level = []
        for i in range(1,num_nodes):
            curr_level.append(i)
            if ((i)%self.num_branches==0) or (i==num_nodes-1 and (i)%self.num_branches!=0):
                level_to_add = []
                for item in curr_level:
                    level_to_add.append(item)
                levels.append(level_to_add)
                curr_level.clear()
        # node identity constraints and restrict the id's of each node
        constraints = And(constraints,f_M(-1)==-1)
        for i in range(num_nodes):
            constraints = And(constraints, f_M(i)>=0, f_M(i)<len(self.outputs))
            #constraints = And(constraints, st>=-1, st<num_nodes) # this is redundant
        # TREE CONSTRAINTS
        # set the ID and the level no
        '''
        constraints=And(constraints,f_L(0)==0)
        counter = 0
        for i in range(1, num_nodes):
            #constraints=And(constraints,st==counter)
            #constraints=And(constraints,f_L(st)>=0,f_L(st)<self.max_branch_len)
            #if counter >= 1: # assign states to levels
            level = math.floor((counter-1)/self.num_branches) + 1
            constraints = And(constraints,f_L(i)==level)
            counter += 1
        '''
        # f_T constraints
        #for i in range(num_nodes):
        #    for inp in self.inputs:
        #        constraints = And(constraints, f_T(i,self.inputs[inp])>=-1, f_T(i,self.inputs[inp])<num_nodes)
        for inp in self.inputs:
            constraints = And(constraints, f_T(-1,self.inputs[inp])==-1)
        # Transitions either go to some node in the next level or to -1
        # (no successor); the last level can only transition to -1.
        for i in range(len(levels)):
            for st in levels[i]:
                for inp in self.inputs:
                    if i < (len(levels)-1):
                        or_const = Or(False)
                        for st_next_level in levels[i+1]:
                            or_const = Or(or_const, f_T(st,self.inputs[inp])==st_next_level)
                        or_const = Or(or_const, f_T(st,self.inputs[inp])==-1)
                        constraints = And(constraints, or_const)
                    else:
                        constraints = And(constraints, f_T(st,self.inputs[inp])==-1)
        # Distinct (state, input) pairs within a level may never share a
        # target, so each child has at most one parent edge.
        for i in range(len(levels)):
            for st1 in levels[i]:
                for st2 in levels[i]:
                    if st1!=st2:
                        for inp1 in self.inputs:
                            for inp2 in self.inputs:
                                constraints = And(constraints, f_T(st1,self.inputs[inp1])!=f_T(st2,self.inputs[inp2]))
        #constraints = And(constraints, f_T(0,0)==2, f_T(0,1)==2)
        '''
        # tree level constraints
        for i in range(num_nodes):
            for inp in self.inputs:
                for j in range(num_nodes):
                    constraints = And(constraints,
                                      Implies(And(f_T(i,self.inputs[inp])==j,
                                                  j!=-1), # ensuring that -1's can be on different levels
                                              f_L(i)==f_L(j)-1))
        '''
        # leaves can only be pointed to once (definition of tree)
        '''
        for i in range(num_nodes):
            for inp in self.inputs:
                constraints = And(constraints, Implies(f_T(i,self.inputs[inp])>=0,f_P(f_T(i,self.inputs[inp]))==i))
        '''
        # count how many "orphan" nodes so that we can give them extra leaves
        hangers = [BitVec("hanger_{}".format(i), 8) for i in range(num_nodes)]
        constraints = And(constraints, hangers[0]==0)
        for i in range(1, num_nodes):
            constraints = And(constraints,Or(hangers[i]==0,hangers[i]==1))
            # Candidate parents of node i live in the previous level.
            if i>0 and i<=self.num_branches:
                parents = [0]
            else:
                parent_min = (i-self.num_branches) - ((i-self.num_branches)-1)%self.num_branches
                parent_max = parent_min + (self.num_branches-1)
                parents = [j for j in range(parent_min,parent_max+1)]
            is_connected = Or(False)
            for st in parents:
                for inp in self.inputs:
                    is_connected = Or(is_connected, f_T(st,self.inputs[inp])==i)
            constraints = And(constraints,Implies(Not(is_connected),hangers[i]==1))
            constraints = And(constraints,Implies(is_connected,hangers[i]==0))
        # constraints on number of leaves
        for i in range(num_nodes):
            for inp in self.inputs:
                constraints = And(constraints,
                                  Implies(f_T(i,self.inputs[inp])==-1,bc[i]==1),
                                  Implies(bc[i]==1,f_T(i,self.inputs[inp])==-1))
        for b in bc:
            constraints = And(constraints, Or(b==0,b==1))
        constraints = And(constraints, Sum(bc)==(self.num_branches + Sum(hangers)))
        return constraints

    def make_paths(self, f_T, f_M):
        """Create path variables and constrain them to valid, unique walks.

        Returns:
            (ps, path_const): ps is a num_branches x max_branch_len array of
            bitvector variables; path_const ties each path to f_T and forces
            all paths to be pairwise distinct.
        """
        path_const = And(True)
        # actually make the variables
        ps = [[BitVec("p_{}_{}".format(j,i), 8) for i in range(self.max_branch_len)] for j in range(self.num_branches)]
        # restrict the id's of the path nodes
        for p in ps:
            for p_i in p:
                path_const = And(path_const, p_i>=-1, p_i<(self.num_branches * self.max_branch_len))
        # constrain them so that they must be acheivable via f_T and f_M
        for path in ps:
            path_const = And(path_const, path[0]==0)
            for i in range(1,self.max_branch_len):
                or_const = Or(False)
                for inp in self.inputs:
                    or_const = Or(or_const, And(path[i]==f_T(path[i-1],self.inputs[inp])))
                path_const = And(path_const, or_const)
        # constrain them so that each path must be unique
        for path in ps:
            for alt_path in ps:
                if path != alt_path:
                    all_equal = And(True)
                    for i in range(self.max_branch_len):
                        all_equal = And(all_equal,path[i]==alt_path[i])
                    path_const = And(path_const, Not(all_equal))
        return ps, path_const

    def check_paths(self, paths, f_M):
        """Encode interaction properties every path must satisfy.

        Each path must start with the "Greet" output and end with the "Bye"
        output (either where the path terminates early with -1, or at the
        final position).
        """
        # must ensure that various properties are satisfied
        prop_constraints = And(True)
        # GREETING PROPERTY
        for path in paths:
            prop_constraints = And(prop_constraints, f_M(path[0])==self.outputs["Greet"])
        # FAREWELL PROPERTY
        for path in paths:
            for i in range(1,self.max_branch_len):
                prop_constraints = And(prop_constraints, Implies(
                    And(path[i-1]>=0,path[i]==-1),
                    f_M(path[i-1])==self.outputs["Bye"]),
                    Implies(i==(self.max_branch_len-1),
                        f_M(path[i])==self.outputs["Bye"])
                )
        return prop_constraints
| [
"[email protected]"
] | |
5b2cc65fd28a82eef601c8b2bae269efad7edfe6 | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/pytest/fib6/fibonacci.py | 39bc1096dde229bdf5f76119b024e852994386a4 | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 124 | py | def fib(n):
if n < 1:
return None
a, b = 1, 1
for _ in range(1, n):
a, b = b, a+b
return a
| [
"[email protected]"
] | |
9a6cecbd8b7391193cbe482e34d4e9436736c068 | 1929443c8e4ec6ccd79777f18d161546867e17ef | /methods/transformers/src/transformers/configuration_reformer.py | b74bec1877b149d9249d568fde2e67702d6253b3 | [
"MIT",
"Apache-2.0"
] | permissive | INK-USC/RiddleSense | 6f4b00546d7f4d5ada12db50929c1f0d7713d541 | a3d57eaf084da9cf6b77692c608e2cd2870fbd97 | refs/heads/main | 2023-08-14T19:01:01.478946 | 2021-07-05T04:06:01 | 2021-07-05T04:06:01 | 376,487,870 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 13,444 | py | # coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Reformer model configuration """
from .configuration_utils import PretrainedConfig
from .utils import logging
logger = logging.get_logger(__name__)
# Maps canonical pretrained Reformer model ids to the URLs of their
# hosted config.json files.
REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/config.json",
    "google/reformer-enwik8": "https://cdn.huggingface.co/google/reformer-enwik8/config.json",
}
class ReformerConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.ReformerModel`. It is used to
instantiate a Reformer model according to the specified arguments, defining the model architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
attention_head_size (:obj:`int`, `optional`, defaults to 64):
Dimensionality of the projected key, query and value vectors
attn_layers (:obj:`List[str]`, `optional`, defaults to :obj:`["local", "lsh", "local", "lsh", "local", "lsh"]`):
List of attention layer types in ascending order. It can be chosen between a LSHSelfAttention layer
(:obj:`"lsh"`) and a LocalSelfAttention layer (:obj:`"local"`).
For more information on LSHSelfAttention layer, see `LSH Self Attention
<reformer.html#lsh-self-attention>`__. For more information on LocalSelfAttention layer, see `Local Self
Attention <reformer.html#local-sensitive-hashing-self-attention>`__.
axial_pos_embds (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use axial position embeddings. For more information on how axial position embeddings
work, see `Axial Position Encodings <reformer.html#axial-positional-encodings>`__.
axial_norm_std (:obj:`float`, `optional`, defaults to 1.0):
The standard deviation of the normal_initializer for initializing the weight matrices of the axial
positional encodings.
axial_pos_shape (:obj:`List[int]`, `optional`, defaults to :obj:`[64, 64]`):
The position dims of the axial position encodings. During training the product of the position dims has to
be equal to the sequence length.
For more information on how axial position embeddings work, see `Axial Position Encodings
<reformer.html#axial-positional-encodings>`__.
axial_pos_embds_dim (:obj:`List[int]`, `optional`, defaults to :obj:`[64, 192]`):
The embedding dims of the axial position encodings. The sum of the embedding dims has to be equal to the
hidden size.
For more information on how axial position embeddings work, see `Axial Position Encodings
<reformer.html#axial-positional-encodings>`__.
chunk_size_lm_head (:obj:`int`, `optional`, defaults to 0):
The chunk size of the final language model feed forward head layer. A chunk size of 0 means that the feed
forward layer is not chunked. A chunk size of n means that the feed forward layer processes n <
sequence_length embeddings at a time.
For more information on feed forward chunking, see `How does Feed Forward Chunking work?
<../glossary.html#feed-forward-chunking>`__.
eos_token_id (:obj:`int`, `optional`, defaults to 2):
The token id for the end-of-sentence token.
feed_forward_size (:obj:`int`, `optional`, defaults to 512):
Dimensionality of the feed_forward layer in the residual attention block.
hash_seed (:obj:`int`, `optional`):
Seed that can be used to make local sensitive hashing in :obj:`LSHSelfAttention` deterministic. This should
only be set for testing purposed. For evaluation and training purposes :obj:`hash_seed` should be left as
:obj:`None` to ensure fully random rotations in local sensitive hashing scheme.
hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"relu"`):
The non-linear activation function (function or string) in the feed forward layer in the residual attention
block. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.05):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
hidden_size (:obj:`int`, `optional`, defaults to 256):
Dimensionality of the output hidden states of the residual attention blocks.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether ot not to use a causal mask in addition to the :obj:`attention_mask` passed to
:class:`~transformers.ReformerModel`. When using the Reformer for causal language modeling, this argument
should be set to :obj:`True`.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
local_chunk_length (:obj:`int`, `optional`, defaults to 64):
Length of chunk which attends to itself in :obj:`LocalSelfAttention`. Chunking reduces memory complexity
from sequence length x sequence length (self attention) to chunk length x chunk length x sequence length /
chunk length (chunked self attention).
local_num_chunks_before (:obj:`int`, `optional`, defaults to 1):
Number of previous neighbouring chunks to attend to in :obj:`LocalSelfAttention` layer to itself.
local_num_chunks_after (:obj:`int`, `optional`, defaults to 0):
Number of following neighbouring chunks to attend to in :obj:`LocalSelfAttention` layer in addition to
itself.
local_attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities in :obj:`LocalSelfAttention`.
lsh_attn_chunk_length (:obj:`int`, `optional`, defaults to 64):
Length of chunk which attends to itself in :obj:`LSHSelfAttention`. Chunking reduces memory complexity from
sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk
length (chunked self attention).
lsh_num_chunks_before (:obj:`int`, `optional`, defaults to 1):
Number of previous neighbouring chunks to attend to in :obj:`LSHSelfAttention` layer to itself.
lsh_num_chunks_after (:obj:`int`, `optional`, defaults to 0):
Number of following neighbouring chunks to attend to in :obj:`LSHSelfAttention` layer to itself.
lsh_attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities in :obj:`LSHSelfAttention`.
max_position_embeddings (:obj:`int`, `optional`, defaults to 4096):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
num_attention_heads (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_buckets (:obj:`int` or :obj:`List[int]`, `optional`):
Number of buckets, the key query vectors can be "hashed into" using the locality sensitive hashing scheme.
Each query key vector is hashed into a hash in :obj:`1, ..., num_buckets`. The number of buckets can also
be factorized into a list for improved memory complexity. In this case, each query key vector is hashed
into a hash in :obj:`1-1, 1-2, ..., num_buckets[0]-1, ..., num_buckets[0]-num_buckets[1]` if
:obj:`num_buckets` is factorized into two factors. The number of buckets (or the product the factors)
should approximately equal sequence length / lsh_chunk_length. If :obj:`num_buckets` not set, a good value
is calculated on the fly.
num_hashes (:obj:`int`, `optional`, defaults to 1):
Number of hashing rounds (e.g., number of random rotations) in Local Sensitive Hashing scheme. The higher
:obj:`num_hashes`, the more accurate the :obj:`LSHSelfAttention` becomes, but also the more memory and time
intensive the hashing becomes.
pad_token_id (:obj:`int`, `optional`, defaults to 0):
The token id for the padding token.
vocab_size (:obj:`int`, `optional`, defaults to 320):
Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.ReformerModel`.
tie_word_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to tie input and output embeddings.
Examples::
>>> from transformers import ReformerModel, ReformerConfig
>>> # Initializing a Reformer configuration
>>> configuration = ReformerConfig()
>>> # Initializing a Reformer model
>>> model = ReformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "reformer"
def __init__(
    self,
    attention_head_size=64,
    attn_layers=["local", "lsh", "local", "lsh", "local", "lsh"],
    axial_norm_std=1.0,
    axial_pos_embds=True,
    axial_pos_shape=[64, 64],
    axial_pos_embds_dim=[64, 192],
    chunk_size_lm_head=0,
    eos_token_id=2,
    feed_forward_size=512,
    hash_seed=None,
    hidden_act="relu",
    hidden_dropout_prob=0.05,
    hidden_size=256,
    initializer_range=0.02,
    is_decoder=False,
    layer_norm_eps=1e-12,
    local_num_chunks_before=1,
    local_num_chunks_after=0,
    local_attention_probs_dropout_prob=0.05,
    local_attn_chunk_length=64,
    lsh_attn_chunk_length=64,
    lsh_attention_probs_dropout_prob=0.0,
    lsh_num_chunks_before=1,
    lsh_num_chunks_after=0,
    max_position_embeddings=4096,
    num_attention_heads=12,
    num_buckets=None,
    num_hashes=1,
    pad_token_id=0,
    vocab_size=320,
    tie_word_embeddings=False,
    **kwargs
):
    """Build a Reformer configuration; each argument is described in the class docstring."""
    # Token ids, decoder flag and embedding tying are handled by the base config class.
    super().__init__(
        pad_token_id=pad_token_id,
        eos_token_id=eos_token_id,
        is_decoder=is_decoder,
        tie_word_embeddings=tie_word_embeddings,
        **kwargs,
    )
    self.hash_seed = hash_seed
    self.vocab_size = vocab_size
    self.attention_head_size = attention_head_size
    self.hidden_size = hidden_size
    self.num_attention_heads = num_attention_heads
    self.num_hashes = num_hashes
    # One hidden layer per entry in attn_layers (each entry names a layer type).
    self.num_hidden_layers = len(attn_layers)
    # Normalize a list of bucket factors to a tuple; a plain int (or None) passes through.
    self.num_buckets = tuple(num_buckets) if isinstance(num_buckets, list) else num_buckets
    self.lsh_attn_chunk_length = lsh_attn_chunk_length
    self.local_attn_chunk_length = local_attn_chunk_length
    self.lsh_num_chunks_after = lsh_num_chunks_after
    self.lsh_num_chunks_before = lsh_num_chunks_before
    self.local_num_chunks_after = local_num_chunks_after
    self.local_num_chunks_before = local_num_chunks_before
    self.hidden_act = hidden_act
    self.feed_forward_size = feed_forward_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.lsh_attention_probs_dropout_prob = lsh_attention_probs_dropout_prob
    self.local_attention_probs_dropout_prob = local_attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    # Axial position embedding settings are stored as tuples so they hash/compare consistently.
    self.axial_pos_embds = axial_pos_embds
    self.axial_pos_shape = tuple(axial_pos_shape)
    self.axial_pos_embds_dim = tuple(axial_pos_embds_dim)
    self.axial_norm_std = axial_norm_std
    self.chunk_size_lm_head = chunk_size_lm_head
    self.attn_layers = attn_layers
| [
"[email protected]"
] | |
4d0accebb3c8b38201bf3aab2d6ab564283fbf9f | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /social_rl/adversarial_env/utils.py | 6a235a2c7e5d60f56c19c192fd53d434a804576e | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 17,133 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions, primarily used for finding the best hyperparameters.
"""
import datetime
import os
import numpy as np
import pandas as pd
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
HPARAM_COLUMNS = ['xm_adv_conv_filters', 'xm_adv_entropy_regularization',
'xm_adversary_population_size',
'xm_antagonist_population_size', 'xm_non_negative_regret',
'xm_percent_random_episodes', 'xm_protagonist_episode_length',
'xm_protagonist_population_size', 'xm_agents_use_regret',
'xm_combined_population', 'xm_flexible_protagonist',
'xm_block_budget_weight']
def save_best_work_units_csv(experiments, csv_path=None, metrics=None,
                             last_x_percent=.2, step_limit_1agent=None):
    """Collects XM work unit IDs corresponding to best hparams and saves to csv.

    Args:
      experiments: A list of Experiment objects.
      csv_path: Location where the resulting csv should be saved.
      metrics: Metrics used to select best hparams (e.g. reward).
      last_x_percent: Select hparams that led to the best performance over the
        last X% of the run.
      step_limit_1agent: Restrict dataframes to this many steps before selecting
        best hparams for last X%. This is the step limit if only 1 agent is
        being trained, so will need to be adjusted for units with multiple
        agents.

    Returns:
      A pandas dataframe with the best work units.
    """
    if metrics is None:
        metrics = ['SolvedPathLength', 'adversary_env_AdversaryReward']
    # Accumulate plain dicts and build the DataFrame once at the end:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
    # appending row-by-row was quadratic anyway.
    rows = []
    for exp in experiments:
        # Skip experiments without a sweep or without any reported work units.
        if (not exp.hparam_sweep or exp.metrics_df is None
                or 'work_unit' not in exp.metrics_df.columns.values):
            continue
        print_header(exp, exp.metrics_df)

        # 'Combined' experiments may predate the xm_combined_population column.
        if ('combined' in exp.name.lower() and
                'xm_combined_population' not in exp.metrics_df.columns.values):
            exp.metrics_df['xm_combined_population'] = True

        metrics_df = exp.metrics_df
        for metric in metrics:
            if metric not in metrics_df.columns.values:
                continue

            print('\nLooking for highest', metric)
            best_setting = find_best_setting_for_metric(
                metrics_df, metric, run='eval',
                step_limit_1agent=step_limit_1agent,
                last_x_percent=last_x_percent, num_agents=exp.num_agents)
            setting_df = restrict_to_setting(metrics_df, best_setting)
            units = setting_df['work_unit'].unique().tolist()

            # Check for combined population and calculate number of agents in pop.
            combined_population = False
            if 'xm_combined_population' in setting_df.columns.values:
                assert len(setting_df['xm_combined_population'].unique()) == 1
                combined_population = setting_df['xm_combined_population'].unique()[0]
            num_agents = calculate_num_agents_based_on_population(
                setting_df, exp.num_agents, combined_population)

            # Adjust step limit for the number of agents being trained.
            step_limit = step_limit_1agent * num_agents if step_limit_1agent else None

            scores = get_score_for_setting(
                metrics_df, exp.metrics, best_setting, step_limit=step_limit,
                last_x_percent=last_x_percent, run='eval')

            rows.append({
                'exp_name': exp.name,
                'xm_id': exp.xm_id,
                'settings': best_setting,
                'best_seeds': [str(u) for u in units],
                'metric': metric + '_last20%',
                'score': scores[metric],
                'work_units_tested': len(metrics_df['work_unit'].unique()),
                'max_steps': metrics_df['step'].max()
            })

            if metric == 'SolvedPathLength':
                # Also record the single best row ever seen for this metric.
                single_best = metrics_df.loc[metrics_df[metric].idxmax()]
                search_params = get_search_params(metrics_df)
                settings = {s: single_best[s] for s in search_params}
                rows.append({
                    'exp_name': exp.name,
                    'xm_id': exp.xm_id,
                    'settings': settings,
                    'best_seeds': single_best['work_unit'],
                    'metric': metric + '_best_ever',
                    'score': single_best[metric],
                    'work_units_tested': len(metrics_df['work_unit'].unique()),
                    'max_steps': metrics_df['step'].max()
                })

    best_seeds_df = pd.DataFrame(rows)
    if csv_path is not None:
        with tf.io.gfile.GFile(csv_path, 'wb') as f:
            best_seeds_df.to_csv(f)
        print('Saved best seeds csv to:', csv_path)
    return best_seeds_df
def combine_existing_transfer_data(transfer_dir, after_date=None,
                                   filter_n_trials=10.):
    """Combine all transfer files after a certain date, dropping duplicates.

    Args:
      transfer_dir: Directory containing timestamped transfer csv files.
      after_date: Optional '%d.%m.%Y.%H:%M:%S' string; files dated before it
        are ignored.
      filter_n_trials: If not None, keep only rows whose 'n' column equals
        this value.

    Returns:
      A single pandas dataframe combining all files, newest rows preferred
      when duplicate keys occur.
    """
    files = tf.io.gfile.listdir(transfer_dir)
    # This will sort files, and trim any files pre-dating the after_date.
    sorted_files = sort_files_by_date(files, after_date=after_date)
    df = pd.DataFrame()
    # Iterate newest-first so drop_duplicates(keep='first') prefers recent rows.
    for transfer_file in reversed(sorted_files):
        transfer_df_path = os.path.join(transfer_dir, transfer_file)
        if tf.io.gfile.stat(transfer_df_path).length == 0:
            print('File', transfer_df_path, 'has length 0, skipping')
            continue
        # Use the TF2 tf.io.gfile API consistently with the rest of this file
        # (tf.gfile is the deprecated TF1 alias).
        with tf.io.gfile.GFile(transfer_df_path, 'rb') as f:
            file_df = pd.read_csv(f)
        print('\nLoaded file', transfer_file, 'of length', len(file_df))
        if file_df.empty:
            continue

        # Remove previous rows which used a different number of trials.
        if filter_n_trials is not None:
            prev_len = len(file_df)
            file_df = file_df[file_df['n'] == filter_n_trials]
            if len(file_df) != prev_len:
                print('Removed', prev_len - len(file_df),
                      'rows where n !=', filter_n_trials, '... New length is:',
                      len(file_df))
            if file_df.empty:
                continue

        # Remove extra unnecessary index columns left over from to_csv round-trips.
        bad_cols = [c for c in file_df.columns.values if 'Unnamed' in c]
        file_df = file_df.drop(columns=bad_cols)

        if 'metric' not in file_df.columns.values:
            file_df['metric'] = ''

        print('\tExperiments/metrics found in this file:',
              get_unique_combos_in_df(file_df, ['name', 'metric']))

        key_names = ['name', 'xm_id', 'seed', 'env', 'checkpoint', 'agent_id',
                     'n', 'domain_rand_comparable_checkpoint', 'metric']

        # Merge in new rows, deduplicating both sides first so the diagnostics
        # below measure genuinely new data.
        deduped_file_df = drop_duplicates_but_alert(
            file_df, key_names, transfer_file)
        deduped_df = drop_duplicates_but_alert(
            df, key_names, 'main df')

        prev_len = len(deduped_df)
        df = pd.concat([deduped_df, deduped_file_df],
                       sort=True).reset_index(drop=True)
        df.drop_duplicates(subset=key_names, inplace=True, keep='first')
        print('Added', len(df) - prev_len, 'new rows to the main df. It now has',
              len(df), 'rows')
        if len(df) == prev_len:
            continue
        assert prev_len < len(df), 'Merging should not remove rows'

        # Analyze which rows were added by this file.
        new_rows = df[prev_len:]
        print('\t', len(new_rows) / float(len(file_df)) * 100.,
              '% of the rows in file', transfer_file, 'were new.')
        print('\tNew rows involve these experiments/metrics:',
              get_unique_combos_in_df(new_rows, ['name', 'metric']))
    return df
def sort_files_by_date(files, after_date=None, check_str='transfer'):
    """Sort filenames chronologically by the timestamp embedded at their end.

    Filenames are expected to end with a '%d.%m.%Y.%H:%M:%S' stamp directly
    before the '.csv' extension. Files dated before `after_date`, files not
    containing `check_str`, and the aggregate 'transfer_results.csv' file are
    all dropped.

    Args:
      files: List of candidate filenames.
      after_date: Optional '%d.%m.%Y.%H:%M:%S' cutoff; earlier files are
        discarded.
      check_str: Substring a filename must contain to be considered.

    Returns:
      The surviving filenames, oldest first.
    """
    stamp_format = '%d.%m.%Y.%H:%M:%S'
    cutoff = None
    if after_date is not None:
        cutoff = datetime.datetime.strptime(after_date, stamp_format)

    dated_names = []
    for filename in files:
        # The aggregate results file carries no timestamp; skip it outright.
        if filename == 'transfer_results.csv':
            continue
        if check_str not in filename:
            continue
        # The timestamp is the fixed-width run of characters just before '.csv'.
        end = filename.find('.csv')
        start = end - len('02.06.2020.07:58:30')
        stamp = datetime.datetime.strptime(filename[start:end], stamp_format)
        if cutoff is not None and stamp < cutoff:
            # Ignore files dated before the cutoff.
            continue
        dated_names.append((stamp, filename))

    return [filename for _, filename in sorted(dated_names)]
def drop_duplicates_but_alert(df, key_names, df_name):
    """Drop duplicate rows on `key_names`, printing how many were removed."""
    deduped = df.drop_duplicates(key_names)
    removed = len(df) - len(deduped)
    if removed != 0:
        print('Dropped', removed, 'duplicates from', df_name)
    return deduped
def get_unique_combos_in_df(df, keys):
    """Return the sorted unique 'val1/val2/...' combinations of df[keys].

    NaNs in the key columns are replaced with '' in-place before joining.
    """
    for column in keys:
        df[column] = df[column].fillna('')
    combos = ['/'.join(row) for row in df[keys].values]
    return np.unique(combos)
def calculate_num_agents_based_on_population(
        settings, exp_num_agents, combined_population=False, is_dict=False):
    """Calculate how much to adjust steps based on number of trained agents.

    Args:
      settings: Either a dict of hparams (is_dict=True) or a dataframe whose
        population-size columns each hold a single unique value.
      exp_num_agents: Number of distinct agent roles in the experiment.
      combined_population: Whether protagonists and adversaries share one
        population.
      is_dict: Whether `settings` is a dict rather than a dataframe.

    Returns:
      The total number of agents being trained.
    """
    population_keys = ('xm_protagonist_population_size',
                       'xm_antagonist_population_size',
                       'xm_adversary_population_size')
    pop_sizes = {}
    for key in population_keys:
        if is_dict:
            # Missing keys default to a population of one.
            pop_sizes[key] = settings[key] if key in settings else 1
        elif key in settings.columns.values:
            unique_vals = settings[key].unique()
            # The dataframe must already be restricted to a single setting.
            assert len(unique_vals) == 1
            pop_sizes[key] = unique_vals[0]
        else:
            pop_sizes[key] = 1

    protagonists = pop_sizes['xm_protagonist_population_size']
    antagonists = pop_sizes['xm_antagonist_population_size']
    adversaries = pop_sizes['xm_adversary_population_size']

    if combined_population:
        return protagonists + adversaries
    if exp_num_agents == 3:
        return protagonists + antagonists + adversaries
    if exp_num_agents == 2:
        return protagonists + adversaries
    return 1
def print_header(exp, df, last_x_percent=.2):
    """Print information about a hyperparameter sweep experiment.

    Args:
      exp: Experiment object; only its `name` attribute is read.
      df: Metrics dataframe with 'run', 'step', and 'work_unit' columns.
      last_x_percent: Fraction of the run reported as being analyzed.
    """
    print('HPARAM SWEEP =', exp.name)
    print('Looking at last', last_x_percent*100, '% of data')
    print('Considering', df['run'].unique())
    print('Model has been trained for', df['step'].max(), 'steps')
    print(len(df['work_unit'].unique()), 'work units reporting in\n')
def get_search_params(df, hparam_columns=None):
    """Return the hparam columns present in df that take more than one value.

    Args:
      df: Experiment metrics dataframe.
      hparam_columns: Candidate hparam column names; defaults to the
        module-level HPARAM_COLUMNS.

    Returns:
      The candidate columns, in order, that actually vary across df.
    """
    if hparam_columns is None:
        hparam_columns = HPARAM_COLUMNS
    present = [col for col in hparam_columns if col in df.columns.values]
    # A column with a single unique value was not part of the sweep.
    return [col for col in present if len(df[col].unique()) >= 2]
def restrict_to_setting(df, setting, run='eval'):
    """Restrict an experiment dataframe to one hyperparameter setting.

    Rows are first filtered to the given `run`, then to the rows matching
    every (column, value) pair in `setting` whose column exists in df.
    """
    subset = df[df['run'] == run]
    for column, value in setting.items():
        # Silently skip settings for columns this dataframe does not have.
        if column in df.columns.values:
            subset = subset[subset[column] == value]
    return subset
def get_score_for_setting(df, metrics, setting, step_limit=None,
                          last_x_percent=.2, run='eval', verbose=True,
                          ignore_metrics=None):
    """Find the average score across several metrics for an hparam setting.

    Args:
      df: Experiment metrics dataframe.
      metrics: Metric column names to average.
      setting: Dict mapping hparam column name -> value.
      step_limit: If set, only rows with 'step' <= step_limit are considered.
      last_x_percent: Average each metric over the final X% of (step-sorted) rows.
      run: Which run ('eval'/'train') to restrict to.
      verbose: Whether to print diagnostics.
      ignore_metrics: Metric names to skip; defaults to bookkeeping columns.

    Returns:
      Dict mapping metric name -> mean value over the last X% of steps.
    """
    if ignore_metrics is None:
        ignore_metrics = ['NumEnvEpisodes', 'GoalX', 'GoalY']
    if verbose:
        print('Testing hparameter settings:')
        print(setting)
    setting_df = restrict_to_setting(df, setting, run)
    if verbose:
        print('There are', len(setting_df['work_unit'].unique()),
              'work units with these settings')
        print('\twhich are:', setting_df['work_unit'].unique())
    # Sort by step so "last X%" below corresponds to the end of training.
    setting_df = setting_df.sort_values('step')
    if step_limit is not None:
        prev_len = len(setting_df)
        setting_df = setting_df[setting_df['step'] <= step_limit]
        if verbose:
            print('After restricting to step limit of', step_limit,
                  'the dataframe went from', prev_len, 'rows to', len(setting_df))
    # Index of the first row inside the trailing X% window.
    start_step = int(len(setting_df) * (1-last_x_percent))
    scores = {}
    for metric in metrics:
        if metric not in setting_df.columns.values or metric in ignore_metrics:
            continue
        # NOTE(review): relies on integer slicing of the Series being
        # positional here; confirm the index is not integer-labeled.
        scores[metric] = setting_df[metric][start_step:].mean()
        if verbose: print('Mean', metric, scores[metric])
    return scores
def find_best_settings(df, metrics, verbose=True, step_limit_1agent=None,
                       last_x_percent=.2, run='eval', hparam_columns=None,
                       num_agents=1):
    """Find the hparam settings that led to the highest score on each metric.

    Args:
      df: Experiment metrics dataframe.
      metrics: Metric column names to score each setting on.
      verbose: Whether to print diagnostics.
      step_limit_1agent: Per-agent step budget used to compute each setting's
        step limit.
      last_x_percent: Fraction of the run to average over.
      run: Which run ('eval'/'train') to restrict to.
      hparam_columns: Candidate hparam columns; defaults to HPARAM_COLUMNS.
      num_agents: Base number of agent roles in the experiment.

    Returns:
      A (scores_dict, settings_list) pair, where scores_dict maps each metric
      to a list of scores aligned with settings_list (one dict per setting).
    """
    if hparam_columns is None:
        hparam_columns = HPARAM_COLUMNS
    search_hparams = [h for h in hparam_columns if h in df.columns.values]
    # Drop columns that do not vary, except xm_combined_population which is
    # kept even when constant (it changes how num_agents is computed below).
    to_remove = []
    for h in search_hparams:
        if h != 'xm_combined_population' and len(df[h].unique()) < 2:
            to_remove.append(h)
    search_hparams = [h for h in search_hparams if h not in to_remove]
    if verbose: print('Searching for combos of', search_hparams)
    hparam_combos = df[search_hparams].drop_duplicates()
    if verbose:
        print('Investigating', len(hparam_combos), 'hparam settings')
    scores_list = []
    settings_list = []
    for k, row in hparam_combos.iterrows():
        row_dict = row.to_dict()
        settings_list.append(row_dict)
        # Check for combined population. If True the number of agents varies per
        # hparam setting.
        combined_population = (
            'xm_combined_population' in row_dict
            and row_dict['xm_combined_population']) or (
                'xm_combined_population' in df.columns.values and
                df['xm_combined_population'].unique()[0])
        # NOTE(review): num_agents is reassigned here and carried into the next
        # loop iteration as the base value — confirm this is intentional.
        num_agents = calculate_num_agents_based_on_population(
            row_dict, num_agents, combined_population, is_dict=True)
        # Recompute step limit based on number of agents.
        if step_limit_1agent is not None:
            step_limit = step_limit_1agent * num_agents
        else:
            step_limit = None
        scores_list.append(get_score_for_setting(
            df, metrics, row_dict, step_limit=step_limit,
            last_x_percent=last_x_percent, run=run, verbose=False))
    # Transpose the list of per-setting dicts into metric -> list of scores.
    # Raises IndexError if no settings were found (scores_list empty).
    scores_dict = {k: [dic[k] for dic in scores_list] for k in scores_list[0]}
    return scores_dict, settings_list
def find_best_setting_for_metric(df, metric, run='eval', step_limit_1agent=None,
                                 last_x_percent=.2, num_agents=1):
    """Find the hparam setting that led to the highest score on metric.

    Returns:
      The settings dict (hparam name -> value) whose mean score over the
      last X% of steps is highest for `metric`.
    """
    scores_dict, settings_list = find_best_settings(
        df,
        [metric],
        run=run,
        step_limit_1agent=step_limit_1agent,
        last_x_percent=last_x_percent,
        num_agents=num_agents)
    scores = scores_dict[metric]
    # Settings and scores are index-aligned, so argmax picks the best setting.
    max_idx = scores.index(max(scores))
    return settings_list[max_idx]
def restrict_to_best_setting_for_metric(df, metric, run='eval',
                                        last_x_percent=.2, num_agents=1,
                                        step_limit_1agent=None):
    """Restrict df to hparam settings with highest score on metric.

    Returns:
      The rows of df (for the given run) matching the best-scoring setting.
    """
    best_setting = find_best_setting_for_metric(
        df, metric, run=run, last_x_percent=last_x_percent, num_agents=num_agents,
        step_limit_1agent=step_limit_1agent)
    print('Found best setting', best_setting)
    return restrict_to_setting(df, best_setting)
def copy_recursively(source, destination):
    """Copies a directory and its content.

    Args:
      source: Source directory.
      destination: Destination directory.
    """
    for src_dir, _, src_files in tf.io.gfile.walk(source):
        # Mirror the relative sub-directory structure under destination.
        dst_dir = os.path.join(destination, os.path.relpath(src_dir, source))
        if not tf.io.gfile.exists(dst_dir):
            tf.io.gfile.makedirs(dst_dir)
        for src_file in src_files:
            tf.io.gfile.copy(
                os.path.join(src_dir, src_file),
                os.path.join(dst_dir, src_file),
                overwrite=True)
| [
"[email protected]"
] | |
dd287ab4dacb2cfc45941be3341ffa301364946e | c8781d6c35bc1a01572ae079c10ae09a48360d0d | /postprocess_tweets.py | 32e7614af149211c9c5541c6c10e55184e600ce0 | [] | no_license | molguin92/RechazoMiner | 030e718d7a7128ad3cf3c9b167e58fd353c898e8 | 3c027426876e7ed65c7f9039f5a66c8260ada746 | refs/heads/master | 2022-04-21T04:35:20.497340 | 2020-04-12T20:00:33 | 2020-04-12T20:00:33 | 250,283,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from wordcloud import WordCloud
def main():
    """Load scraped tweets, deduplicate, and plot a length histogram and word cloud."""
    # Deduplicate on tweet_id so retweet scrapes do not double-count.
    tweets = pd.read_parquet('./tweets.parquet') \
        .reset_index() \
        .drop_duplicates(subset='tweet_id') \
        .set_index('tweet_id')
    # tweets_no_dup = tweets.drop_duplicates(subset='full_text').copy()
    # NOTE(review): np.unicode was removed in NumPy 1.24; np.str_ is the
    # modern replacement if this script is run on a recent NumPy.
    tweets['words'] = tweets['full_text'] \
        .str.split() \
        .apply(lambda x: np.array(x, dtype=np.unicode))

    # Distribution of tweet lengths in characters.
    fig, ax = plt.subplots()
    tweets['full_text'].str.len().plot(kind='hist', ax=ax, density=True)
    ax.set_xlabel('Tweet length [characters]')
    plt.show()

    text_freqs = tweets['full_text'].value_counts()

    # Concatenate all tweet text into one string for the word cloud.
    words = np.hstack(tweets['full_text'].to_numpy())
    words = str.join(' ', words)
    hashtags = np.hstack(tweets['hashtags'].to_numpy())

    # Generate a word cloud image.
    # Stopwords: English/Spanish stopwords plus the scraped hashtags and a
    # hand-tuned list of campaign terms; commented entries were tried and dropped.
    swords = set(stopwords.words(['english', 'spanish'])).union(hashtags)
    swords.add('https')
    swords.add('rechazo')
    swords.add('rechazocrece')
    swords.add('rechazotuoportunismo')
    # swords.add('kramerrechazotuodio')
    # swords.add('kramermiserable')
    swords.add('si')
    swords.add('co')
    swords.add('así')
    # swords.add('país')
    # swords.add('apoyoamañalich')
    # swords.add('apoyoalpresidente')
    swords.add('ahora')
    swords.add('solo')
    swords.add('ud')
    # swords.add('chile')
    swords.add('gente')
    # swords.add('chileno')
    # swords.add('chilenos')

    wordcloud = WordCloud(width=800, height=800, stopwords=swords) \
        .generate(words)
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    plt.show()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ef94445d6b1610a2ae6a0c06860afd62b5e23786 | 9e6b91907e04a37f708b28e8ee52760662b4c761 | /py/dirbalak/run.py | 0355e2698c739d28f282edf63e736dfafbde8594 | [
"Apache-2.0"
] | permissive | shlomimatichin/dirbalak | d744e3f4fdadf920082c870107f50f9020d57af6 | 218441fe55715c0602dd41142ae6a34ddfef6b38 | refs/heads/master | 2020-12-11T03:45:23.059866 | 2015-02-26T15:40:44 | 2015-02-26T15:40:44 | 28,646,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,848 | py | import subprocess
import upseto.run
import tempfile
import shutil
import os
import select
import time
run = upseto.run.run
def runAndBeamLog(logName, command, cwd=None):
    """Run `command`, echo and collect its combined stdout/stderr, then upload
    the log via beamLog. Raises Exception if the command fails or times out.

    Note: this module is Python 2 code (print statement below).
    """
    with open("/dev/null") as devNull:
        popen = subprocess.Popen(
            command, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            stdin=devNull, close_fds=True)
        outputLines = []
        TIMEOUT = 20 * 60  # seconds without any output before giving up
        OVERALL_TIMEOUT = 4 * 60 * 60  # wall-clock cap for the whole command
        before = time.time()
        while True:
            # Block until output is available or the idle timeout elapses.
            ready = select.select([popen.stdout], [], [], TIMEOUT)[0]
            if ready == []:
                # Idle timeout: no output at all for TIMEOUT seconds.
                outputLines.append(
                    "\n\n\nNo output from command '%s' for over %s seconds, timeout" % (
                        command, TIMEOUT))
                returnCode = -1
                break
            if time.time() - before > OVERALL_TIMEOUT:
                outputLines.append(
                    "\n\n\nCommand '%s' is taking over %s seconds, timeout" % (
                        command, OVERALL_TIMEOUT))
                returnCode = -1
                break
            line = popen.stdout.readline()
            if line == '':
                # EOF: the process closed stdout; collect its exit status.
                returnCode = popen.wait()
                break
            outputLines.append(line)
            print line,
        output = "".join(outputLines)
        beamLog(logName, output, returnCode)
        if returnCode != 0:
            raise Exception("The command '%s' failed, output:\n%s" % (command, output))
def beamLog(logName, output, returnCode):
    """Write `output` (plus the return code) to a temp log file and upload it
    with logbeam; the temp directory is always cleaned up."""
    dir = tempfile.mkdtemp()
    try:
        logFilename = os.path.join(dir, logName + ".log.txt")
        with open(logFilename, "w") as f:
            f.write(output)
            f.write("\nRETURN_CODE %d" % returnCode)
        run(["logbeam", "upload", logFilename])
    finally:
        shutil.rmtree(dir, ignore_errors=True)
def beamLogsDir(under, path):
    """Upload an entire directory of logs with logbeam, nested under `under`."""
    run(["logbeam", "upload", path, "--under", under])
| [
"[email protected]"
] | |
3d896af960b5e3f6493dfe0bd2f44dcb1c5b85db | c548c10c4fd0b6c1d1c10cc645cb3b90b31f2de6 | /keras/keras54_conv1d_01_lstm.py | 46786f3cdafe9a0d5880bfff0ec509b09b860a2d | [] | no_license | sswwd95/Study | caf45bc3c8c4301260aaac6608042e53e60210b6 | 3c189090c76a68fb827cf8d6807ee1a5195d2b8b | refs/heads/master | 2023-06-02T21:44:00.518810 | 2021-06-26T03:01:26 | 2021-06-26T03:01:26 | 324,061,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | # conv1d
# LSTM과 비교
import numpy as np
# 1. 데이터
x = np.array([[1,2,3] ,[2,3,4], [3,4,5], [4,5,6],
[5,6,7], [6,7,8], [7,8,9], [8,9,10],
[9,10,11], [10,11,12],
[20,30,40],[30,40,50],[40,50,60]])
y = np.array([4,5,6,7,8,9,10,11,12,13,50,60,70])
x_pred = np.array([50,60,70])
print("x.shape : ", x.shape) #(13, 3)
print("y.shape : ", y.shape) #(13,)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size = 0.8, random_state = 66)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x_train = x_train.reshape(x_train.shape[0],x_train.shape[1],1)
x_test = x_test.reshape(x_test.shape[0],x_test.shape[1],1)
#2. 모델구성
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Conv1D
model = Sequential()
model.add(Conv1D(128, 2, input_shape=(3,1)))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1))
model.summary()
# 3. 컴파일, 훈련
model.compile(loss = 'mse', optimizer = 'adam', metrics = ['mae'])
from tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint
modelpath = '../data/modelcheckpoint/k54_1_{epoch:02d}-{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
early_stopping = EarlyStopping(monitor = 'loss', patience=20, mode='min')
model.fit(x_train, y_train, batch_size = 8, callbacks=[early_stopping, cp], epochs=1000, validation_split=0.2)
# 4. 평가 예측
loss,mae = model.evaluate(x_test, y_test, batch_size=8)
print("loss, mae : ", loss, mae)
x_pred = x_pred.reshape(1,3,1)
y_predict = model.predict(x_pred)
# lstm
# loss : 0.03733702003955841
#conv1d
# loss, mae : 0.38369080424308777 0.5462395548820496
'''
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D,Dense,Flatten
model = Sequential()
model.add(Conv1D(filters = 10, kernel_size = 2,strides=1,
padding='same',input_shape=(10,1)))
model.add(Conv1D(9,2))
model.add(Flatten())
model.add(Dense(1))
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv1d (Conv1D) (None, 10, 10) 30
_________________________________________________________________
conv1d_1 (Conv1D) (None, 9, 9) 189
_________________________________________________________________
flatten (Flatten) (None, 81) 0
_________________________________________________________________
dense (Dense) (None, 1) 82
=================================================================
Total params: 301
Trainable params: 301
Non-trainable params: 0
number_parameters = out_channels * (in_channels * kernel + 1)
conv1d = 10*(1*2 +1) = 30
conv2d = 9*(10*2 +1) = 189
'''
| [
"[email protected]"
] | |
8ddc1bb8f4a20ecdb528058fedc4bcd5962a64de | dcce56815dca2b18039e392053376636505ce672 | /dumpscripts/asyncio_coroutine_return.py | 4232ff84185f4cccf3562bcb97cb4daeb5f66d4f | [] | no_license | robertopauletto/PyMOTW-it_3.0 | 28ff05d8aeccd61ade7d4107a971d9d2576fb579 | c725df4a2aa2e799a969e90c64898f08b7eaad7d | refs/heads/master | 2021-01-20T18:51:30.512327 | 2020-01-09T19:30:14 | 2020-01-09T19:30:14 | 63,536,756 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | # asyncio_coroutine_return.py
import asyncio
async def coroutine():
print('nella coroutine')
return 'risultato'
event_loop = asyncio.get_event_loop()
try:
return_value = event_loop.run_until_complete(
coroutine()
)
print('ritornato: {!r}'.format(return_value))
finally:
event_loop.close()
| [
"[email protected]"
] | |
2951f9918b65ff8723f67976693c4de1f72c851d | 48bb4a0dbb361a67b88b7c7532deee24d70aa56a | /codekata/persquare.py | 0d0aef46cce120f2115cd0d2263aef327619c712 | [] | no_license | PRAMILARASI/GUVI | 66080a80400888263d511138cb6ecd37540507c7 | 6a30a1d0a3f4a777db895f0b3adc8b0ac90fd25b | refs/heads/master | 2022-01-28T08:54:07.719735 | 2019-06-24T15:57:05 | 2019-06-24T15:57:05 | 191,355,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | l,r=map(int,input().split())
l1=[]
cg=0
for i in range(1,r+1):
sq=i*i
if(sq<=r):
l1.append(i*i)
else:
break
for j in range(l,r+1):
if(j in l1):
cg+=1
print(cg)
| [
"[email protected]"
] | |
29c7191e1f0bb964d65ea7d49d125e5c1361171f | 2aba62d66c2c622bdc148cef451da76cae5fd76c | /exercise/learn_python_dm2039/ch17/ch17_21.py | 2f28cbe564dd9eb87e19a2b997177f787f6af96d | [] | no_license | NTUT-109AB8011/crawler | 6a76de2ab1848ebc8365e071e76c08ca7348be62 | a703ec741b48d3af615a757fed7607b1f8eb66a6 | refs/heads/master | 2023-03-26T22:39:59.527175 | 2021-03-30T03:29:22 | 2021-03-30T03:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | # ch17_21.py
# Draw a simple cartoon face with Pillow's ImageDraw primitives.
from PIL import Image, ImageDraw
newImage = Image.new('RGBA', (300, 300), 'Yellow') # create a 300x300 image with a yellow background
drawObj = ImageDraw.Draw(newImage)

drawObj.rectangle((0,0,299,299), outline='Black') # image border
drawObj.ellipse((30,60,130,100),outline='Black') # left eye outline
drawObj.ellipse((65,65,95,95),fill='Blue') # left eye
drawObj.ellipse((170,60,270,100),outline='Black') # right eye outline
drawObj.ellipse((205,65,235,95),fill='Blue') # right eye
drawObj.polygon([(150,120),(180,180),(120,180),(150,120)],fill='Aqua') # nose
drawObj.rectangle((100,210,200,240), fill='Red') # mouth
newImage.save("out17_21.png")
| [
"[email protected]"
] | |
83fd0483590ca890422fb68150b7d7917f2a1699 | 1b6cea605ee3ad1cac7ed39e8e56d78b41a0a9c8 | /touchdown/aws/route53/zone.py | df8463a3555b95405eb2e22bd3a323ef2ab00f98 | [
"Apache-2.0"
] | permissive | pombredanne/touchdown | 37e0847649d376ad8b087f993ef3021db37ff728 | 2213f3572c2b99cecf46dbee48d6d3f038cc442c | refs/heads/master | 2021-01-18T09:05:05.027531 | 2015-01-29T11:42:10 | 2015-01-29T11:42:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,133 | py | # Copyright 2014-2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from touchdown.core.plan import Plan
from touchdown.core import argument, serializers
from ..account import BaseAccount
from ..common import Resource, SimpleDescribe, SimpleApply, SimpleDestroy
from ..vpc import VPC
from .alias_target import AliasTarget
def _normalize(dns_name):
"""
The Amazon Route53 API silently accepts 'foo.com' as a dns record, but
internally that becomes 'foo.com.'. In order to match records we need to do
the same.
"""
return dns_name.rstrip('.') + "."
class Record(Resource):
    """A DNS record set inside a Route53 hosted zone (used via HostedZone.records)."""
    resource_name = "record"
    name = argument.String(field="Name")
    type = argument.String(field="Type")
    # One {"Value": ...} dict per entry; skip_empty omits the field entirely
    # when no literal values are given (e.g. for alias records).
    values = argument.List(
        field="ResourceRecords",
        serializer=serializers.List(serializers.Dict(
            Value=serializers.Identity(),
        ), skip_empty=True)
    )
    ttl = argument.Integer(min=0, field="TTL")
    set_identifier = argument.Integer(min=1, max=128, field="SetIdentifier")
    # Alias to another AWS resource instead of literal record values.
    alias = argument.Resource(
        AliasTarget,
        field="AliasTarget",
        serializer=serializers.Resource(),
    )
    def clean_name(self, name):
        # Store the name in Route53's canonical trailing-dot form so that
        # comparisons against API responses match.
        return _normalize(name)
    # Record attributes not yet exposed by this resource:
    #weight = argument.Integer(min=1, max=255, field="Weight")
    #region = argument.String(field="Region")
    #geo_location = argument.String(field="GeoLocation")
    #failover = argument.String(choices=["PRIMARY", "SECONDARY"], field="Failover")
    #alias_target = argument.Resource(field="AliasTarget")
    #health_check = argument.Resource(field="HealthCheckId")
class HostedZone(Resource):
    """ A DNS zone hosted at Amazon Route53 """
    resource_name = "hosted_zone"
    # CallerReference must be unique per create call; a fresh UUID satisfies that.
    extra_serializers = {
        "CallerReference": serializers.Expression(lambda x, y: str(uuid.uuid4())),
    }
    name = argument.String(field="Name")
    # Associating a VPC makes this a private hosted zone.
    vpc = argument.Resource(VPC, field="VPC")
    comment = argument.String(
        field="HostedZoneConfig",
        serializer=serializers.Dict(
            Comment=serializers.Identity(),
        ),
    )
    records = argument.ResourceList(Record)
    shared = argument.Boolean()
    """ If a hosted zone is shared then it won't be destroyed and DNS records will never be deleted """
    account = argument.Resource(BaseAccount)
    def clean_name(self, name):
        # Route53 stores zone names with a trailing dot; normalise for matching.
        return _normalize(name)
class Describe(SimpleDescribe, Plan):
    """Locate an existing Route53 hosted zone matching the resource's name.

    ``describe_object`` returns the raw zone dict from the API, or ``None``
    when no zone with that name exists yet.
    """
    resource = HostedZone
    service_name = 'route53'
    describe_action = "list_hosted_zones"
    describe_list_key = "HostedZone"
    singular = "HostedZone"
    key = 'Id'
    def describe_object(self):
        # Route53 stores zone names fully qualified; reuse the module-level
        # _normalize helper (idempotent) instead of duplicating the
        # rstrip('.') + '.' logic inline.
        zone_name = _normalize(self.resource.name)
        paginator = self.client.get_paginator("list_hosted_zones")
        for page in paginator.paginate():
            for zone in page['HostedZones']:
                if zone['Name'] == zone_name:
                    return zone
        # Implicit None: zone does not exist yet.
class Apply(SimpleApply, Describe):
    """Create the hosted zone and reconcile its record sets."""
    create_action = "create_hosted_zone"
    create_response = "not-that-useful"
    # update_action = "update_hosted_zone_comment"
    def update_object(self):
        """Yield a change_resource_record_sets action that UPSERTs local
        records missing remotely and, for non-shared zones, DELETEs remote
        records that have no local counterpart."""
        changes = []
        description = ["Update hosted zone records"]
        # Retrieve all DNS records associated with this hosted zone,
        # ignoring the SOA and NS records for the top level domain.
        remote_records = []
        if self.resource_id:
            for record in self.client.list_resource_record_sets(HostedZoneId=self.resource_id)['ResourceRecordSets']:
                if record['Type'] in ('SOA', 'NS') and record['Name'] == self.resource.name:
                    continue
                remote_records.append(record)
        # UPSERT every local record that no remote record matches.
        for local in self.resource.records:
            for remote in remote_records:
                if local.matches(self.runner, remote):
                    break
            else:
                changes.append(serializers.Dict(
                    Action="UPSERT",
                    ResourceRecordSet=serializers.Context(serializers.Const(local), serializers.Resource()),
                ))
                description.append("Name => {}, Type={}, Action=UPSERT".format(local.name, local.type))
        # DELETE remote records with no local counterpart — never for shared zones.
        if not self.resource.shared:
            for remote in remote_records:
                for local in self.resource.records:
                    if remote["Name"] != local.name:
                        continue
                    if remote["Type"] != local.type:
                        continue
                    if remote.get("SetIdentifier", None) != local.set_identifier:
                        continue
                    break
                else:
                    # Bug fix: this branch previously referenced the stale
                    # loop variable `record` (left over from fetching
                    # remote_records above), so the wrong record set was
                    # queued for deletion. Use the unmatched `remote`.
                    changes.append(serializers.Const({"Action": "DELETE", "ResourceRecordSet": remote}))
                    description.append("Name => {}, Type={}, Action=DELETE".format(remote["Name"], remote["Type"]))
        if changes:
            yield self.generic_action(
                description,
                self.client.change_resource_record_sets,
                serializers.Dict(
                    HostedZoneId=serializers.Identifier(),
                    ChangeBatch=serializers.Dict(
                        #Comment="",
                        Changes=serializers.Context(serializers.Const(changes), serializers.List(serializers.SubSerializer())),
                    )
                ),
            )
class Destroy(SimpleDestroy, Describe):
    """Delete the hosted zone; zones marked ``shared`` are left untouched."""
    destroy_action = "delete_hosted_zone"
    def destroy_object(self):
        # Guard clause: shared zones are never destroyed, so yield nothing.
        if self.resource.shared:
            return
        for action in super(Destroy, self).destroy_object():
            yield action
| [
"[email protected]"
] | |
9c58da8f6f2ca4fe76c5acddd34f0f70ccf4e23d | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_47340.py | 91cb79d70b4583e53b6a388ec9065caeaeae9000 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,850 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((478.695, 491.618, 534.974), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((456.448, 443.265, 575.319), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((427.058, 375.334, 612.354), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((499.981, 362.186, 493.433), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((338.529, 244.809, 727.359), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((453.438, 458.283, 555.029), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((453.387, 459.104, 554.028), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((426.461, 462.169, 547.509), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((399.717, 462.187, 540.416), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((373.353, 460.189, 531.792), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((360.982, 463.532, 506.908), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((384.883, 469.211, 493.136), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((469.191, 481.848, 558.095), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((298.112, 462.255, 432.399), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((259.758, 343.869, 591.668), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((259.758, 343.869, 591.668), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((280.931, 363.467, 589.883), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((299.619, 385.281, 586.577), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((318.979, 406.504, 584.189), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((343.244, 422.039, 583.818), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((370.483, 431.076, 585.055), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((398.877, 434.946, 583.834), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((205.981, 333.036, 445.315), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((598.405, 534.572, 716.502), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((403.357, 414.699, 621.968), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((403.357, 414.699, 621.968), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((405.467, 397.525, 598.311), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((419.846, 379.331, 580.485), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((444.556, 364.461, 585.527), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((513.337, 446.467, 521.88), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((386.052, 276.451, 653.213), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((473.752, 433.46, 555.492), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((473.873, 433.324, 555.305), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((492.013, 424.605, 535.03), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((478.084, 443.879, 519.789), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((459.443, 463.602, 512.271), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((432.054, 470.246, 512.889), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((407.621, 484.291, 515.604), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((379.964, 490.416, 517.756), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((404.183, 491.253, 600.111), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((353.742, 484.147, 434.656), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((435.58, 469.772, 626.287), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((441.849, 448.888, 615.107), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((458.356, 401.83, 587.61), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((475.765, 353.437, 559.115), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((544.667, 395.707, 545.915), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((449.222, 255.85, 531.598), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((483.333, 396.597, 511.448), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((478.082, 392.75, 538.905), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((479.678, 386.273, 566.407), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((476.959, 373.686, 591.93), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((467.671, 371.983, 618.907), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((459.332, 369.701, 646.283), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((462.48, 436.145, 600.066), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((454.734, 305.751, 697.003), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
4a6e8d053a94706c74d944c4a9ad04c88fc8fefd | 350db570521d3fc43f07df645addb9d6e648c17e | /1352_Product_of_the_Last_K_Numbers/solution.py | 5c9cce8733e553c8d05006544a5caa5ba5d53971 | [] | no_license | benjaminhuanghuang/ben-leetcode | 2efcc9185459a1dd881c6e2ded96c42c5715560a | a2cd0dc5e098080df87c4fb57d16877d21ca47a3 | refs/heads/master | 2022-12-10T02:30:06.744566 | 2022-11-27T04:06:52 | 2022-11-27T04:06:52 | 236,252,145 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | '''
1352. Product of the Last K Numbers
Level: Medium
https://leetcode.com/problems/product-of-the-last-k-numbers
'''
'''
Solution:
'''
class ProductOfNumbers:
    """Stream of integers answering "product of the last k numbers" in O(1).

    Maintains prefix products since the most recent zero: prefix[i] is the
    product of all numbers added after that zero. A query for k numbers that
    spans a zero therefore (correctly) returns 0.
    """

    def __init__(self):
        # Sentinel 1 so that prefix[-1] / prefix[-1 - k] works for any k.
        self.prefix = [1]

    def add(self, num: int) -> None:
        """Append *num* to the stream."""
        if num == 0:
            # Any window containing this zero has product 0; restart prefixes.
            self.prefix = [1]
        else:
            self.prefix.append(self.prefix[-1] * num)

    def getProduct(self, k: int) -> int:
        """Return the product of the last *k* numbers added."""
        if k >= len(self.prefix):
            # Window reaches back past the last zero.
            return 0
        # Integer division is exact: prefix[-1] is a multiple of the divisor.
        return self.prefix[-1] // self.prefix[-1 - k]
# Your ProductOfNumbers object will be instantiated and called as such:
# obj = ProductOfNumbers()
# obj.add(num)
# param_2 = obj.getProduct(k) | [
"[email protected]"
] | |
b373db01d970b6ce88143768f0517f0be7b4a386 | 5f6112f58e4e570a99e58ac0424b36a0fb7da744 | /preps/apps/sports/track/admin.py | 0f91fc904b1abfaa5ef3415e96d14c9cb520bad5 | [] | no_license | huyenme/django-preps | 335915f2d6c68cde0410bccbabaeee48f3ccc39a | fede0d245cbcff5781b6a4bf1cecc0477897d9e4 | refs/heads/master | 2020-05-02T22:52:46.779618 | 2011-07-05T21:10:43 | 2011-07-05T21:10:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | from django.contrib import admin
from preps.apps.sports.track.models import Meet
class MeetAdmin(admin.ModelAdmin):
    """Django admin configuration for track Meet objects.

    Groups the change-form fields into labelled fieldsets; the Location and
    Summary sections are collapsed by default via the 'collapse' class.
    """
    fieldsets = (
        ('Basic information', {
            'fields': (('meet_type', 'meet_date_time'), ('teams', 'season'))
        }),
        ('Administration', {
            'fields': (('featured_meet', 'conference_meet'),)
        }),
        ('Status', {
            'fields': ('status', 'status_description')
        }),
        ('Location', {
            'fields': ('meet_location', 'meet_location_address', 'meet_location_description'),
            'classes': ('collapse',),
        }),
        ('Summary', {
            'fields': ('meet_result_headline', 'meet_result_summary'),
            'classes': ('scoreboard', 'collapse'),
        })
    )
admin.site.register(Meet, MeetAdmin)
"[email protected]"
] | |
df974a2a8eb8076abb44ee0af07e0366ee280f16 | f85fb8ea9df44a3ebb19430d8fe248be7d738413 | /tests/test_plotter.py | eab925f85900d74d67c471aec580a5352247c590 | [
"MIT"
] | permissive | Samreay/ChainConsumer | 9ab425fd31a0853f5be92bf2a266ece59a1a2d01 | 888921942789f7c7a817c7a3cee3e7e5da3a26bf | refs/heads/master | 2022-09-16T16:52:06.175389 | 2022-09-04T02:48:44 | 2022-09-04T02:48:44 | 63,556,220 | 74 | 20 | MIT | 2018-09-06T00:34:00 | 2016-07-17T23:01:21 | Python | UTF-8 | Python | false | false | 2,856 | py | import numpy as np
from scipy.stats import norm
from chainconsumer import ChainConsumer
class TestChain(object):
    """Tests for ChainConsumer.plotter._get_parameter_extents."""
    # Fixed seed so the sampled chains (and hence the extents) are reproducible.
    np.random.seed(1)
    n = 2000000
    data = np.random.normal(loc=5.0, scale=1.5, size=n)
    data2 = np.random.normal(loc=3, scale=1.0, size=n)
    def test_plotter_extents1(self):
        # Single chain: extents span roughly mean +/- 3.7 sigma.
        c = ChainConsumer()
        c.add_chain(self.data, parameters=["x"])
        c.configure()
        minv, maxv = c.plotter._get_parameter_extents("x", c.chains)
        assert np.isclose(minv, (5.0 - 1.5 * 3.7), atol=0.2)
        assert np.isclose(maxv, (5.0 + 1.5 * 3.7), atol=0.2)
    def test_plotter_extents2(self):
        # A second chain with a *different* parameter must not widen "x".
        c = ChainConsumer()
        c.add_chain(self.data, parameters=["x"])
        c.add_chain(self.data + 5, parameters=["y"])
        c.configure()
        minv, maxv = c.plotter._get_parameter_extents("x", c.chains)
        assert np.isclose(minv, (5.0 - 1.5 * 3.7), atol=0.2)
        assert np.isclose(maxv, (5.0 + 1.5 * 3.7), atol=0.2)
    def test_plotter_extents3(self):
        # Two chains sharing "x": extents cover the union of both chains.
        c = ChainConsumer()
        c.add_chain(self.data, parameters=["x"])
        c.add_chain(self.data + 5, parameters=["x"])
        c.configure()
        minv, maxv = c.plotter._get_parameter_extents("x", c.chains)
        assert np.isclose(minv, (5.0 - 1.5 * 3.7), atol=0.2)
        assert np.isclose(maxv, (10.0 + 1.5 * 3.7), atol=0.2)
    def test_plotter_extents4(self):
        # Restricting to the first chain ignores the other chain entirely.
        c = ChainConsumer()
        c.add_chain(self.data, parameters=["x"])
        c.add_chain(self.data + 5, parameters=["y"])
        c.configure()
        minv, maxv = c.plotter._get_parameter_extents("x", c.chains[:1])
        assert np.isclose(minv, (5.0 - 1.5 * 3.7), atol=0.2)
        assert np.isclose(maxv, (5.0 + 1.5 * 3.7), atol=0.2)
    def test_plotter_extents5(self):
        # Grid chain: extents are exactly the grid bounds in "x".
        x, y = np.linspace(-3, 3, 200), np.linspace(-5, 5, 200)
        xx, yy = np.meshgrid(x, y, indexing='ij')
        xs, ys = xx.flatten(), yy.flatten()
        chain = np.vstack((xs, ys)).T
        pdf = (1 / (2 * np.pi)) * np.exp(-0.5 * (xs * xs + ys * ys / 4))
        c = ChainConsumer()
        c.add_chain(chain, parameters=['x', 'y'], weights=pdf, grid=True)
        c.configure()
        minv, maxv = c.plotter._get_parameter_extents("x", c.chains)
        assert np.isclose(minv, -3, atol=0.001)
        assert np.isclose(maxv, 3, atol=0.001)
    def test_plotter_extents6(self):
        # Point-only chains (plot_contour=False): extents track the maximum
        # posterior points, which sit near the shifted means -1, 0, +1.
        c = ChainConsumer()
        for mid in np.linspace(-1, 1, 3):
            data = np.random.normal(loc=0, size=1000)
            posterior = norm.logpdf(data)
            data += mid
            c.add_chain(data, parameters=['x'], posterior=posterior, plot_point=True, plot_contour=False)
        c.configure()
        minv, maxv = c.plotter._get_parameter_extents("x", c.chains)
        assert np.isclose(minv, -1, atol=0.01)
        assert np.isclose(maxv, 1, atol=0.01)
| [
"[email protected]"
] | |
9986a1659cfdc8dc4bd6a6862039eead3c2b5e84 | 30e21273013b228f5421a5d685660cc370c92d7c | /dgfont.py | 2e37de03891875ec73a74d5581f477352fd09cf6 | [] | no_license | hwinther/homeautomation | 0aecccae16995f53b14d0de07eb6d61cc91a52fc | 0ee1f97238bdd5c2c7367a75f4376f2d662c4b9c | refs/heads/master | 2021-01-10T10:11:06.803852 | 2018-10-22T22:06:36 | 2018-10-22T22:06:36 | 44,271,892 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,136 | py | #!/usr/bin/python
# coding=utf-8
from datetime import datetime
# Bitmap glyphs: each character maps to a 5-row x 4-column grid of 0/1 pixel
# flags (1 = lit). Digits, ':' and space suffice to render an HH:MM clock.
letters = {
    '0': [
        [0, 1, 1, 0],
        [1, 0, 0, 1],
        [1, 0, 0, 1],
        [1, 0, 0, 1],
        [0, 1, 1, 0],
    ],
    '1': [
        [0, 0, 1, 1],
        [0, 1, 0, 1],
        [0, 0, 0, 1],
        [0, 0, 0, 1],
        [0, 0, 0, 1],
    ],
    '2': [
        [0, 1, 1, 0],
        [1, 0, 0, 1],
        [0, 0, 1, 1],
        [0, 1, 0, 0],
        [1, 1, 1, 1],
    ],
    '3': [
        [0, 1, 1, 1],
        [0, 0, 0, 1],
        [0, 1, 1, 1],
        [0, 0, 0, 1],
        [0, 1, 1, 1],
    ],
    '4': [
        [0, 0, 1, 1],
        [0, 1, 0, 1],
        [1, 0, 0, 1],
        [1, 1, 1, 1],
        [0, 0, 0, 1],
    ],
    '5': [
        [1, 1, 1, 1],
        [1, 0, 0, 0],
        [1, 1, 1, 0],
        [0, 0, 0, 1],
        [1, 1, 1, 0],
    ],
    '6': [
        [0, 1, 1, 1],
        [1, 0, 0, 0],
        [1, 1, 1, 1],
        [1, 0, 0, 1],
        [0, 1, 1, 0],
    ],
    '7': [
        [1, 1, 1, 1],
        [0, 0, 0, 1],
        [0, 1, 1, 1],
        [0, 0, 0, 1],
        [0, 0, 0, 1],
    ],
    '8': [
        [1, 1, 1, 1],
        [1, 0, 0, 1],
        [1, 1, 1, 1],
        [1, 0, 0, 1],
        [1, 1, 1, 1],
    ],
    '9': [
        [1, 1, 1, 1],
        [1, 0, 0, 1],
        [1, 1, 1, 1],
        [0, 0, 0, 1],
        [0, 0, 0, 1],
    ],
    ':': [
        [0, 1, 1, 0],
        [0, 1, 1, 0],
        [0, 0, 0, 0],
        [0, 1, 1, 0],
        [0, 1, 1, 0],
    ],
    ' ': [  # empty, sample
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
    ],
}
def displayLetter(matrix, letter, x_rel, y_rel, r, g, b):
    """Draw one glyph on *matrix* with colour (r, g, b), top-left at (x_rel, y_rel).

    Returns (x, y): the column just right of the glyph and the row just
    below it. (Removed the unused firstRun/iteration locals and the dead
    x = 0 initialiser from the original.)
    """
    y = y_rel
    for row in letters[letter]:
        x = x_rel
        for pixel_on in row:
            if pixel_on == 1:
                matrix.SetPixel(x, y, r, g, b)
            x += 1
        y += 1
    return x, y
def displayText(matrix, text, x_rel, y_rel, r, g, b):
    """Draw *text* left-to-right on one line starting at (x_rel, y_rel).

    Returns (x, y): the column just past the text (1-pixel spacer included
    after each glyph) and the top of the next line (glyph height 5 + 1).
    """
    x = x_rel
    for letter in text:
        x, _ = displayLetter(matrix, letter, x, y_rel, r, g, b)
        x += 1  # 1-pixel spacer between glyphs
    return x, y_rel + 6  # next line: 5-pixel glyph height + 1-pixel gap
def displayCurrentTime(matrix, x_rel, y_rel, r, g, b):
    """Render the current local time as HH:MM at (x_rel, y_rel)."""
    dtime = datetime.now().strftime('%H:%M')
    displayText(matrix, dtime, x_rel, y_rel, r, g, b)
| [
"[email protected]"
] | |
c8f6df806cbd7433aff54e08bcc8bfbd20c702d0 | 8ea15bb41fa672a8abd5cbc6e4c3413a910e5fb4 | /api/admin.py | fdc895cff91355bb421614c370acdbf99f297eca | [] | no_license | bluedazzle/smart_screen | 172bb1672bd9995e27889700a7320c0d9e207abe | f4d40807e7c6684ae35d8c1c7f386ae2ff8d8925 | refs/heads/master | 2021-07-04T07:30:50.807052 | 2019-01-23T06:47:56 | 2019-01-23T06:47:56 | 111,908,525 | 1 | 0 | null | 2019-05-16T03:30:51 | 2017-11-24T10:56:46 | Python | UTF-8 | Python | false | false | 812 | py | from django.contrib import admin
from models import *
# Register your models here.
class GoodsInventoryAdmin(admin.ModelAdmin):
    """Admin for goods inventory records, searchable by barcode."""
    search_fields = ['barcode']
class GoodsAdmin(admin.ModelAdmin):
    """Shared admin for order-like models, searchable by owning object's id
    (the `belong` foreign key)."""
    search_fields = ['belong__id']
# Register every model with the default admin site; order/card/abnormal
# models share GoodsAdmin so they are searchable by `belong__id`.
admin.site.register(FuelOrder, GoodsAdmin)
admin.site.register(FuelTank)
admin.site.register(Site)
admin.site.register(GoodsOrder, GoodsAdmin)
admin.site.register(InventoryRecord)
admin.site.register(Classification)
admin.site.register(SecondClassification)
admin.site.register(ThirdClassification)
admin.site.register(Supplier)
admin.site.register(Receiver)
admin.site.register(DeliveryRecord)
admin.site.register(FuelPlan)
admin.site.register(CardRecord, GoodsAdmin)
admin.site.register(AbnormalRecord, GoodsAdmin)
admin.site.register(GoodsInventory, GoodsInventoryAdmin)
| [
"[email protected]"
] | |
67c88c5642b885e5c3ffa1ab317224f11a47d97d | a26a3605f8055a28083107b98800d72bfcc11fd9 | /Koncreti/Koncreti/middlewares.py | efea3c768875aa07a437ec6da0bd57857a844b6a | [] | no_license | huzaifabaloch/Modern-Web-Scraping | 521c3bd9302c3be0743a16cb65a02bf5a6666d9e | 021c5f987d7c150118340ccbf986be51ecdca349 | refs/heads/master | 2020-09-26T23:40:46.306998 | 2020-06-30T17:10:49 | 2020-06-30T17:10:49 | 226,367,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,601 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class KoncretiSpiderMiddleware(object):
    """Pass-through spider middleware (Scrapy template defaults).

    Every hook forwards its input unchanged; hooks left undefined would
    behave the same way, so this class exists as a customisation point.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; subscribe to spider_opened for logging.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Returning None lets the response continue into the spider.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every item/request produced by the spider untouched.
        for item_or_request in result:
            yield item_or_request

    def process_spider_exception(self, response, exception, spider):
        # No special handling; defer to other middleware / Scrapy defaults.
        return None

    def process_start_requests(self, start_requests, spider):
        # Forward the spider's start requests verbatim (requests only).
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class KoncretiDownloaderMiddleware(object):
    """Pass-through downloader middleware (Scrapy template defaults).

    Requests and responses flow through unmodified; hooks left undefined
    would behave the same way.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; subscribe to spider_opened for logging.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # None == continue processing this request normally.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # No special handling; continue with Scrapy's default behaviour.
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
b14b1b13fd24073c9ebde57c05e4f234b0ac0da4 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/heatmapgl/colorbar/tickfont/_family.py | ba495312ec633e48e44034f6156ee76e0784f0c1 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 525 | py | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``heatmapgl.colorbar.tickfont.family`` property:
    a strict, non-blank font-family string."""
    def __init__(
        self, plotly_name="family", parent_name="heatmapgl.colorbar.tickfont", **kwargs
    ):
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),  # changing it triggers a recalc
            no_blank=kwargs.pop("no_blank", True),      # empty string rejected
            strict=kwargs.pop("strict", True),          # must be a str, no coercion
            **kwargs
        )
| [
"[email protected]"
] | |
ac409fba8612780ecc144aa3fc94607609a6c797 | f8b5aafac15f408a48fabf853a918015c927e6fe | /backup/virtualenv/venv27/lib/python2.7/site-packages/ansible/modules/extras/cloud/lxc/lxc_container.py | 518038124eb6cca7eeb78adc95a6b3a7b688fbb6 | [] | no_license | to30/tmp | bda1ac0ca3fc61e96c2a1c491367b698d7e97937 | ec809683970af6787728c2c41f161f416155982a | refs/heads/master | 2021-01-01T04:25:52.040770 | 2016-05-13T16:34:59 | 2016-05-13T16:34:59 | 58,756,087 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 55,215 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Kevin Carter <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: lxc_container
short_description: Manage LXC Containers
version_added: 1.8.0
description:
- Management of LXC containers
author: "Kevin Carter (@cloudnull)"
options:
name:
description:
- Name of a container.
required: true
backing_store:
choices:
- dir
- lvm
- loop
- btrfs
- overlayfs
- zfs
description:
- Backend storage type for the container.
required: false
default: dir
template:
description:
- Name of the template to use within an LXC create.
required: false
default: ubuntu
template_options:
description:
- Template options when building the container.
required: false
config:
description:
- Path to the LXC configuration file.
required: false
default: null
lv_name:
description:
- Name of the logical volume, defaults to the container name.
default: $CONTAINER_NAME
required: false
vg_name:
description:
- If Backend store is lvm, specify the name of the volume group.
default: lxc
required: false
thinpool:
description:
- Use LVM thin pool called TP.
required: false
fs_type:
description:
- Create fstype TYPE.
default: ext4
required: false
fs_size:
description:
- File system Size.
default: 5G
required: false
directory:
description:
- Place rootfs directory under DIR.
required: false
zfs_root:
description:
- Create zfs under given zfsroot.
required: false
container_command:
description:
- Run a command within a container.
required: false
lxc_path:
description:
- Place container under PATH
required: false
container_log:
choices:
- true
- false
description:
- Enable a container log for host actions to the container.
default: false
container_log_level:
choices:
- INFO
- ERROR
- DEBUG
description:
- Set the log level for a container where *container_log* was set.
required: false
default: INFO
clone_name:
version_added: "2.0"
description:
- Name of the new cloned server. This is only used when state is
clone.
required: false
default: false
clone_snapshot:
version_added: "2.0"
required: false
choices:
- true
- false
description:
- Create a snapshot a container when cloning. This is not supported
by all container storage backends. Enabling this may fail if the
backing store does not support snapshots.
default: false
archive:
choices:
- true
- false
description:
- Create an archive of a container. This will create a tarball of the
running container.
default: false
archive_path:
description:
- Path the save the archived container. If the path does not exist
the archive method will attempt to create it.
default: null
archive_compression:
choices:
- gzip
- bzip2
- none
description:
- Type of compression to use when creating an archive of a running
container.
default: gzip
state:
choices:
- started
- stopped
- restarted
- absent
- frozen
description:
- Define the state of a container. If you clone a container using
`clone_name` the newly cloned container created in a stopped state.
The running container will be stopped while the clone operation is
happening and upon completion of the clone the original container
state will be restored.
required: false
default: started
container_config:
description:
- list of 'key=value' options to use when configuring a container.
required: false
requirements:
- 'lxc >= 1.0 # OS package'
- 'python >= 2.6 # OS Package'
- 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
notes:
- Containers must have a unique name. If you attempt to create a container
with a name that already exists in the users namespace the module will
simply return as "unchanged".
- The "container_command" can be used with any state except "absent". If
used with state "stopped" the container will be "started", the command
executed, and then the container "stopped" again. Likewise if the state
is "stopped" and the container does not exist it will be first created,
"started", the command executed, and then "stopped". If you use a "|"
      in the variable you can use common script formatting within the variable
      itself. The "container_command" option will always execute as BASH.
When using "container_command" a log file is created in the /tmp/ directory
which contains both stdout and stderr of any command executed.
- If "archive" is **true** the system will attempt to create a compressed
tarball of the running container. The "archive" option supports LVM backed
containers and will create a snapshot of the running container when
creating the archive.
- If your distro does not have a package for "python2-lxc", which is a
requirement for this module, it can be installed from source at
"https://github.com/lxc/python2-lxc" or installed via pip using the package
name lxc-python2.
"""
EXAMPLES = """
- name: Create a started container
lxc_container:
name: test-container-started
container_log: true
template: ubuntu
state: started
template_options: --release trusty
- name: Create a stopped container
lxc_container:
name: test-container-stopped
container_log: true
template: ubuntu
state: stopped
template_options: --release trusty
- name: Create a frozen container
lxc_container:
name: test-container-frozen
container_log: true
template: ubuntu
state: frozen
template_options: --release trusty
container_command: |
echo 'hello world.' | tee /opt/started-frozen
# Create filesystem container, configure it, and archive it, and start it.
- name: Create filesystem container
lxc_container:
name: test-container-config
backing_store: dir
container_log: true
template: ubuntu
state: started
archive: true
archive_compression: none
container_config:
- "lxc.aa_profile=unconfined"
- "lxc.cgroup.devices.allow=a *:* rmw"
template_options: --release trusty
# Create an lvm container, run a complex command in it, add additional
# configuration to it, create an archive of it, and finally leave the container
# in a frozen state. The container archive will be compressed using bzip2
- name: Create a frozen lvm container
lxc_container:
name: test-container-lvm
container_log: true
template: ubuntu
state: frozen
backing_store: lvm
template_options: --release trusty
container_command: |
apt-get update
apt-get install -y vim lxc-dev
echo 'hello world.' | tee /opt/started
if [[ -f "/opt/started" ]]; then
echo 'hello world.' | tee /opt/found-started
fi
container_config:
- "lxc.aa_profile=unconfined"
- "lxc.cgroup.devices.allow=a *:* rmw"
archive: true
archive_compression: bzip2
register: lvm_container_info
- name: Debug info on container "test-container-lvm"
debug: var=lvm_container_info
- name: Run a command in a container and ensure it is in a "stopped" state.
lxc_container:
name: test-container-started
state: stopped
container_command: |
echo 'hello world.' | tee /opt/stopped
- name: Run a command in a container and ensure it is in a "frozen" state.
lxc_container:
name: test-container-stopped
state: frozen
container_command: |
echo 'hello world.' | tee /opt/frozen
- name: Start a container
lxc_container:
name: test-container-stopped
state: started
- name: Run a command in a container and then restart it
lxc_container:
name: test-container-started
state: restarted
container_command: |
echo 'hello world.' | tee /opt/restarted
- name: Run a complex command within a "running" container
lxc_container:
name: test-container-started
container_command: |
apt-get update
apt-get install -y curl wget vim apache2
echo 'hello world.' | tee /opt/started
if [[ -f "/opt/started" ]]; then
echo 'hello world.' | tee /opt/found-started
fi
# Create an archive of an existing container, save the archive to a defined
# path and then destroy it.
- name: Archive container
lxc_container:
name: test-container-started
state: absent
archive: true
archive_path: /opt/archives
# Create a container using overlayfs, create an archive of it, create a
# snapshot clone of the container and finally leave the container
# in a frozen state. The container archive will be compressed using gzip.
- name: Create an overlayfs container archive and clone it
lxc_container:
name: test-container-overlayfs
container_log: true
template: ubuntu
state: started
backing_store: overlayfs
template_options: --release trusty
clone_snapshot: true
clone_name: test-container-overlayfs-clone-snapshot
archive: true
archive_compression: gzip
register: clone_container_info
- name: debug info on container "test-container"
debug: var=clone_container_info
- name: Clone a container using snapshot
lxc_container:
name: test-container-overlayfs-clone-snapshot
backing_store: overlayfs
clone_name: test-container-overlayfs-clone-snapshot2
clone_snapshot: true
- name: Create a new container and clone it
lxc_container:
name: test-container-new-archive
backing_store: dir
clone_name: test-container-new-archive-clone
- name: Archive and clone a container then destroy it
lxc_container:
name: test-container-new-archive
state: absent
clone_name: test-container-new-archive-destroyed-clone
archive: true
archive_compression: gzip
- name: Start a cloned container.
lxc_container:
name: test-container-new-archive-destroyed-clone
state: started
- name: Destroy a container
lxc_container:
name: "{{ item }}"
state: absent
with_items:
- test-container-stopped
- test-container-started
- test-container-frozen
- test-container-lvm
- test-container-config
- test-container-overlayfs
- test-container-overlayfs-clone
- test-container-overlayfs-clone-snapshot
- test-container-overlayfs-clone-snapshot2
- test-container-new-archive
- test-container-new-archive-clone
- test-container-new-archive-destroyed-clone
"""
# Import the LXC python bindings. Availability is recorded in HAS_LXC so
# the module can fail with a clean error message instead of a traceback
# when the bindings (pip package "lxc-python2") are missing.
try:
    import lxc
except ImportError:
    HAS_LXC = False
else:
    HAS_LXC = True

# LXC_COMPRESSION_MAP is a map of available compression types when creating
# an archive of a container. "extension" is appended to the archive file
# name and "argument" is passed straight to tar.
LXC_COMPRESSION_MAP = {
    'gzip': {
        'extension': 'tar.tgz',
        'argument': '-czf'
    },
    'bzip2': {
        'extension': 'tar.bz2',
        'argument': '-cjf'
    },
    'none': {
        'extension': 'tar',
        'argument': '-cf'
    }
}

# LXC_COMMAND_MAP is a map of variables that are available to a method based
# on the state the container is in. Keys are Ansible module parameter names
# and values are the matching lxc command-line flags they translate to.
LXC_COMMAND_MAP = {
    'create': {
        'variables': {
            'config': '--config',
            'template': '--template',
            'backing_store': '--bdev',
            'lxc_path': '--lxcpath',
            'lv_name': '--lvname',
            'vg_name': '--vgname',
            'thinpool': '--thinpool',
            'fs_type': '--fstype',
            'fs_size': '--fssize',
            'directory': '--dir',
            'zfs_root': '--zfsroot'
        }
    },
    'clone': {
        'variables': {
            'backing_store': '--backingstore',
            'lxc_path': '--lxcpath',
            'fs_size': '--fssize',
            'name': '--orig',
            'clone_name': '--new'
        }
    }
}

# LXC_BACKING_STORE is a map of available storage backends and options that
# are incompatible with the given storage backend. The listed options are
# silently dropped from the command line for that backend.
LXC_BACKING_STORE = {
    'dir': [
        'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
    ],
    'lvm': [
        'zfs_root'
    ],
    'btrfs': [
        'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
    ],
    'loop': [
        'lv_name', 'vg_name', 'thinpool', 'zfs_root'
    ],
    'overlayfs': [
        'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
    ],
    'zfs': [
        'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
    ]
}

# LXC_LOGGING_LEVELS is a map of available log levels; each canonical level
# maps to the spellings accepted from the user.
LXC_LOGGING_LEVELS = {
    'INFO': ['info', 'INFO', 'Info'],
    'ERROR': ['error', 'ERROR', 'Error'],
    'DEBUG': ['debug', 'DEBUG', 'Debug']
}

# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
# when a particular state is evoked (method names are looked up on
# LxcContainerManagement).
LXC_ANSIBLE_STATES = {
    'started': '_started',
    'stopped': '_stopped',
    'restarted': '_restarted',
    'absent': '_destroyed',
    'frozen': '_frozen',
    'clone': '_clone'
}

# This is used to attach to a running container and execute commands from
# within the container on the host. This will provide local access to a
# container without using SSH. The template will attempt to work within the
# home directory of the user that was attached to the container and source
# that users environment variables by default.
ATTACH_TEMPLATE = """#!/usr/bin/env bash
pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
if [[ -f ".bashrc" ]];then
    source .bashrc
fi
popd

# User defined command
%(container_command)s
"""
def create_script(command):
    """Write the user command into a temporary script and execute it.

    The script body is rendered from ``ATTACH_TEMPLATE`` so the command runs
    under bash with the attached user's environment sourced. stdout/stderr of
    the command are kept in temp files (``lxc-attach-script-log`` /
    ``lxc-attach-script-err``) for post-mortem inspection; the script itself
    is removed after execution.

    :param command: command to run, this can be a script and can use spacing
                    with newlines as separation.
    :type command: ``str``
    """
    # Local imports keep this function self-contained when it is invoked
    # through ``Container.attach_wait``.
    import os
    import subprocess
    import tempfile

    (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
    f = os.fdopen(fd, 'wb')
    try:
        f.write(ATTACH_TEMPLATE % {'container_command': command})
        f.flush()
    finally:
        f.close()

    # Ensure the script is executable (owner rwx only). ``0o700`` replaces
    # the Python 2-only literal ``0700`` which is a SyntaxError under
    # Python 3; the value is identical and the spelling is valid from
    # Python 2.6 onward (this module requires python >= 2.6).
    os.chmod(script_file, 0o700)

    # Output log file.
    stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
    # Error log file.
    stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')

    # Execute the script command.
    try:
        subprocess.Popen(
            [script_file],
            stdout=stdout_file,
            stderr=stderr_file
        ).communicate()
    finally:
        # Close the log files.
        stderr_file.close()
        stdout_file.close()

        # Remove the script file upon completion of execution.
        os.remove(script_file)
class LxcContainerManagement(object):
def __init__(self, module):
"""Management of LXC containers via Ansible.
:param module: Processed Ansible Module.
:type module: ``object``
"""
self.module = module
self.state = self.module.params.get('state', None)
self.state_change = False
self.lxc_vg = None
self.container_name = self.module.params['name']
self.container = self.get_container_bind()
self.archive_info = None
self.clone_info = None
    def get_container_bind(self):
        """Return an ``lxc.Container`` binding for ``self.container_name``.

        The binding is created regardless of whether the container exists
        on disk; existence is checked separately via ``_container_exists``.
        """
        return lxc.Container(name=self.container_name)
@staticmethod
def _roundup(num):
"""Return a rounded floating point number.
:param num: Number to round up.
:type: ``float``
:returns: Rounded up number.
:rtype: ``int``
"""
num, part = str(num).split('.')
num = int(num)
if int(part) != 0:
num += 1
return num
@staticmethod
def _container_exists(container_name):
"""Check if a container exists.
:param container_name: Name of the container.
:type: ``str``
:returns: True or False if the container is found.
:rtype: ``bol``
"""
if [i for i in lxc.list_containers() if i == container_name]:
return True
else:
return False
@staticmethod
def _add_variables(variables_dict, build_command):
"""Return a command list with all found options.
:param variables_dict: Pre-parsed optional variables used from a
seed command.
:type variables_dict: ``dict``
:param build_command: Command to run.
:type build_command: ``list``
:returns: list of command options.
:rtype: ``list``
"""
for key, value in variables_dict.items():
build_command.append(
'%s %s' % (key, value)
)
else:
return build_command
def _get_vars(self, variables):
"""Return a dict of all variables as found within the module.
:param variables: Hash of all variables to find.
:type variables: ``dict``
"""
# Remove incompatible storage backend options.
variables = variables.copy()
for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
variables.pop(v, None)
return_dict = dict()
false_values = [None, ''] + BOOLEANS_FALSE
for k, v in variables.items():
_var = self.module.params.get(k)
if _var not in false_values:
return_dict[v] = _var
else:
return return_dict
def _run_command(self, build_command, unsafe_shell=False, timeout=600):
"""Return information from running an Ansible Command.
This will squash the build command list into a string and then
execute the command via Ansible. The output is returned to the method.
This output is returned as `return_code`, `stdout`, `stderr`.
Prior to running the command the method will look to see if the LXC
lockfile is present. If the lockfile "/var/lock/subsys/lxc" the method
will wait upto 10 minutes for it to be gone; polling every 5 seconds.
:param build_command: Used for the command and all options.
:type build_command: ``list``
:param unsafe_shell: Enable or Disable unsafe sell commands.
:type unsafe_shell: ``bol``
:param timeout: Time before the container create process quites.
:type timeout: ``int``
"""
lockfile = '/var/lock/subsys/lxc'
for _ in xrange(timeout):
if os.path.exists(lockfile):
time.sleep(1)
else:
return self.module.run_command(
' '.join(build_command),
use_unsafe_shell=unsafe_shell
)
else:
message = (
'The LXC subsystem is locked and after 5 minutes it never'
' became unlocked. Lockfile [ %s ]' % lockfile
)
self.failure(
error='LXC subsystem locked',
rc=0,
msg=message
)
    def _config(self):
        """Configure an LXC container.

        Write new configuration values to the lxc config file. This will
        stop the container if it's running write the new options and then
        restart the container upon completion (restoring a frozen state
        if that is how the container was found).

        :returns: ``False`` when no ``container_config`` was supplied;
            otherwise ``None``.
        """
        _container_config = self.module.params.get('container_config')
        if not _container_config:
            return False

        container_config_file = self.container.config_file_name
        with open(container_config_file, 'rb') as f:
            container_config = f.readlines()

        # Note used ast literal_eval because AnsibleModule does not provide for
        # adequate dictionary parsing.
        # Issue: https://github.com/ansible/ansible/issues/7679
        # TODO(cloudnull) adjust import when issue has been resolved.
        import ast
        options_dict = ast.literal_eval(_container_config)
        # Each option arrives as a "key=value" string; split on the first '='.
        parsed_options = [i.split('=', 1) for i in options_dict]

        config_change = False
        for key, value in parsed_options:
            new_entry = '%s = %s\n' % (key, value)
            for option_line in container_config:
                # Look for key in config
                if option_line.startswith(key):
                    _, _value = option_line.split('=', 1)
                    # Collapse surrounding/inner whitespace for comparison.
                    config_value = ' '.join(_value.split())
                    line_index = container_config.index(option_line)
                    # If the sanitized values don't match replace them
                    # NOTE(review): the new entry is inserted AFTER the old
                    # line rather than replacing it; LXC appears to honor the
                    # later entry -- confirm before changing this behavior.
                    if value != config_value:
                        line_index += 1
                        if new_entry not in container_config:
                            config_change = True
                            container_config.insert(line_index, new_entry)
                    # Break the flow as values are written or not at this point
                    break
            else:
                # Key not present anywhere in the config: append it.
                config_change = True
                container_config.append(new_entry)

        # If the config changed restart the container.
        if config_change:
            container_state = self._get_state()
            if container_state != 'stopped':
                self.container.stop()

            with open(container_config_file, 'wb') as f:
                f.writelines(container_config)

            self.state_change = True
            if container_state == 'running':
                self._container_startup()
            elif container_state == 'frozen':
                self._container_startup()
                self.container.freeze()
    def _container_create_clone(self):
        """Clone a new LXC container from an existing container.

        This method will clone an existing container to a new container using
        the `clone_name` variable as the new container name. The method will
        create a container if the container `name` does not exist.

        Note that cloning a container will ensure that the original container
        is "stopped" before the clone can be done. Because this operation can
        require a state change the method will return the original container
        to its prior state upon completion of the clone.

        Once the clone is complete the new container will be left in a stopped
        state.

        :returns: ``True`` once the clone has completed.
        :rtype: ``bool``
        """
        # Ensure that the state of the original container is stopped
        container_state = self._get_state()
        if container_state != 'stopped':
            self.state_change = True
            self.container.stop()

        build_command = [
            self.module.get_bin_path('lxc-clone', True),
        ]

        # Translate module params into lxc-clone CLI flags.
        build_command = self._add_variables(
            variables_dict=self._get_vars(
                variables=LXC_COMMAND_MAP['clone']['variables']
            ),
            build_command=build_command
        )

        # Load logging for the instance when creating it.
        if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
            build_command.append('--snapshot')
        # Check for backing_store == overlayfs if so force the use of snapshot
        # If overlay fs is used and snapshot is unset the clone command will
        # fail with an unsupported type.
        elif self.module.params.get('backing_store') == 'overlayfs':
            build_command.append('--snapshot')

        rc, return_data, err = self._run_command(build_command)
        if rc != 0:
            message = "Failed executing lxc-clone."
            self.failure(
                err=err, rc=rc, msg=message, command=' '.join(
                    build_command
                )
            )
        else:
            self.state_change = True
            # Restore the original state of the origin container if it was
            # not in a stopped state.
            if container_state == 'running':
                self.container.start()
            elif container_state == 'frozen':
                self.container.start()
                self.container.freeze()

        return True
    def _create(self):
        """Create a new LXC container.

        This method will build and execute a shell command to build the
        container. It would have been nice to simply use the lxc python library
        however at the time this was written the python library, in both py2
        and py3 didn't support some of the more advanced container create
        processes. These missing processes mainly revolve around backing
        LXC containers with block devices.
        """
        build_command = [
            self.module.get_bin_path('lxc-create', True),
            '--name %s' % self.container_name,
            '--quiet'
        ]

        # Translate module params into lxc-create CLI flags.
        build_command = self._add_variables(
            variables_dict=self._get_vars(
                variables=LXC_COMMAND_MAP['create']['variables']
            ),
            build_command=build_command
        )

        # Load logging for the instance when creating it.
        if self.module.params.get('container_log') in BOOLEANS_TRUE:
            # Set the logging path to the /var/log/lxc if uid is root. else
            # set it to the home folder of the user executing.
            try:
                if os.getuid() != 0:
                    log_path = os.getenv('HOME')
                else:
                    if not os.path.isdir('/var/log/lxc/'):
                        os.makedirs('/var/log/lxc/')
                    log_path = '/var/log/lxc/'
            except OSError:
                # Fall back to the user's home if /var/log/lxc/ cannot be made.
                log_path = os.getenv('HOME')

            build_command.extend([
                '--logfile %s' % os.path.join(
                    log_path, 'lxc-%s.log' % self.container_name
                ),
                '--logpriority %s' % self.module.params.get(
                    'container_log_level'
                ).upper()
            ])

        # Add the template commands to the end of the command if there are any
        template_options = self.module.params.get('template_options', None)
        if template_options:
            build_command.append('-- %s' % template_options)

        rc, return_data, err = self._run_command(build_command)
        if rc != 0:
            message = "Failed executing lxc-create."
            self.failure(
                err=err, rc=rc, msg=message, command=' '.join(build_command)
            )
        else:
            self.state_change = True
def _container_data(self):
"""Returns a dict of container information.
:returns: container data
:rtype: ``dict``
"""
return {
'interfaces': self.container.get_interfaces(),
'ips': self.container.get_ips(),
'state': self._get_state(),
'init_pid': int(self.container.init_pid)
}
def _unfreeze(self):
"""Unfreeze a container.
:returns: True or False based on if the container was unfrozen.
:rtype: ``bol``
"""
unfreeze = self.container.unfreeze()
if unfreeze:
self.state_change = True
return unfreeze
def _get_state(self):
"""Return the state of a container.
If the container is not found the state returned is "absent"
:returns: state of a container as a lower case string.
:rtype: ``str``
"""
if self._container_exists(container_name=self.container_name):
return str(self.container.state).lower()
else:
return str('absent')
def _execute_command(self):
"""Execute a shell command."""
container_command = self.module.params.get('container_command')
if container_command:
container_state = self._get_state()
if container_state == 'frozen':
self._unfreeze()
elif container_state == 'stopped':
self._container_startup()
self.container.attach_wait(create_script, container_command)
self.state_change = True
    def _container_startup(self, timeout=60):
        """Ensure a container is started.

        Re-binds the container object and retries the start once per second
        until the container reports "running" or the timeout is exhausted.

        :param timeout: Time in seconds before the start attempt is abandoned.
        :type timeout: ``int``
        :returns: ``True`` once the container is running; on failure the
            module exits via ``self.failure``.
        """
        self.container = self.get_container_bind()
        for _ in xrange(timeout):
            if self._get_state() != 'running':
                self.container.start()
                self.state_change = True
                # post startup sleep for 1 second.
                time.sleep(1)
            else:
                return True
        else:
            # for/else: only reached when the loop exhausted without the
            # container ever reporting "running".
            self.failure(
                lxc_container=self._container_data(),
                error='Failed to start container'
                      ' [ %s ]' % self.container_name,
                rc=1,
                msg='The container [ %s ] failed to start. Check to lxc is'
                    ' available and that the container is in a functional'
                    ' state.' % self.container_name
            )
def _check_archive(self):
"""Create a compressed archive of a container.
This will store archive_info in as self.archive_info
"""
if self.module.params.get('archive') in BOOLEANS_TRUE:
self.archive_info = {
'archive': self._container_create_tar()
}
def _check_clone(self):
"""Create a compressed archive of a container.
This will store archive_info in as self.archive_info
"""
clone_name = self.module.params.get('clone_name')
if clone_name:
if not self._container_exists(container_name=clone_name):
self.clone_info = {
'cloned': self._container_create_clone()
}
else:
self.clone_info = {
'cloned': False
}
    def _destroyed(self, timeout=60):
        """Ensure a container is destroyed.

        Retries once per second until the container no longer exists or the
        timeout is exhausted. Pending archive/clone requests are honored
        before the container is stopped and destroyed.

        :param timeout: Time in seconds before the destroy operation is
            abandoned.
        :type timeout: ``int``
        """
        for _ in xrange(timeout):
            if not self._container_exists(container_name=self.container_name):
                break

            # Check if the container needs to have an archive created.
            self._check_archive()

            # Check if the container is to be cloned
            self._check_clone()

            if self._get_state() != 'stopped':
                self.state_change = True
                self.container.stop()

            if self.container.destroy():
                self.state_change = True

            # post destroy attempt sleep for 1 second.
            time.sleep(1)
        else:
            # for/else: only reached when the container still existed after
            # every destroy attempt.
            self.failure(
                lxc_container=self._container_data(),
                error='Failed to destroy container'
                      ' [ %s ]' % self.container_name,
                rc=1,
                msg='The container [ %s ] failed to be destroyed. Check'
                    ' that lxc is available and that the container is in a'
                    ' functional state.' % self.container_name
            )
    def _frozen(self, count=0):
        """Ensure a container is frozen.

        If the container does not exist the container will be created.

        :param count: number of times this command has been called by itself.
        :type count: ``int``
        """
        self.check_count(count=count, method='frozen')
        if self._container_exists(container_name=self.container_name):
            self._execute_command()

            # Perform any configuration updates
            self._config()

            container_state = self._get_state()
            if container_state == 'frozen':
                # Already frozen; nothing to do.
                pass
            elif container_state == 'running':
                self.container.freeze()
                self.state_change = True
            else:
                # Stopped container: boot it first, then freeze it.
                self._container_startup()
                self.container.freeze()
                self.state_change = True

            # Check if the container needs to have an archive created.
            self._check_archive()

            # Check if the container is to be cloned
            self._check_clone()
        else:
            # Build the missing container, then retry (count bounds the
            # recursion via check_count).
            self._create()
            count += 1
            self._frozen(count)
def _restarted(self, count=0):
"""Ensure a container is restarted.
If the container does not exist the container will be created.
:param count: number of times this command has been called by itself.
:type count: ``int``
"""
self.check_count(count=count, method='restart')
if self._container_exists(container_name=self.container_name):
self._execute_command()
# Perform any configuration updates
self._config()
if self._get_state() != 'stopped':
self.container.stop()
self.state_change = True
# Run container startup
self._container_startup()
# Check if the container needs to have an archive created.
self._check_archive()
# Check if the container is to be cloned
self._check_clone()
else:
self._create()
count += 1
self._restarted(count)
def _stopped(self, count=0):
"""Ensure a container is stopped.
If the container does not exist the container will be created.
:param count: number of times this command has been called by itself.
:type count: ``int``
"""
self.check_count(count=count, method='stop')
if self._container_exists(container_name=self.container_name):
self._execute_command()
# Perform any configuration updates
self._config()
if self._get_state() != 'stopped':
self.container.stop()
self.state_change = True
# Check if the container needs to have an archive created.
self._check_archive()
# Check if the container is to be cloned
self._check_clone()
else:
self._create()
count += 1
self._stopped(count)
    def _started(self, count=0):
        """Ensure a container is started.

        If the container does not exist the container will be created.

        :param count: number of times this command has been called by itself.
        :type count: ``int``
        """
        self.check_count(count=count, method='start')
        if self._container_exists(container_name=self.container_name):
            container_state = self._get_state()
            if container_state == 'running':
                # Already running; nothing to do.
                pass
            elif container_state == 'frozen':
                self._unfreeze()
            elif not self._container_startup():
                self.failure(
                    lxc_container=self._container_data(),
                    error='Failed to start container'
                          ' [ %s ]' % self.container_name,
                    rc=1,
                    msg='The container [ %s ] failed to start. Check to lxc is'
                        ' available and that the container is in a functional'
                        ' state.' % self.container_name
                )

            # Return data
            self._execute_command()

            # Perform any configuration updates
            self._config()

            # Check if the container needs to have an archive created.
            self._check_archive()

            # Check if the container is to be cloned
            self._check_clone()
        else:
            # Build the missing container, then retry (count bounds the
            # recursion via check_count).
            self._create()
            count += 1
            self._started(count)
def _get_lxc_vg(self):
"""Return the name of the Volume Group used in LXC."""
build_command = [
self.module.get_bin_path('lxc-config', True),
"lxc.bdev.lvm.vg"
]
rc, vg, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='Failed to read LVM VG from LXC config',
command=' '.join(build_command)
)
else:
return str(vg.strip())
def _lvm_lv_list(self):
"""Return a list of all lv in a current vg."""
vg = self._get_lxc_vg()
build_command = [
self.module.get_bin_path('lvs', True)
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='Failed to get list of LVs',
command=' '.join(build_command)
)
all_lvms = [i.split() for i in stdout.splitlines()][1:]
return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
def _get_vg_free_pe(self, vg_name):
"""Return the available size of a given VG.
:param vg_name: Name of volume.
:type vg_name: ``str``
:returns: size and measurement of an LV
:type: ``tuple``
"""
build_command = [
'vgdisplay',
vg_name,
'--units',
'g'
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='failed to read vg %s' % vg_name,
command=' '.join(build_command)
)
vg_info = [i.strip() for i in stdout.splitlines()][1:]
free_pe = [i for i in vg_info if i.startswith('Free')]
_free_pe = free_pe[0].split()
return float(_free_pe[-2]), _free_pe[-1]
def _get_lv_size(self, lv_name):
"""Return the available size of a given LV.
:param lv_name: Name of volume.
:type lv_name: ``str``
:returns: size and measurement of an LV
:type: ``tuple``
"""
vg = self._get_lxc_vg()
lv = os.path.join(vg, lv_name)
build_command = [
'lvdisplay',
lv,
'--units',
'g'
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='failed to read lv %s' % lv,
command=' '.join(build_command)
)
lv_info = [i.strip() for i in stdout.splitlines()][1:]
_free_pe = [i for i in lv_info if i.startswith('LV Size')]
free_pe = _free_pe[0].split()
return self._roundup(float(free_pe[-2])), free_pe[-1]
def _lvm_snapshot_create(self, source_lv, snapshot_name,
snapshot_size_gb=5):
"""Create an LVM snapshot.
:param source_lv: Name of lv to snapshot
:type source_lv: ``str``
:param snapshot_name: Name of lv snapshot
:type snapshot_name: ``str``
:param snapshot_size_gb: Size of snapshot to create
:type snapshot_size_gb: ``int``
"""
vg = self._get_lxc_vg()
free_space, messurement = self._get_vg_free_pe(vg_name=vg)
if free_space < float(snapshot_size_gb):
message = (
'Snapshot size [ %s ] is > greater than [ %s ] on volume group'
' [ %s ]' % (snapshot_size_gb, free_space, vg)
)
self.failure(
error='Not enough space to create snapshot',
rc=2,
msg=message
)
# Create LVM Snapshot
build_command = [
self.module.get_bin_path('lvcreate', True),
"-n",
snapshot_name,
"-s",
os.path.join(vg, source_lv),
"-L%sg" % snapshot_size_gb
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='Failed to Create LVM snapshot %s/%s --> %s'
% (vg, source_lv, snapshot_name)
)
def _lvm_lv_mount(self, lv_name, mount_point):
"""mount an lv.
:param lv_name: name of the logical volume to mount
:type lv_name: ``str``
:param mount_point: path on the file system that is mounted.
:type mount_point: ``str``
"""
vg = self._get_lxc_vg()
build_command = [
self.module.get_bin_path('mount', True),
"/dev/%s/%s" % (vg, lv_name),
mount_point,
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='failed to mountlvm lv %s/%s to %s'
% (vg, lv_name, mount_point)
)
def _create_tar(self, source_dir):
    """Create an archive of a given ``source_dir`` to ``output_path``.

    The archive is written to ``<archive_path>/<container_name>.<ext>``
    using the compression selected by the ``archive_compression``
    module parameter.

    :param source_dir: Path to the directory to be archived.
    :type source_dir: ``str``
    :returns: path of the archive that was created
    :rtype: ``str``
    """
    # Make the archive unreadable by group/other while it is being
    # written.  Spelled 0o077 (same value as the old literal 0077) so
    # the module also parses on Python 3, where 0-prefixed octal
    # literals are a SyntaxError.
    old_umask = os.umask(0o077)
    try:
        archive_path = self.module.params.get('archive_path')
        if not os.path.isdir(archive_path):
            os.makedirs(archive_path)
        archive_compression = self.module.params.get('archive_compression')
        compression_type = LXC_COMPRESSION_MAP[archive_compression]
        # remove trailing / if present.
        archive_name = '%s.%s' % (
            os.path.join(
                archive_path,
                self.container_name
            ),
            compression_type['extension']
        )
        build_command = [
            self.module.get_bin_path('tar', True),
            '--directory=%s' % os.path.realpath(
                os.path.expanduser(source_dir)
            ),
            compression_type['argument'],
            archive_name,
            '.'
        ]
        rc, stdout, err = self._run_command(
            build_command=build_command,
            unsafe_shell=True
        )
    finally:
        # Always restore the caller's umask, even if makedirs() or the
        # tar command raises -- previously an exception would leave the
        # restrictive umask in place for the rest of the process.
        os.umask(old_umask)
    if rc != 0:
        self.failure(
            err=err,
            rc=rc,
            msg='failed to create tar archive',
            command=' '.join(build_command)
        )
    return archive_name
def _lvm_lv_remove(self, lv_name):
    """Remove a logical volume via ``lvremove -f``.

    :param lv_name: The name of the logical volume
    :type lv_name: ``str``
    """
    vg = self._get_lxc_vg()
    remove_command = [
        self.module.get_bin_path('lvremove', True),
        "-f",
        "%s/%s" % (vg, lv_name),
    ]
    rc, stdout, err = self._run_command(remove_command)
    if rc == 0:
        return
    # Non-zero exit: abort the module run with the command context.
    self.failure(
        err=err,
        rc=rc,
        msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
        command=' '.join(remove_command)
    )
def _rsync_data(self, container_path, temp_dir):
    """Sync the container directory to the temp directory.

    :param container_path: path to the container container
    :type container_path: ``str``
    :param temp_dir: path to the temporary local working directory
    :type temp_dir: ``str``
    """
    # This loop is created to support overlayfs archives. This should
    # squash all of the layers into a single archive.  An overlayfs
    # rootfs looks like "overlayfs:<lowerdir>:<upperdir>", so the
    # "overlayfs" token is dropped and each layer path is synced.
    fs_paths = container_path.split(':')
    if 'overlayfs' in fs_paths:
        fs_paths.pop(fs_paths.index('overlayfs'))
    for fs_path in fs_paths:
        # Set the path to the container data.  dirname() syncs the
        # parent directory (e.g. ".../container/" rather than
        # ".../container/rootfs") so rsync copies the whole tree.
        fs_path = os.path.dirname(fs_path)
        # Run the sync command
        build_command = [
            self.module.get_bin_path('rsync', True),
            '-aHAX',
            fs_path,
            temp_dir
        ]
        rc, stdout, err = self._run_command(
            build_command,
            unsafe_shell=True
        )
        if rc != 0:
            self.failure(
                err=err,
                rc=rc,
                msg='failed to perform archive',
                command=' '.join(build_command)
            )
def _unmount(self, mount_point):
    """Unmount a file system.

    :param mount_point: path on the file system that is mounted.
    :type mount_point: ``str``
    """
    umount_command = [
        self.module.get_bin_path('umount', True),
        mount_point,
    ]
    rc, stdout, err = self._run_command(umount_command)
    if rc == 0:
        return
    # Non-zero exit: abort the module run with the command context.
    self.failure(
        err=err,
        rc=rc,
        msg='failed to unmount [ %s ]' % mount_point,
        command=' '.join(umount_command)
    )
def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
    """mount an lv.

    Mounts an overlayfs built from ``lowerdir`` + ``upperdir`` on
    ``mount_point`` and fails the module on a non-zero exit.

    :param lowerdir: name/path of the lower directory
    :type lowerdir: ``str``
    :param upperdir: name/path of the upper directory
    :type upperdir: ``str``
    :param mount_point: path on the file system that is mounted.
    :type mount_point: ``str``
    """
    # NOTE(review): '-t overlayfs' and '-o ...' are single argv elements
    # containing a space; this only works if _run_command joins the list
    # into a shell string (or the space is tolerated) -- verify against
    # _run_command's handling.
    build_command = [
        self.module.get_bin_path('mount', True),
        '-t overlayfs',
        '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
        'overlayfs',
        mount_point,
    ]
    rc, stdout, err = self._run_command(build_command)
    if rc != 0:
        self.failure(
            err=err,
            rc=rc,
            msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
            % (lowerdir, upperdir, mount_point, build_command)
        )
def _container_create_tar(self):
    """Create a tar archive from an LXC container.

    The process is as follows:
      * Stop or Freeze the container
      * Create temporary dir
      * Copy container and config to temporary directory
      * If LVM backed:
        * Create LVM snapshot of LV backing the container
        * Mount the snapshot to tmpdir/rootfs
      * Restore the state of the container
      * Create tar of tmpdir
      * Clean up

    :returns: path of the created archive (from :meth:`_create_tar`)
    :rtype: ``str``
    """
    # Create a temp dir
    temp_dir = tempfile.mkdtemp()
    # Set the name of the working dir, temp + container_name
    work_dir = os.path.join(temp_dir, self.container_name)
    # LXC container rootfs
    lxc_rootfs = self.container.get_config_item('lxc.rootfs')
    # Test if the containers rootfs is a block device
    block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
    # Test if the container is using overlayfs
    overlayfs_backed = lxc_rootfs.startswith('overlayfs')
    mount_point = os.path.join(work_dir, 'rootfs')
    # Set the snapshot name if needed
    snapshot_name = '%s_lxc_snapshot' % self.container_name
    container_state = self._get_state()
    try:
        # Ensure the original container is stopped or frozen: freeze a
        # running container (so it can be resumed), stop anything else.
        if container_state not in ['stopped', 'frozen']:
            if container_state == 'running':
                self.container.freeze()
            else:
                self.container.stop()
        # Sync the container data from the container_path to work_dir
        self._rsync_data(lxc_rootfs, temp_dir)
        if block_backed:
            if snapshot_name not in self._lvm_lv_list():
                if not os.path.exists(mount_point):
                    os.makedirs(mount_point)
                # Take snapshot, sized to match the source LV.
                size, measurement = self._get_lv_size(
                    lv_name=self.container_name
                )
                self._lvm_snapshot_create(
                    source_lv=self.container_name,
                    snapshot_name=snapshot_name,
                    snapshot_size_gb=size
                )
                # Mount snapshot
                self._lvm_lv_mount(
                    lv_name=snapshot_name,
                    mount_point=mount_point
                )
            else:
                # A stale snapshot from a previous run blocks the backup.
                self.failure(
                    err='snapshot [ %s ] already exists' % snapshot_name,
                    rc=1,
                    msg='The snapshot [ %s ] already exists. Please clean'
                        ' up old snapshot of containers before continuing.'
                        % snapshot_name
                )
        elif overlayfs_backed:
            # rootfs format: "overlayfs:<lowerdir>:<upperdir>"
            lowerdir, upperdir = lxc_rootfs.split(':')[1:]
            self._overlayfs_mount(
                lowerdir=lowerdir,
                upperdir=upperdir,
                mount_point=mount_point
            )
        # Set the state as changed and set a new fact
        self.state_change = True
        return self._create_tar(source_dir=work_dir)
    finally:
        # Cleanup runs whether the archive succeeded or not, in order:
        # unmount, drop the snapshot, restore run state, remove tmpdir.
        if block_backed or overlayfs_backed:
            # unmount snapshot
            self._unmount(mount_point)
        if block_backed:
            # Remove snapshot
            self._lvm_lv_remove(snapshot_name)
        # Restore original state of container
        if container_state == 'running':
            if self._get_state() == 'frozen':
                self.container.unfreeze()
            else:
                self.container.start()
        # Remove tmpdir
        shutil.rmtree(temp_dir)
def check_count(self, count, method):
    """Fail the module when ``method`` has been retried more than once.

    :param count: number of attempts made so far
    :type count: ``int``
    :param method: name of the operation being retried (for the message)
    :type method: ``str``
    """
    if count <= 1:
        return
    self.failure(
        error='Failed to %s container' % method,
        rc=1,
        msg='The container [ %s ] failed to %s. Check to lxc is'
            ' available and that the container is in a functional'
            ' state.' % (self.container_name, method)
    )
def failure(self, **kwargs):
    """Return a Failure when running an Ansible command.

    :param error: ``str`` Error that occurred.
    :param rc: ``int`` Return code while executing an Ansible command.
    :param msg: ``str`` Message to report.
    """
    # fail_json() terminates the module run, so this never returns.
    self.module.fail_json(**kwargs)
def run(self):
    """Run the main method."""
    # Dispatch to the handler method registered for the requested state
    # in LXC_ANSIBLE_STATES, then merge any archive/clone facts into the
    # reported container data.
    action = getattr(self, LXC_ANSIBLE_STATES[self.state])
    action()
    outcome = self._container_data()
    if self.archive_info:
        outcome.update(self.archive_info)
    if self.clone_info:
        outcome.update(self.clone_info)
    # exit_json() terminates the module with the result payload.
    self.module.exit_json(
        changed=self.state_change,
        lxc_container=outcome
    )
def main():
    """Ansible Main module."""
    # Declarative argument spec.  NOTE: .keys() used directly as choices
    # and BOOLEANS string choices are the Python 2 era Ansible style.
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(
                type='str',
                required=True
            ),
            template=dict(
                type='str',
                default='ubuntu'
            ),
            backing_store=dict(
                type='str',
                choices=LXC_BACKING_STORE.keys(),
                default='dir'
            ),
            template_options=dict(
                type='str'
            ),
            config=dict(
                type='str',
            ),
            vg_name=dict(
                type='str',
                default='lxc'
            ),
            thinpool=dict(
                type='str'
            ),
            fs_type=dict(
                type='str',
                default='ext4'
            ),
            fs_size=dict(
                type='str',
                default='5G'
            ),
            directory=dict(
                type='str'
            ),
            zfs_root=dict(
                type='str'
            ),
            lv_name=dict(
                type='str'
            ),
            lxc_path=dict(
                type='str'
            ),
            state=dict(
                choices=LXC_ANSIBLE_STATES.keys(),
                default='started'
            ),
            container_command=dict(
                type='str'
            ),
            container_config=dict(
                type='str'
            ),
            container_log=dict(
                choices=BOOLEANS,
                default='false'
            ),
            # Accept any alias of any logging level (flattened).
            container_log_level=dict(
                choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
                default='INFO'
            ),
            clone_name=dict(
                type='str',
                required=False
            ),
            clone_snapshot=dict(
                choices=BOOLEANS,
                default='false'
            ),
            archive=dict(
                choices=BOOLEANS,
                default='false'
            ),
            archive_path=dict(
                type='str',
            ),
            archive_compression=dict(
                choices=LXC_COMPRESSION_MAP.keys(),
                default='gzip'
            )
        ),
        supports_check_mode=False,
        # archive=True requires archive_path to be supplied.
        required_if = ([
            ('archive', True, ['archive_path'])
        ]),
    )
    if not HAS_LXC:
        module.fail_json(
            msg='The `lxc` module is not importable. Check the requirements.'
        )
    # Default the logical volume name to the container name.
    lv_name = module.params.get('lv_name')
    if not lv_name:
        module.params['lv_name'] = module.params.get('name')
    lxc_manage = LxcContainerManagement(module=module)
    lxc_manage.run()
# import module bits
# NOTE: wildcard import plus an unconditional main() call at module scope
# (no ``if __name__ == '__main__'`` guard) is the legacy Ansible module
# convention; the import must come after the definitions above.
from ansible.module_utils.basic import *
main()
| [
"[email protected]"
] | |
108d68accc881e48bb69ecb2973ce1bd1dbf2825 | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /examples/v1/synthetics/GetSyntheticsDefaultLocations.py | 6e44aa3003ea64d20039ddc3cb1e3952f2e290a2 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 387 | py | """
Get the default locations returns "OK" response
"""
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v1.api.synthetics_api import SyntheticsApi
# Default Configuration -- presumably picks up API credentials from the
# environment (DD_API_KEY / DD_APP_KEY); verify against the client docs.
configuration = Configuration()
with ApiClient(configuration) as api_client:
    api_instance = SyntheticsApi(api_client)
    # NOTE: performs a live network call against the Datadog API.
    response = api_instance.get_synthetics_default_locations()
    print(response)
| [
"[email protected]"
] | |
b6be628f703dec55030e68c4a3d13a67ce8180be | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-aom/huaweicloudsdkaom/v2/model/statistic_value.py | d3dfd884e2f8fce485e13476f04cbd5dc66f26e0 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,601 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class StatisticValue:
    """Auto-generated Huawei Cloud SDK model for a single statistic result.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'statistic': 'str',
        'value': 'float'
    }

    attribute_map = {
        'statistic': 'statistic',
        'value': 'value'
    }

    def __init__(self, statistic=None, value=None):
        """StatisticValue - a model defined in huaweicloud sdk"""
        self._statistic = None
        self._value = None
        self.discriminator = None
        # Only assign through the property setters when a value was given,
        # so unset attributes stay None.
        if statistic is not None:
            self.statistic = statistic
        if value is not None:
            self.value = value

    @property
    def statistic(self):
        """Gets the statistic of this StatisticValue.

        The aggregation (statistic) method.  (Translated from the
        original Chinese docstring.)

        :return: The statistic of this StatisticValue.
        :rtype: str
        """
        return self._statistic

    @statistic.setter
    def statistic(self, statistic):
        """Sets the statistic of this StatisticValue.

        The aggregation (statistic) method.  (Translated from the
        original Chinese docstring.)

        :param statistic: The statistic of this StatisticValue.
        :type: str
        """
        self._statistic = statistic

    @property
    def value(self):
        """Gets the value of this StatisticValue.

        The statistic result.  (Translated from the original Chinese
        docstring.)

        :return: The value of this StatisticValue.
        :rtype: float
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this StatisticValue.

        The statistic result.  (Translated from the original Chinese
        docstring.)

        :param value: The value of this StatisticValue.
        :type: float
        """
        self._value = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared attributes, recursing into nested models,
        # lists and dicts; sensitive attributes are masked.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        # simplejson (not stdlib json) is required by the generator for
        # its serialization behaviour.
        import simplejson as json
        if six.PY2:
            # Legacy Python 2 hack to force utf-8 default encoding.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, StatisticValue):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
] | |
f9291f1a6af751a26ba13be48da38a42209934e0 | dba64f73e5e07a25ab1a8e87f8e7cf6700ee3c90 | /symphony/cli/pyinventory/graphql/survey_question_type_enum.py | 3a96d2dd169d65106806d858248ca73f1354a8ec | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | saaib/magma | 6410d161cf1538be450f341dae8bc6f159999338 | 679cd9622eab49a859a4fa9f84f657023e22adb8 | refs/heads/master | 2021-02-07T15:53:21.047308 | 2020-02-29T14:09:27 | 2020-02-29T14:12:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from enum import Enum
class SurveyQuestionType(Enum):
    """Auto-generated GraphQL ``SurveyQuestionType`` enum."""
    BOOL = "BOOL"
    EMAIL = "EMAIL"
    COORDS = "COORDS"
    PHONE = "PHONE"
    TEXT = "TEXT"
    TEXTAREA = "TEXTAREA"
    PHOTO = "PHOTO"
    WIFI = "WIFI"
    CELLULAR = "CELLULAR"
    FLOAT = "FLOAT"
    INTEGER = "INTEGER"
    DATE = "DATE"
    # Sentinel member: unknown values coming from the server are mapped
    # here (via _missing_) instead of raising ValueError.
    MISSING_ENUM = ""

    @classmethod
    def _missing_(cls, value):
        # Enum calls this hook when value lookup fails; degrade
        # gracefully to the sentinel member.
        return cls.MISSING_ENUM
| [
"[email protected]"
] | |
1ecbb8e831336074d1ce5c6d07b4e34745935ae4 | ce722f35f63d7e7af3e9890cbea50b05d32c34c7 | /crawler/dspider/myspider.py | a1a34d3e4640e21a35516c59c8e9c2d1892770cd | [] | no_license | tfangz888/smart_deal_tool | bc6645047e2c3ff36af0baed62e31d1c6cec4a15 | 0f0e4edfec582e93146b30273621a28c36a5d6ca | refs/heads/master | 2020-05-17T03:12:16.720526 | 2019-04-23T14:11:10 | 2019-04-23T14:11:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | # -*- coding: utf-8 -*-
import time
import calendar
import datetime
from scrapy import Spider
from datetime import datetime, timedelta
class BasicSpider(Spider):
    """Scrapy spider base class with date-string helpers.

    All helpers operate on date strings; the default format is
    ``'%Y.%m.%d'`` (e.g. ``'2017.01.06'``).
    """

    def get_nday_ago(self, mdate, num, dformat="%Y.%m.%d"):
        """Return the date ``num`` days before ``mdate``.

        :param mdate: date string in ``dformat``
        :param num: number of days to go back
        :param dformat: strftime/strptime format of ``mdate`` and result
        :returns: date string ``num`` days earlier, in ``dformat``
        """
        t = time.strptime(mdate, dformat)
        y, m, d = t[0:3]
        _date = datetime(y, m, d) - timedelta(num)
        return _date.strftime(dformat)

    def get_next_date(self, sdate=None, target_day=calendar.FRIDAY):
        """Return the next ``target_day`` strictly after ``sdate``.

        :param sdate: date string ``'%Y.%m.%d'``; ``None`` means "today".
        :param target_day: weekday constant from ``calendar``
        :returns: date string ``'%Y.%m.%d'`` of the next ``target_day``

        BUG FIX: the default used to be
        ``sdate=datetime.now().strftime('%Y.%m.%d')`` in the signature,
        which Python evaluates once at import time -- a long-running
        process would keep using its start date forever.  A ``None``
        sentinel makes "today" be computed at call time instead.
        """
        if sdate is None:
            sdate = datetime.now().strftime('%Y.%m.%d')
        oneday = timedelta(days=1)
        current = datetime.strptime(sdate, '%Y.%m.%d')
        # If sdate already falls on target_day, skip it (strictly after).
        if current.weekday() == target_day:
            current += oneday
        while current.weekday() != target_day:
            current += oneday
        return current.strftime("%Y.%m.%d")

    def get_tomorrow_date(self, sdate):
        """Return the day after ``sdate``.

        :param sdate: date string ``'%Y.%m.%d'``
        :returns: date string ``'%Y.%m.%d'`` of the following day
        """
        next_day = datetime.strptime(sdate, '%Y.%m.%d') + timedelta(days=1)
        return next_day.strftime("%Y.%m.%d")
| [
"[email protected]"
] | |
f40bbf4c4d4b9ef848cac097b67b4e5a643be55e | 67b7e6d2c08f08403ec086c510622be48b8d26d8 | /src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/basebackup/test_switch_xlog.py | 4d94b2c153d66678d75b70e85c154d9050cca17c | [
"Apache-2.0",
"PostgreSQL",
"LicenseRef-scancode-rsa-md4",
"OLDAP-2.8",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"BSD-3-Clause",
"Zlib",
"LicenseRef-scancode-zeusbench",
"LicenseRef-scancode-mit-modification-obligations",
"OpenSSL",
"MIT",
"LicenseRef-scancode-other-copyleft",
"bzip2-1.0.6",
"NTP",
"W3C",
"metamail",
"Beerware",
"RSA-MD",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-stream-benchmark",
"LicenseRef-scancode-openssl",
"X11-distribute-modifications-variant",
"LicenseRef-scancode-pcre",
"LicenseRef-scancode-ssleay-windows",
"Spencer-94",
"ISC",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause",
"Python-2.0",
"curl",
"LicenseRef-scancode-sun-bcl-sdk-5.0",
"MIT-CMU",
"W3C-19980720"
] | permissive | sshyran/gpdb | 41012411d22b0294204dfb0fe67a1f4c8d1ecaf6 | 2d065ecdd2b5535cb42474f17a0ee6592b4e6837 | refs/heads/master | 2023-04-09T14:05:44.030212 | 2016-11-12T08:33:33 | 2016-11-12T08:34:36 | 73,544,159 | 0 | 0 | Apache-2.0 | 2023-04-04T00:30:10 | 2016-11-12T09:43:54 | PLpgSQL | UTF-8 | Python | false | false | 1,977 | py | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
import unittest2 as unittest
from tinctest.models.scenario import ScenarioTestCase
from mpp.gpdb.tests.storage.walrepl.gpinitstandby import GpinitStandby
class SwitchXlogTestCase(ScenarioTestCase):
    ''' Initiate standby with old pg_basebackup and new xlog'''

    def __init__(self, methodName):
        self.gp = GpinitStandby()
        super(SwitchXlogTestCase,self).__init__(methodName)

    def setUp(self):
        #Remove standby if present
        self.gp.run(option='-r')

    def tearDown(self):
        #Remove standby
        self.gp.run(option='-r')

    @unittest.skipIf(os.uname()[0] == 'Darwin', "Skipping this test on OSX")
    def test_switch_xlog_after_basebackup(self):
        # Scenario steps run in append order: take a base backup, run a
        # workload (forcing new xlog), then start the standby from the
        # older base backup.
        test_case_list0 = []
        test_case_list0.append('mpp.gpdb.tests.storage.walrepl.basebackup.SwitchClass.run_pg_basebackup')
        self.test_case_scenario.append(test_case_list0)

        test_case_list1 = []
        test_case_list1.append('mpp.gpdb.tests.storage.walrepl.basebackup.switch.runsql.RunWorkload')
        self.test_case_scenario.append(test_case_list1)

        test_case_list2 = []
        test_case_list2.append('mpp.gpdb.tests.storage.walrepl.basebackup.SwitchClass.start_standby')
        self.test_case_scenario.append(test_case_list2)
| [
"[email protected]"
] | |
ecbd429887ac6d5abc32c05fd1f43073737f37c7 | 2a89cc4472bd6251a16a4369f13cdc3c40d28d86 | /data_processing/nx_test.py | 4fb70316922faecc8f18b7c0693cd4403c43642c | [] | no_license | wpower12/CIS5524-FinalProject | 5a51a4b3dd91b92a27023c4317c2e167d06ab9bc | 5f5104c6a8369b9939399258b6686b612cb22ffb | refs/heads/master | 2020-05-05T00:20:59.188552 | 2019-04-30T17:49:04 | 2019-04-30T17:49:04 | 179,571,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | import pickle
import networkx as nx
from networkx.algorithms import community
from networkx.algorithms import bipartite as bp
DATA_PFN = "pol_300_year_00_20_new.p"
DEBUG = ""
edge_list = pickle.load(open("data/{}".format(DATA_PFN), "rb"))
graph = nx.Graph()
for user in edge_list:
# print(user)
for sub in user[1]:
a = user[0]
b = sub
# print(a, b)
graph.add_edge("U-"+user[0], "S-"+sub)
DEBUG = "num nodes: {}\n".format(graph.order())
DEBUG += "num cc's: {}\n".format(nx.number_connected_components(graph))
DEBUG += "connected?: {}\n".format(nx.is_connected(graph))
DEBUG += "bipartite?: {}\n".format(bp.is_bipartite(graph))
print(DEBUG)
DEBUG = ""
# for c in nx.connected_components(graph):
# subg = graph.subgraph(c)
# i += 1
# a, b = bp.sets(subg)
# sorted_ccs = sorted(nx.connected_components(graph), key=len)
# cc = sorted_ccs[-1]
cc = max(nx.connected_components(graph), key=len)
g = graph.subgraph(cc)
DEBUG += "sub graph bipartite?: {}\n".format(bp.is_bipartite(g))
print(DEBUG)
DEBUG = ""
a, b = bp.sets(g)
DEBUG += "found bipartite set of largest CC\n"
print(DEBUG)
DEBUG = ""
# hacky way to see which is the user set.
for A in a:
if A[0] == "S":
user_set = b
else:
user_set = a
break
print(len(user_set))
# user_user_g = bp.projected_graph(g, user_set)
# DEBUG += "found user-user projection of largest CC"
# print(DEBUG)
# DEBUG = ""
# DEBUG += "size of user-user projection\n"
# DEBUG += "{} nodes, {} edges\n".format(user_user_g.order(), user_user_g.size())
# pickle.dump(user_user_g, open("user_user_graph.p", "wb"))
# # need to do it by parts. How many connected components?
# # Might need a LOT more comments from the user history to make sure it
# # gets a little more connected
# # parts = bp.sets(graph)
# # comps = girvan_newman(graph)
# print(DEBUG)
# nx.write_gexf(graph, "pol_300_year_00_20_new.gexf")
# print("gefx file written.") | [
"[email protected]"
] | |
36602fdb00348f04be6f1f77b384c11eb93676fa | 6bf336bc8d6ba061e0c707bdd8595368dee4d27b | /sherlock_and_anagrams.py | d1cc84aa3edc108d72c16638bb4c7edcd539760a | [
"MIT"
] | permissive | avenet/hackerrank | aa536214dbccf5a822a30ea226e1dbaac9afb243 | e522030a023af4ff50d5fc64bd3eba30144e006c | refs/heads/master | 2021-01-01T20:15:06.647873 | 2017-11-24T23:59:19 | 2017-11-24T23:59:19 | 98,801,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | testcases = int(raw_input())
def is_equal(d1, d2):
    """Return True when two character-count dicts match.

    The 'last' bookkeeping key is present in both dicts (so the length
    check stays symmetric) but its value is ignored by the comparison.
    """
    if len(d1) != len(d2):
        return False
    return all(count == d2.get(key)
               for key, count in d1.items()
               if key != 'last')
def next_level(dict_items):
    """Widen every substring by one character.

    Each count-dict (except the final one) absorbs the 'last' character
    of the dict that follows it.  The dicts are mutated in place and the
    first len(dict_items) - 1 of them are returned.
    """
    extended = []
    for item, successor in zip(dict_items, dict_items[1:]):
        ch = successor['last']
        item['last'] = ch
        item[ch] = item.get(ch, 0) + 1
        extended.append(item)
    return extended
# Python 2 script (raw_input/xrange/print statement).
for testcase in xrange(testcases):
    input_str = raw_input()
    dict_items = []
    result = 0
    # Seed one char-count dict per length-1 substring; each iteration of
    # the while loop widens every substring by one character (next_level)
    # and counts pairwise anagram matches with is_equal.
    for c in input_str:
        dict_item = {'last': c}
        dict_item[c] = 1
        dict_items.append(dict_item)
    while len(dict_items) > 1:
        # O(n^2) pairwise comparison per substring length.
        for i in xrange(len(dict_items)):
            for j in xrange(i+1, len(dict_items)):
                if is_equal(dict_items[i], dict_items[j]):
                    result += 1
        dict_items = next_level(dict_items)
    print result
"[email protected]"
] | |
35b04254829c219407297add0b75e3e5f4483e9d | 66213c48da0b752dc6c350789935fe2b2b9ef5ca | /abc/174/e.py | ffc0d5bd9d8bbac094b0625c24ec7ed291cb8d7d | [] | no_license | taketakeyyy/atcoder | 28c58ae52606ba85852687f9e726581ab2539b91 | a57067be27b27db3fee008cbcfe639f5309103cc | refs/heads/master | 2023-09-04T16:53:55.172945 | 2023-09-04T07:25:59 | 2023-09-04T07:25:59 | 123,848,306 | 0 | 0 | null | 2019-04-21T07:39:45 | 2018-03-05T01:37:20 | Python | UTF-8 | Python | false | false | 1,100 | py | # -*- coding:utf-8 -*-
def solve():
    """
    Binary-search the answer X = "maximum allowed log length".
    For a candidate X, cutting one log of length a down to pieces of
    length <= X needs ceil(a / X) - 1 cuts, so the total for all logs
    is computed in O(N).  Searching X over [1, 10**9] by bisection
    gives O(N log(max A)) overall.

    (Docstring and comments translated from the original Japanese.)
    """
    import math
    N, K = list(map(int, input().split()))
    As = list(map(int, input().split()))
    left, right = 1, 10**9
    while left != right:
        mid = (left+right)//2
        cut = 0
        for a in As:
            if a/mid > 1: cut += math.ceil(a/mid) - 1
        if cut <= K:
            # Candidate length satisfies the allowed number of cuts.
            if left+1 == right:
                print(left)
                return
            right = mid
        else:
            # Too many cuts required for this candidate length.
            if left+1 == right:
                print(right)
                return
            left = mid
if __name__ == "__main__":
solve()
| [
"[email protected]"
] | |
90d8d869fc8f16e185d015112875789166658815 | aeba64588c629c2f8a39fc054c48e34ae5b1bc76 | /Урок 3. Практическое задание/geekshop/authapp/admin.py | e09e35a8b3ae6d15fc2797fcbbe9025ffe678c9c | [] | no_license | ZF-1000/12_Django_Framework_Optimization_tools | a600caa2739824011251003a58f016ad1aa4d9e7 | 664224a22711d071fc63a33acedf430571d795e8 | refs/heads/master | 2022-12-17T22:59:46.581864 | 2020-09-24T18:34:00 | 2020-09-24T18:34:00 | 289,723,244 | 0 | 0 | null | 2020-09-24T18:34:01 | 2020-08-23T16:22:46 | Python | UTF-8 | Python | false | false | 184 | py | from django.contrib import admin
from authapp.models import ShopUser, ShopUserProfile
# Register your models here.
# Expose the custom user and profile models in the Django admin with the
# default ModelAdmin options.
admin.site.register(ShopUser)
admin.site.register(ShopUserProfile)
| [
"[email protected]"
] | |
4f7d33984f4de1a2db5ca3fc534dcc10909e1f06 | 30ab9750e6ca334941934d1727c85ad59e6b9c8a | /server/base/middlewares.py | 84d8a5ba83d986202b5a9503d755dfa61cee8a94 | [
"Apache-2.0"
] | permissive | ankurvaishley/zentral | 57e7961db65278a0e614975e484927f0391eeadd | a54769f18305c3fc71bae678ed823524aaa8bb06 | refs/heads/main | 2023-05-31T02:56:40.309854 | 2021-07-01T07:51:31 | 2021-07-01T14:15:34 | 382,346,360 | 1 | 0 | Apache-2.0 | 2021-07-02T12:55:47 | 2021-07-02T12:55:47 | null | UTF-8 | Python | false | false | 2,103 | py | # adapted from https://github.com/mozilla/django-csp
from functools import partial
from django.conf import settings
from django.utils.crypto import get_random_string
from django.utils.functional import SimpleLazyObject
from http.client import INTERNAL_SERVER_ERROR, NOT_FOUND
CSP_HEADER = 'Content-Security-Policy'

# Baseline policy applied to every response; build_csp_header() appends a
# per-request nonce to script-src when one was generated for the request.
DEFAULT_CSP_POLICIES = {
    "default-src": "'self'",
    "script-src": "'self'",
    "base-uri": "'none'",
    "frame-ancestors": "'none'",
    "object-src": "'none'",
    "style-src": "'self' 'unsafe-inline'",
}
def make_csp_nonce(request, length=16):
    """Return the request's CSP nonce, generating it on first use.

    The nonce is cached on ``request._csp_nonce`` so repeated calls for
    the same request always return the same value.
    """
    nonce = getattr(request, '_csp_nonce', None)
    if not nonce:
        nonce = get_random_string(length)
        request._csp_nonce = nonce
    return nonce
def build_csp_header(request):
    """Render the Content-Security-Policy header value for a request.

    Starts from DEFAULT_CSP_POLICIES and, when a nonce was generated for
    this request, extends script-src with a 'nonce-...' source.
    """
    policies = dict(DEFAULT_CSP_POLICIES)
    nonce = getattr(request, '_csp_nonce', None)
    if nonce:
        policies["script-src"] = "{} 'nonce-{}'".format(
            policies["script-src"], nonce)
    return ";".join(
        "{} {}".format(directive, value)
        for directive, value in policies.items()
    )
def csp_middleware(get_response):
    """Django middleware factory adding a Content-Security-Policy header.

    ``request.csp_nonce`` is exposed as a SimpleLazyObject so the nonce
    is only generated when something (e.g. a template) actually reads
    it; build_csp_header() then picks it up from ``request._csp_nonce``.
    """
    def middleware(request):
        nonce_func = partial(make_csp_nonce, request)
        request.csp_nonce = SimpleLazyObject(nonce_func)
        response = get_response(request)
        if CSP_HEADER in response:
            # header already present (HOW ???)
            return response
        if response.status_code in (INTERNAL_SERVER_ERROR, NOT_FOUND) and settings.DEBUG:
            # no policies in debug views
            return response
        response[CSP_HEADER] = build_csp_header(request)
        return response
    return middleware
def deployment_info_middleware(get_response):
    """Attach static deployment metadata to every request.

    The optional ``base.deployment`` module is inspected once, at
    middleware construction time; the resulting dict (possibly empty)
    is then assigned to ``request.zentral_deployment`` per request.
    """
    info = {}
    try:
        import base.deployment as deployment
    except ImportError:
        pass
    else:
        for attr in ("version", "image_id", "instance_id", "setup_at"):
            value = getattr(deployment, attr, None)
            if value is not None:
                info[attr] = value

    def middleware(request):
        request.zentral_deployment = info
        return get_response(request)
    return middleware
| [
"[email protected]"
] | |
b5f70c3dc6b4b1c3197a94cff2bcf3cea020f5b6 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Simulation/G4Utilities/G4DebuggingTools/python/G4DebuggingToolsConfig.py | f52b57fe2059fda006a2cf456c9959bd184320f5 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,737 | py | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
from AthenaCommon import CfgMgr, Logging
from G4AtlasServices import G4AtlasServicesConfig
def getVerboseSelectorTool(name="G4UA::VerboseSelectorTool", **kwargs):
    """Configure the verbose-selector tool, applying any per-tool
    overrides registered in simFlags.UserActionConfig."""
    from G4AtlasApps.SimFlags import simFlags
    # example custom configuration
    if name in simFlags.UserActionConfig.get_Value().keys():
        # Python 2 dict API (.iteritems())
        for prop,value in simFlags.UserActionConfig.get_Value()[name].iteritems():
            kwargs.setdefault(prop,value)
    return CfgMgr.G4UA__VerboseSelectorTool(name, **kwargs)

def addVerboseSelectorTool(name="G4UA::VerboseSelectorTool",system=False):
    # Register the tool for the Event/Step/Tracking action slots.
    G4AtlasServicesConfig.addAction(name,['Event','Step','Tracking'],system)
def getG4AtlantisDumperTool(name="G4UA::G4AtlantisDumperTool", **kwargs):
    """Plain factory for the Atlantis event-dumper tool."""
    return CfgMgr.G4UA__G4AtlantisDumperTool(name, **kwargs)

def addG4AtlantisDumperTool(name="G4UA::G4AtlantisDumperTool",system=False):
    # NOTE(review): 'Event' appears twice in the slot list -- confirm
    # this is intentional (pre- and post-event?) versus a typo.
    G4AtlasServicesConfig.addAction(name,['Event','Step','Event'],system)
def getEnergyConservationTestTool(name="G4UA::EnergyConservationTestTool", **kwargs):
    """Plain factory for the energy-conservation test tool."""
    return CfgMgr.G4UA__EnergyConservationTestTool(name, **kwargs)

def addEnergyConservationTestTool(name="G4UA::EnergyConservationTestTool",system=False):
    # Register the tool for the Event/Step/Tracking action slots.
    G4AtlasServicesConfig.addAction(name,['Event','Step','Tracking'],system)
def getHyperspaceCatcherTool(name="G4UA::HyperspaceCatcherTool", **kwargs):
    """Configure the hyperspace-catcher tool, applying any per-tool
    overrides registered in simFlags.UserActionConfig."""
    from G4AtlasApps.SimFlags import simFlags
    # example custom configuration
    if name in simFlags.UserActionConfig.get_Value().keys():
        # Python 2 dict API (.iteritems())
        for prop,value in simFlags.UserActionConfig.get_Value()[name].iteritems():
            kwargs.setdefault(prop,value)
    return CfgMgr.G4UA__HyperspaceCatcherTool(name, **kwargs)

def addHyperspaceCatcherTool(name="G4UA::HyperspaceCatcherTool",system=False):
    # Register the tool for the Run/Step action slots.
    G4AtlasServicesConfig.addAction(name,['Run','Step'],system)
def getStepNtupleTool(name="G4UA::StepNtupleTool", **kwargs):
    """Factory for the step-ntuple tool; refuses to run multi-threaded.

    NOTE: returns ``False`` (not a tool) when NumThreads > 1 -- callers
    must be prepared to handle that sentinel.
    """
    from AthenaCommon.ConcurrencyFlags import jobproperties as concurrencyProps
    if concurrencyProps.ConcurrencyFlags.NumThreads() >1:
        log=Logging.logging.getLogger(name)
        log.fatal('Attempt to run '+name+' with more than one thread, which is not supported')
        #from AthenaCommon.AppMgr import theApp
        #theApp.exit(1)
        return False
    return CfgMgr.G4UA__StepNtupleTool(name, **kwargs)
def getVolumeDebuggerTool(name="G4UA::VolumeDebuggerTool", **kwargs):
    """Configure the volume-debugger tool, applying any per-tool
    overrides registered in simFlags.UserActionConfig."""
    # NOTE(review): concurrencyProps is imported but never used here
    # (copied from getStepNtupleTool?).
    from AthenaCommon.ConcurrencyFlags import jobproperties as concurrencyProps
    from G4AtlasApps.SimFlags import simFlags
    # example custom configuration
    if name in simFlags.UserActionConfig.get_Value().keys():
        # Python 2 dict API (.iteritems())
        for prop,value in simFlags.UserActionConfig.get_Value()[name].iteritems():
            kwargs.setdefault(prop,value)
    return CfgMgr.G4UA__VolumeDebuggerTool(name, **kwargs)
def getGeant4SetupCheckerTool(name="G4UA::Geant4SetupCheckerTool", **kwargs):
    """Factory for the Geant4 setup checker; picks a geometry-specific
    reference file from AFS when one exists, else a default."""
    # Set reference based on geometry
    from G4AtlasApps.SimFlags import simFlags
    default_file = '/afs/cern.ch/atlas/groups/Simulation/G4config_reference_files/default_reference.txt'
    test_file = '/afs/cern.ch/atlas/groups/Simulation/G4config_reference_files/'
    test_file+=simFlags.SimLayout().replace('_VALIDATION','')+'_reference.txt'
    import os
    # Use the layout-specific reference only if it is readable on AFS.
    if os.access(test_file,os.R_OK): default_file = test_file
    kwargs.setdefault('ReferenceFile',default_file)
    # Grab the properties that were already set
    if name in simFlags.UserActionConfig.get_Value().keys():
        # Python 2 dict API (.iteritems())
        for prop,value in simFlags.UserActionConfig.get_Value()[name].iteritems():
            kwargs.setdefault(prop,value)
    # Set up the user action
    return CfgMgr.G4UA__Geant4SetupCheckerTool(name, **kwargs)
| [
"[email protected]"
] | |
d6c6b643bd659e3f9cfac6cae96011a6dca0c086 | 45a00518abed3ef4796655d8d2a0677f29961aa3 | /example 46/python_venv/lib/python3.8/site-packages/joblib/test/test_cloudpickle_wrapper.py | 733f51c7239467da0f967b2aa790a2df76cfa185 | [] | no_license | ruiwu1990/CSCI_4710_6710 | 07b92e456d6cda3e63a5b5d078c1718110317555 | 6e32c89ef70fbe4b4a5db14682dc94b13bab6d9e | refs/heads/master | 2023-05-03T21:50:54.943702 | 2023-04-18T21:48:43 | 2023-04-18T21:48:43 | 174,882,138 | 9 | 17 | null | 2023-05-01T20:53:06 | 2019-03-10T21:18:01 | Python | UTF-8 | Python | false | false | 749 | py | """
Test that our implementation of wrap_non_picklable_objects mimics
properly the loky implementation.
"""
from .._cloudpickle_wrapper import wrap_non_picklable_objects
from .._cloudpickle_wrapper import my_wrap_non_picklable_objects
def a_function(x):
    """Module-level identity function (picklable by reference)."""
    return x
class AClass(object):
    """Identity callable: instances return their single argument."""

    def __call__(self, x):
        return x
def test_wrap_non_picklable_objects():
    # Mostly a smoke test: test that we can use callable in the same way
    # with both our implementation of wrap_non_picklable_objects and the
    # upstream one.  Exercises both a plain function and a callable
    # instance, asserting the two wrappers agree on the result.
    for obj in (a_function, AClass()):
        wrapped_obj = wrap_non_picklable_objects(obj)
        my_wrapped_obj = my_wrap_non_picklable_objects(obj)
        assert wrapped_obj(1) == my_wrapped_obj(1)
"[email protected]"
] | |
64612b9c2435744a2cdb72e21120ca270c1675e4 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-2939.py | 12303d1902d870a55260fe2b6bcdbb323417c476 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,290 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) $RetType:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
0d558a04a974c96413e34fc82e039b476a0ba4bd | a392bb476779cbfde6c4863e6039af9a57c288be | /S1_Foundation/C2_GettingStarted/selection_sort.py | 966df2f7c6568266bfc89861df06db3b9452b246 | [
"MIT"
] | permissive | JasonVann/CLRS | 49e25d021718156a732455a117331dace1fe501c | d9dcdc7ea9230d03d97d72857f9345ea995e5986 | refs/heads/master | 2021-05-04T10:03:06.999922 | 2017-10-21T07:43:54 | 2017-10-21T07:43:54 | 53,999,274 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | from test_sort import *
def selection_sort(data):
    """Sort *data* in place in ascending order using selection sort.

    For each position i, locate the index of the smallest remaining
    element in data[i:] and swap it into place.  O(n^2) comparisons,
    O(1) extra space; returns None, like list.sort().

    The original tracked both the minimum value (via a ``None``
    sentinel) and its index, with ``idx`` assigned only inside a
    conditional -- fragile and redundant.  ``min`` over the index range
    with ``data.__getitem__`` as key returns the *first* minimal index,
    matching the original strict-< scan exactly.
    """
    n = len(data)
    for i in range(n - 1):
        smallest = min(range(i, n), key=data.__getitem__)
        data[i], data[smallest] = data[smallest], data[i]
def selection_sort_clrs(data):
    """In-place ascending selection sort (CLRS formulation).

    Repeatedly selects the index of the smallest element in the unsorted
    suffix and swaps it to the front.  Returns None.
    """
    length = len(data)
    for front in range(length - 1):
        best = front
        for candidate in range(front + 1, length):
            if data[candidate] < data[best]:
                best = candidate
        if best != front:
            data[front], data[best] = data[best], data[front]
data = gen_test()
#selection_sort(data)
selection_sort_clrs(data)
print(verify_sort(data))
| [
"[email protected]"
] | |
561a1515aec45c05a6f692c454547c35b601299c | fc7cad490cb774d769c1b463ac6d1d9a8ea97024 | /accounts/migrations/0004_auto_20200612_0259.py | ec0685ecf8d345c0e4a3fed233fe2258dc000cec | [] | no_license | Aviemusca/curriculum-dev | c301915532353836cb085130fd12e2734da4b956 | 691a6536718ef496ac603b1c8daee7508b3e8ff2 | refs/heads/master | 2022-12-26T20:56:55.031344 | 2020-10-01T08:11:49 | 2020-10-01T08:11:49 | 297,643,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | # Generated by Django 3.0.5 on 2020-06-12 02:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20200519_1800'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=250)),
('message', models.TextField(default='')),
],
options={
'verbose_name': 'Contact',
'verbose_name_plural': 'Contacts',
},
),
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default='default.png', upload_to='profile_pics'),
),
]
| [
"[email protected]"
] | |
202413f8bed169d63c0e062e5db80f4c7ab19053 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/smart_document_recognizer_layout_result.py | 880edef4f9e5268fec0c2a94a79f45be803ebb0a | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,891 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SmartDocumentRecognizerLayoutResult:
    """Auto-generated SDK model holding the layout-analysis result of the
    Smart Document Recognizer OCR service.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attributes whose values are masked as "****" in to_dict(); none here.
    sensitive_list = []

    openapi_types = {
        'layout_block_count': 'int',
        'layout_block_list': 'list[SmartDocumentRecognizerLayoutBlock]'
    }

    attribute_map = {
        'layout_block_count': 'layout_block_count',
        'layout_block_list': 'layout_block_list'
    }

    def __init__(self, layout_block_count=None, layout_block_list=None):
        """SmartDocumentRecognizerLayoutResult

        The model defined in huaweicloud sdk

        :param layout_block_count: Number of document layout regions recognized by the model.
        :type layout_block_count: int
        :param layout_block_list: List of recognition results for the document layout regions.
        :type layout_block_list: list[:class:`huaweicloudsdkocr.v1.SmartDocumentRecognizerLayoutBlock`]
        """

        self._layout_block_count = None
        self._layout_block_list = None
        self.discriminator = None

        # Only assign attributes that were explicitly provided.
        if layout_block_count is not None:
            self.layout_block_count = layout_block_count
        if layout_block_list is not None:
            self.layout_block_list = layout_block_list

    @property
    def layout_block_count(self):
        """Gets the layout_block_count of this SmartDocumentRecognizerLayoutResult.

        Number of document layout regions recognized by the model.

        :return: The layout_block_count of this SmartDocumentRecognizerLayoutResult.
        :rtype: int
        """
        return self._layout_block_count

    @layout_block_count.setter
    def layout_block_count(self, layout_block_count):
        """Sets the layout_block_count of this SmartDocumentRecognizerLayoutResult.

        Number of document layout regions recognized by the model.

        :param layout_block_count: The layout_block_count of this SmartDocumentRecognizerLayoutResult.
        :type layout_block_count: int
        """
        self._layout_block_count = layout_block_count

    @property
    def layout_block_list(self):
        """Gets the layout_block_list of this SmartDocumentRecognizerLayoutResult.

        List of recognition results for the document layout regions.

        :return: The layout_block_list of this SmartDocumentRecognizerLayoutResult.
        :rtype: list[:class:`huaweicloudsdkocr.v1.SmartDocumentRecognizerLayoutBlock`]
        """
        return self._layout_block_list

    @layout_block_list.setter
    def layout_block_list(self, layout_block_list):
        """Sets the layout_block_list of this SmartDocumentRecognizerLayoutResult.

        List of recognition results for the document layout regions.

        :param layout_block_list: The layout_block_list of this SmartDocumentRecognizerLayoutResult.
        :type layout_block_list: list[:class:`huaweicloudsdkocr.v1.SmartDocumentRecognizerLayoutBlock`]
        """
        self._layout_block_list = layout_block_list

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SmartDocumentRecognizerLayoutResult):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
] | |
1269ec37a14795d04833ef26c19ca25ac9209675 | 61fd735fda784dfc817b51a53c933ecdc4d47177 | /PythonLearningFiles/小甲鱼Python基础课程笔记/python基础教程/07分支和循环.py | 185365fc588c50fb18dc07e45a2d6eb0de12ccb4 | [] | no_license | github653224/GitProjects_PythonLearning | bfe4610bf8f944d9a51889a30cf7b20e5ab219b7 | ac975efbb336846a3f145821cf47431d39b30ac1 | refs/heads/master | 2021-05-08T01:03:04.150717 | 2017-10-21T09:52:02 | 2017-10-21T09:52:02 | 107,763,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | """
打飞机方案:
加载北京音乐
播放北京音乐(设置单曲循环)
我飞机诞生
interval=0
while true:
if 用户是否点击了关闭按钮:
退出程序
interval+=1
if interval==50:
小飞机诞生
小飞机移动一个位置
刷新屏幕
if 用户鼠标产生移动:
我方飞机中心位置=用户鼠标位置
刷新屏幕
if 我方飞机与小飞机发生肢体冲突:
我方挂,播放音乐停止
修改我方飞机图案
打印"game over"
停止音乐,最好淡出
"""
| [
"[email protected]"
] | |
68dd46a0f9f060b60554ee409185bc17f3b0eecb | 484f2b6ed2a51a78978a4b6450f97a3cbefcd087 | /ugc/migrations/0005_auto_20170214_1413.py | 845ec474ff677ca19957848ca9652760ee9e3645 | [] | no_license | ivan371/technotrack-web2-spring-2017 | 31d0a937f1b6342bd70432cbebd37bb68c1dd8df | 92e9fd9040984eef66b6bab45bb4d6918e178d41 | refs/heads/master | 2021-01-11T14:27:04.717314 | 2017-05-22T18:55:22 | 2017-05-22T18:55:22 | 81,424,353 | 0 | 0 | null | 2017-02-09T07:54:27 | 2017-02-09T07:54:27 | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-14 14:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.10.5): redefines Post.content as
    CharField(max_length=2047) with verbose_name 'post_content'.
    """

    dependencies = [
        ('ugc', '0004_auto_20170214_1402'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='content',
            field=models.CharField(max_length=2047, verbose_name='post_content'),
        ),
    ]
| [
"[email protected]"
] | |
177db48652d402f97d576e0f6523800c48df1c68 | 5cde60183d67eb92aad31d0bfa03a8e2ebddad0c | /setup.py | 12ac376cecfc5442d07485725892e2397462b032 | [
"MIT"
] | permissive | FedeClaudi/fcutils | 7379615f2eec7d152aab5c89ec70afee79e76964 | 2ef6f037303fc426d5c5b2851d2c99f17efa4002 | refs/heads/master | 2021-09-25T15:35:32.348620 | 2021-09-21T15:44:37 | 2021-09-21T15:44:37 | 236,294,597 | 5 | 0 | MIT | 2021-01-31T17:34:22 | 2020-01-26T10:05:55 | Python | UTF-8 | Python | false | false | 579 | py | from setuptools import setup, find_namespace_packages
# Third-party runtime dependencies installed alongside the package.
requirements = [
    "numpy",
    "opencv-python",
    "pandas",
    "matplotlib",
    "seaborn",
    "scipy",
    "pyyaml",
    "statsmodels",
    "loguru",
]

# Package metadata; find_namespace_packages() picks up every sub-package.
setup(
    name="fcutils",
    version="1.1.2.7",
    author_email="[email protected]",
    description="bunch of utility functions",
    packages=find_namespace_packages(exclude=()),
    include_package_data=True,
    url="https://github.com/FedeClaudi/fcutils",
    author="Federico Claudi",
    zip_safe=False,
    install_requires=requirements,
)
| [
"[email protected]"
] | |
e586d7a3c000d224849a27dbaddfc190dffd0029 | 0d413c078c0dd7f1f68083750022441f091736f5 | /addons/source-python/plugins/gungame/plugins/included/gg_dissolver/gg_dissolver.py | 736d55841d464a5e19a21a4b3a3e088ef32fe82a | [] | no_license | Hackmastr/GunGame-SP | 7eac4c031e5e6f4624a8cacfea8d3d5df85a2dfb | dd76d1f581a1a8aff18c2194834665fa66a82aab | refs/heads/master | 2020-12-26T04:56:12.309910 | 2016-12-29T23:15:53 | 2016-12-29T23:15:53 | 67,629,767 | 0 | 0 | null | 2016-09-07T17:40:57 | 2016-09-07T17:40:56 | null | UTF-8 | Python | false | false | 3,166 | py | # ../gungame/plugins/included/gg_dissolver/gg_dissolver.py
"""Plugin that dissolves player ragdolls on death."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Python
from random import randrange
from warnings import warn
# Source.Python
from entities.constants import DissolveType, INVALID_ENTITY_INTHANDLE
from entities.entity import Entity
from entities.helpers import index_from_inthandle
from events import Event
from listeners.tick import Delay
from players.entity import Player
# Plugin
from .configuration import dissolver_delay, dissolver_type, magnitude
# =============================================================================
# >> GLOBAL VARIABLES
# =============================================================================
_num_dissolve_types = len(DissolveType.__members__)
# =============================================================================
# >> GAME EVENTS
# =============================================================================
@Event('player_death')
def dissolve_player_ragdoll(game_event):
    """Schedule dissolving/removal of the dying player's ragdoll.

    The dissolver type comes from the ``dissolver_type`` cvar.  Valid
    values span 0..len(DissolveType)+2; the two extra values are handled
    in dissolve_ragdoll(): len+1 picks a random dissolve type, len+2
    removes the ragdoll outright.
    """
    # Get the type of dissolver to use
    current_type = dissolver_type.get_int()

    # Out-of-range cvar values warn and fall back to plain removal.
    if current_type < 0 or current_type > _num_dissolve_types + 2:

        # Raise a warning
        warn(
            'Invalid value for {cvar} cvar "{value}".'.format(
                cvar=dissolver_type.name,
                value=current_type
            )
        )

        # Use the remove setting
        current_type = _num_dissolve_types + 2

    # Delay the dissolving; a negative delay cvar is clamped to 0.
    Delay(
        max(0, dissolver_delay.get_int()),
        dissolve_ragdoll,
        (game_event['userid'], current_type),
    )
# =============================================================================
# >> HELPER FUNCTIONS
# =============================================================================
def dissolve_ragdoll(userid, current_type):
    """Dissolve/remove the ragdoll of the player identified by *userid*.

    :param userid: Userid of the player whose ragdoll is targeted.
    :param current_type: Dissolve type index; ``_num_dissolve_types + 1``
        means "pick a random type", ``_num_dissolve_types + 2`` means
        "remove the ragdoll without dissolving".
    """
    # Get the ragdoll entity
    try:
        inthandle = Player.from_userid(userid).ragdoll
    # TODO: clarify this exception
    # NOTE(review): broad catch silently bails out when the player lookup
    # fails (e.g. already disconnected) -- narrow once the exact exception
    # raised by Player.from_userid is confirmed.
    except Exception:
        return
    # No ragdoll entity exists for this player.
    if inthandle == INVALID_ENTITY_INTHANDLE:
        return
    entity = Entity(index_from_inthandle(inthandle))

    # Should the ragdoll just be removed?
    if current_type == _num_dissolve_types + 2:
        entity.remove()
        return

    # Set the target name for the player's ragdoll so the dissolver
    # entity can address it by name below.
    entity.target_name = 'ragdoll_{userid}'.format(userid=userid)

    # Get the dissolver entity
    dissolver_entity = Entity.find_or_create('env_entity_dissolver')

    # Should a random dissolve type be chosen?
    if current_type == _num_dissolve_types + 1:
        current_type = randrange(_num_dissolve_types)

    # Set the magnitude
    dissolver_entity.magnitude = magnitude.get_int()

    # Set the dissolve type
    dissolver_entity.dissolve_type = current_type

    # Dissolve the ragdoll
    dissolver_entity.dissolve('ragdoll_{userid}'.format(userid=userid))
| [
"[email protected]"
] | |
a88f598b7bf08e6083106dbe44f2ca8df1bfc8b0 | 15c124ef75ee3974af8c5d0f97a35d13f3673378 | /dump_committees.py | 12f686c76fb41ebb3e6318e3fc5c50558f54520f | [] | no_license | datamade/war-chest | e9e2eaddb4c5dec584fdaa3806ce760757f0dfae | 6b4eb021d46b3db0e5409e54a496b8823a4aff2b | refs/heads/master | 2021-01-15T23:06:30.602126 | 2014-06-06T15:41:30 | 2014-06-06T15:41:30 | 13,306,945 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,659 | py | from app import Candidate, Report, Person
import csv
from collections import OrderedDict
from operator import itemgetter
from sqlalchemy import or_
if __name__ == "__main__":
    # Build one row per (person, committee) pair, de-duplicated, then
    # dump everything to candidate_committees.csv.
    dump = []
    for person in Person.query.all():
        # Committees attached to each of the person's candidacies.
        for cand in person.candidacies.all():
            for comm in cand.committees:
                c = OrderedDict()
                c['name'] = person.name
                c['committee'] = comm.name
                c['status'] = comm.status
                c['url'] = comm.url
                # Funds columns disabled; kept for reference.
                #rep = Report.query.filter(Report.committee_id == comm.id)\
                #        .filter(or_(Report.type.like('Quarterly%'),
                #                    Report.type.like('D-2 Semiannual Report%')))\
                #        .order_by(Report.date_filed.desc()).first()
                #if rep:
                #    c['current_funds'] = rep.funds_end
                #    c['invest_total'] = rep.invest_total
                #    c['total_assets'] = rep.funds_end + rep.invest_total
                #else:
                #    c['current_funds'] = None
                #    c['invest_total'] = None
                #    c['total_assets'] = None
                if c not in dump:
                    dump.append(c)
        # Non-candidate committees where the person holds a chair title.
        for comm in person.committee_positions.all():
            if 'chair' in comm.title.lower()\
                and comm.committee.type\
                and not comm.committee.type.lower() == 'candidate':
                c = OrderedDict()
                c['name'] = person.name
                c['committee'] = comm.committee.name
                c['status'] = comm.committee.status
                c['url'] = comm.committee.url
                # Funds columns disabled; kept for reference.
                #rep = Report.query.filter(Report.committee_id == comm.committee.id)\
                #        .filter(or_(Report.type.like('Quarterly%'),
                #                    Report.type.like('D-2 Semiannual Report%')))\
                #        .order_by(Report.date_filed.desc()).first()
                #if rep:
                #    c['current_funds'] = rep.funds_end
                #    c['invest_total'] = rep.invest_total
                #    c['total_assets'] = rep.funds_end + rep.invest_total
                #else:
                #    c['current_funds'] = None
                #    c['invest_total'] = None
                #    c['total_assets'] = None
                if c not in dump:
                    dump.append(c)
    # Sort rows by person name before writing.
    dump = sorted(dump, key=itemgetter('name'))
    # NOTE(review): 'wb' + csv.DictWriter is Python 2 idiom; on Python 3
    # this raises TypeError -- would need open(..., 'w', newline='').
    # Also, dump[0] raises IndexError when the query returned no rows.
    out = open('candidate_committees.csv', 'wb')
    outp = csv.DictWriter(out, fieldnames=dump[0].keys())
    outp.writeheader()
    outp.writerows(dump)
    out.close()
| [
"[email protected]"
] | |
a64bb1807505a2ed94f6dae03805a20c63d76bd1 | 413125277311510b40ca481b12ab82d379f5df62 | /factorial number of trailing zeros in factorial.py | 11ce6c912e5196f14256726dc5f9adc771d0a39c | [] | no_license | Aakashbansal837/python | 98d85ce1e88c73f0e5180b1b1af80714f3e45097 | 4de2a3d6a482fdba8809ceb81e94f201b776b00e | refs/heads/master | 2021-04-06T00:16:24.884830 | 2018-05-30T17:42:20 | 2018-05-30T17:42:20 | 124,778,551 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | import sys
sys.setrecursionlimit(10000)
def factorial(n):
    """Return n! for a non-negative integer n.

    Iterative implementation: the original recursive version required a
    raised recursion limit and still failed with RecursionError for large
    n; a simple running product has no depth limit and returns the same
    values for every valid input (factorial(0) == factorial(1) == 1).
    """
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
# Read n from stdin and print how many trailing zeros n! has in decimal.
# Stripping the trailing '0' characters and comparing lengths counts the
# same run of zeros the original index-walk counted (a factorial's leading
# digit is never zero, so the whole string is never stripped away).
digits = str(factorial(int(input())))
print(len(digits) - len(digits.rstrip('0')))
| [
"[email protected]"
] | |
8b0825a8ddd160fc42a6622a90c4d63dba60573d | e57a122cba8d00aac9d014a45e815063cb9f0359 | /imagepy/menus/Kit3D/Viewer 3D/tablepoints_plg.py | 86fe577864e3131f6f976b490ee2afe4771c7966 | [
"BSD-2-Clause"
] | permissive | WeisongZhao/imagepy | 9d66664578c77eb2d463de922c8d06af4f8af35a | 43cd5c4dcb9d6fefdcf11b8b9e9c0d56e11fab1e | refs/heads/master | 2020-04-25T23:44:31.304590 | 2020-01-29T06:10:26 | 2020-01-29T06:10:26 | 173,155,520 | 1 | 0 | NOASSERTION | 2019-02-28T17:21:56 | 2019-02-28T17:21:55 | null | UTF-8 | Python | false | false | 1,761 | py | from imagepy.core.engine import Table
from imagepy.core.manager import ColorManager
from imagepy.core import myvi
import numpy as np
from imagepy import IPy
class Plugin(Table):
    """Table plugin: render the table's (x, y, z) columns as a 3D point cloud.

    An optional column ('rs') scales each ball's radius, an optional column
    ('cs') drives per-point colors through the colormap 'cm', and the data's
    bounding box can be drawn as an outline cube.
    """
    title = 'Table Point Cloud'
    # Parameter defaults; the column-name fields are filled in by the dialog.
    para = {'x':None, 'y':None, 'z':None, 'r':5, 'rs':None, 'c':(0,0,255),
            'cs':None, 'cm':None, 'cube':False}
    # Dialog layout: one widget descriptor per parameter above.
    view = [('field', 'x', 'x data', ''),
            ('field', 'y', 'y data', ''),
            ('field', 'z', 'z data', ''),
            (float, 'r', (0, 1024), 3, 'radius', 'pix'),
            ('lab', 'lab', '== if set the radius would becom factor =='),
            ('field', 'rs', 'radius', 'column'),
            ('color', 'c', 'color', ''),
            ('lab', 'lab', '== if set the color upon would disable =='),
            ('field', 'cs', 'color', 'column'),
            ('cmap', 'cm', 'color map when color column is set'),
            (bool, 'cube', 'draw outline cube')]
    def load(self, para):
        """Open the 3D canvas window before run() is executed."""
        self.frame = myvi.Frame3D.figure(IPy.curapp, title='3D Canvas')
        return True
    def run(self, tps, snap, data, para = None):
        """Build ball (and optional cube) meshes from the table and show them."""
        pts = np.array(data[[para['x'], para['y'], para['z']]])
        # The literal string 'None' marks "no column chosen"; otherwise the
        # 'rs' column acts as a per-point factor on the base radius 'r'.
        rs = data[para['rs']]*para['r'] if para['rs'] != 'None' else [para['r']]*len(pts)
        # NOTE(review): the LUT is fetched even when no color column is set --
        # assumes ColorManager.get_lut tolerates para['cm']; confirm.
        cm = ColorManager.get_lut(para['cm'])/255.0
        # Rescale a column to 0..255 so it can index the 256-entry LUT.
        clip = lambda x : (x-x.min())/(x.max()-x.min())*255
        if para['cs'] == 'None': cs = [np.array(para['c'])/255.0]*len(pts)
        else: cs = cm[clip(data[para['cs']]).astype(np.uint8)]
        vts, fs, ns, cs = myvi.build_balls(pts.astype(np.float32), list(rs), cs)
        self.frame.viewer.add_surf_asyn('ball', vts, fs, ns, cs)
        if para['cube']:
            # Outline cube spanning the data's axis-aligned bounding box.
            p1 = data[[para['x'], para['y'], para['z']]].min(axis=0)
            p2 = data[[para['x'], para['y'], para['z']]].max(axis=0)
            vts, fs, ns, cs = myvi.build_cube(p1, p2)
            self.frame.viewer.add_surf_asyn('cube', vts, fs, ns, cs, mode='grid')
        self.frame.Raise()
self.frame = None | [
"[email protected]"
] | |
2a9948a9fa15cfc738111ee698e8284117c2f0ab | 5c6c165cdbdc4fd538f4aed7d0fede76fc480444 | /asphalt/feedreader/__init__.py | a990cce8accc3f529540060c6c57896c512d464d | [
"Apache-2.0"
] | permissive | asphalt-framework/asphalt-feedreader | b4b5e693a65dd51dd3fdbe1234a691c7de769bfb | 096df835408ecfcfde593950c9c80d130f62cc5e | refs/heads/master | 2021-01-18T19:53:06.455455 | 2017-11-26T17:33:13 | 2017-11-26T17:34:46 | 86,918,924 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | from .api import FeedReader, FeedStateStore # noqa
from .component import create_feed, FeedReaderComponent # noqa
from .events import EntryEvent, MetadataEvent # noqa
from .metadata import FeedEntry, FeedMetadata # noqa
from .readers.base import BaseFeedReader # noqa
| [
"[email protected]"
] | |
a7d44dbc3172c0b18482cbe972cbced3d85a9b38 | 66a9c0e23af1fab7f3c0b2f0cd6b8c6ac060b1d7 | /models/image_segmentation/tensorflow/3d_unet/inference/fp32/unet3d/prediction.py | 0810e632e699bbe4be9e3f11c215749c1247f453 | [
"MIT",
"Apache-2.0"
] | permissive | hekaplex/resnet_dl | ea289864b330bfa74996444d0325f1a062feae59 | fc8d4dcc0adffbe22d01d333e6cf5db955f2f011 | refs/heads/master | 2023-04-15T06:03:18.696578 | 2021-05-05T14:18:13 | 2021-05-05T14:18:13 | 364,602,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,846 | py | import os
import nibabel as nib
import numpy as np
import tables
import time
import math
from .training import load_old_model
from .utils import pickle_load
from .utils.patches import reconstruct_from_patches, get_patch_from_3d_data, compute_patch_indices
from .augment import permute_data, generate_permutation_keys, reverse_permute_data
def patch_wise_prediction(model, data, overlap=0, batch_size=1, permute=False):
    """Predict a full volume by tiling it into patches and stitching results.

    :param model: Keras model; its input fixes the 3D patch shape and its
        output channel count fixes the output shape.
    :param data: array of shape (1, n_channels, x, y, z) holding one case.
    :param overlap: number of voxels adjacent patches share.
    :param batch_size: how many patches to push through the model per call.
    :param permute: if True, average predictions over data permutations.
    :return: reconstructed volume of shape (n_output_channels, x, y, z).
    """
    patch_shape = tuple(int(dim) for dim in model.input.shape[-3:])
    indices = compute_patch_indices(data.shape[-3:], patch_size=patch_shape, overlap=overlap)
    predictions = []
    batch = []
    i = 0
    while i < len(indices):
        # Fill one batch of patches. The extra "i < len(indices)" guard lets
        # the final batch be partial: the original loop only checked the batch
        # length and raised IndexError on indices[i] whenever batch_size did
        # not evenly divide len(indices).
        while len(batch) < batch_size and i < len(indices):
            batch.append(get_patch_from_3d_data(
                data[0], patch_shape=patch_shape, patch_index=indices[i]))
            i += 1
        prediction = predict(model, np.asarray(batch), permute=permute)
        batch = []
        predictions.extend(prediction)
    output_shape = [int(model.output.shape[1])] + list(data.shape[-3:])
    return reconstruct_from_patches(predictions, patch_indices=indices, data_shape=output_shape)
def get_prediction_labels(prediction, threshold=0.5, labels=None):
    """Convert per-class score maps into discrete label maps.

    :param prediction: array of shape (n_samples, n_classes, ...); argmax over
        the class axis yields labels 1..n_classes, and any voxel whose best
        score falls below *threshold* becomes background (0).
    :param threshold: minimum class score for a voxel to receive a label.
    :param labels: optional sequence mapping class i+1 -> labels[i].
    :return: list of uint8 label arrays, one per sample.
    """
    n_samples = prediction.shape[0]
    label_arrays = []
    for sample_number in range(n_samples):
        label_data = np.argmax(prediction[sample_number], axis=0) + 1
        label_data[np.max(prediction[sample_number], axis=0) < threshold] = 0
        if labels:
            # Remap through a copy while masking against the ORIGINAL array.
            # The previous in-place remap collided when a target label equalled
            # a later source value (e.g. labels=[2, 4]: 1->2, then those new 2s
            # were wrongly remapped to 4), and its `unique(...)[1:]` skipped a
            # real class whenever no background 0 was present.
            relabeled = label_data.copy()
            for value in np.unique(label_data).tolist():
                if value == 0:
                    continue  # 0 stays background
                relabeled[label_data == value] = labels[value - 1]
            label_data = relabeled
        label_arrays.append(np.array(label_data, dtype=np.uint8))
    return label_arrays
def get_test_indices(testing_file):
    """Load the list of validation/test case indices from a pickle file."""
    return pickle_load(testing_file)
def predict_from_data_file(model, open_data_file, index):
    """Run the model directly on case *index* of an open pytables file."""
    return model.predict(open_data_file.root.data[index])
def predict_and_get_image(model, data, affine):
    """Predict on *data* and wrap channel 0 of sample 0 as a NIfTI image."""
    return nib.Nifti1Image(model.predict(data)[0, 0], affine)
def predict_from_data_file_and_get_image(model, open_data_file, index):
    """Predict for case *index* and return the result as a NIfTI image."""
    # NOTE(review): passes the whole affine array, not affine[index], unlike
    # run_validation_case -- confirm which is intended.
    return predict_and_get_image(model, open_data_file.root.data[index], open_data_file.root.affine)
def predict_from_data_file_and_write_image(model, open_data_file, index, out_file):
    """Predict for stored case *index* and save the image to *out_file*."""
    predict_from_data_file_and_get_image(model, open_data_file, index).to_filename(out_file)
def prediction_to_image(prediction, affine, label_map=False, threshold=0.5, labels=None):
    """Turn a prediction array of shape (1, n_channels, ...) into NIfTI output.

    Single-channel input yields one image (raw scores, or a thresholded binary
    label map when *label_map* is set). Multi-channel input yields either a
    single multi-class label map or one image per channel.
    """
    n_channels = prediction.shape[1]
    if n_channels < 1:
        raise RuntimeError("Invalid prediction array shape: {0}".format(prediction.shape))
    if n_channels == 1:
        data = prediction[0, 0]
        if label_map:
            # Binarize: voxels above threshold get the (single) label value.
            binary = np.zeros(data.shape, np.int8)
            binary[data > threshold] = labels[0] if labels else 1
            data = binary
    else:
        if not label_map:
            # Raw multi-channel output: one image per channel.
            return multi_class_prediction(prediction, affine)
        data = get_prediction_labels(prediction, threshold=threshold, labels=labels)[0]
    return nib.Nifti1Image(data, affine)
def multi_class_prediction(prediction, affine):
    """Return one Nifti1Image per prediction channel of the first sample."""
    return [nib.Nifti1Image(channel, affine) for channel in prediction[0]]
def run_validation_case(data_index, output_dir, model, data_file, training_modalities,
                        output_label_map=False, threshold=0.5, labels=None, overlap=16, permute=False):
    """
    Runs a test case and writes predicted images to file.
    :param data_index: Index into the list of test cases to get an image prediction from.
    :param output_dir: Where to write prediction images.
    :param model: Keras model used for inference; its input shape selects
        whole-volume vs. patch-wise prediction below.
    :param data_file: open pytables file with ``data``/``truth``/``affine`` arrays.
    :param training_modalities: modality names used to label the input images.
    :param output_label_map: If True, will write out a single image with one or more labels. Otherwise outputs
        the (sigmoid) prediction values from the model.
    :param threshold: If output_label_map is set to True, this threshold defines the value above which is
        considered a positive result and will be assigned a label.
    :param labels: label values handed through to prediction_to_image.
    :param overlap: voxel overlap between neighbouring patches (patch-wise path).
    :param permute: enable permutation (test-time augmentation) averaging.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    affine = data_file.root.affine[data_index]
    test_data = np.asarray([data_file.root.data[data_index]])
    # Write each input modality and the ground truth next to the prediction.
    for i, modality in enumerate(training_modalities):
        image = nib.Nifti1Image(test_data[0, i], affine)
        image.to_filename(os.path.join(output_dir, "data_{0}.nii.gz".format(modality)))
    test_truth = nib.Nifti1Image(data_file.root.truth[data_index][0], affine)
    test_truth.to_filename(os.path.join(output_dir, "truth.nii.gz"))
    patch_shape = tuple([int(dim) for dim in model.input.shape[-3:]])
    # Whole-volume inference when the model's input matches the case exactly;
    # otherwise tile the volume into patches and stitch the results.
    if patch_shape == test_data.shape[-3:]:
        prediction = predict(model, test_data, permute=permute)
    else:
        prediction = patch_wise_prediction(model=model, data=test_data, overlap=overlap, permute=permute)[np.newaxis]
    prediction_image = prediction_to_image(prediction, affine, label_map=output_label_map, threshold=threshold,
                                           labels=labels)
    # Multi-channel output arrives as a list of images, one file per channel.
    if isinstance(prediction_image, list):
        for i, image in enumerate(prediction_image):
            image.to_filename(os.path.join(output_dir, "prediction_{0}.nii.gz".format(i + 1)))
    else:
        prediction_image.to_filename(os.path.join(output_dir, "prediction.nii.gz"))
def run_validation_cases(validation_keys_file, model_file, training_modalities, labels, hdf5_file,
                         output_label_map=False, output_dir=".", threshold=0.5, overlap=16, permute=False, warmup=10, report_interval=1, batch_size=1, n_batch=10):
    """Run inference on every validation case and report timing statistics.

    Loads the model and the pickled validation indices, predicts each case via
    run_validation_case into a per-case directory, and prints latency and
    throughput once ``n_batch`` cases have been handled (the first ``warmup``
    cases are excluded from timing). ``report_interval`` is currently unused.
    """
    validation_indices = pickle_load(validation_keys_file)
    model = load_old_model(model_file)
    data_file = tables.open_file(hdf5_file, "r")
    elapsed_time = 0
    elapsed_step = 0
    for index in validation_indices:
        start = time.time()
        if 'subject_ids' in data_file.root:
            case_directory = os.path.join(output_dir, data_file.root.subject_ids[index].decode('utf-8'))
        else:
            case_directory = os.path.join(output_dir, "validation_case_{}".format(index))
        run_validation_case(data_index=index, output_dir=case_directory, model=model, data_file=data_file,
                            training_modalities=training_modalities, output_label_map=output_label_map, labels=labels,
                            threshold=threshold, overlap=overlap, permute=permute)
        end = time.time()
        # NOTE(review): compares the case INDEX, not a step counter, against
        # warmup -- assumes validation_indices are sequential ints; confirm.
        if index >= warmup:
            elapsed_time += (end - start)
            elapsed_step += 1
        if elapsed_step + warmup == n_batch:
            print('Time spent per BATCH: %.4f ms' % (1000.0 * elapsed_time / elapsed_step))
            print('Total samples/sec: %.4f samples/s' % (elapsed_step * batch_size / elapsed_time))
            break
    data_file.close()
def predict(model, data, permute=False):
    """Run the model on *data*.

    With permute=True each sample is predicted under every data permutation
    and the results are averaged (test-time augmentation); otherwise this is
    a plain model.predict call.
    """
    if not permute:
        return model.predict(data)
    per_sample = [predict_with_permutations(model, sample) for sample in data]
    return np.asarray(per_sample)
def predict_with_permutations(model, data):
    """Average the model's predictions over all data permutations.

    For each permutation key, the single sample *data* is permuted, predicted
    (with a batch axis added via np.newaxis), and the prediction is mapped
    back through the inverse permutation before averaging.
    """
    predictions = list()
    for permutation_key in generate_permutation_keys():
        temp_data = permute_data(data, permutation_key)[np.newaxis]
        predictions.append(reverse_permute_data(model.predict(temp_data)[0], permutation_key))
    return np.mean(predictions, axis=0)
def run_large_batch_validation_cases(validation_keys_file, model_file, training_modalities, labels, hdf5_file,
                         output_label_map=False, output_dir=".", threshold=0.5, overlap=16, permute=False, batch_size=1, warmup=1, report_interval=1, n_batch=10):
    """Run validation inference on batches of cases and write images/timings.

    Like run_validation_cases, but groups ``batch_size`` cases per iteration so
    the patches of several cases share one model forward pass.

    :param validation_keys_file: pickle file holding the validation indices.
    :param model_file: path of the saved Keras model.
    :param training_modalities: modality names used to label the input images.
    :param labels: label values handed to prediction_to_image.
    :param hdf5_file: pytables file with ``data``/``truth``/``affine`` arrays.
    :param output_label_map: write discrete label maps instead of raw scores.
    :param output_dir: root directory for the per-case output folders.
    :param threshold: positive-class threshold for label maps.
    :param overlap: voxel overlap between neighbouring patches.
    :param permute: enable permutation (test-time augmentation) averaging.
    :param batch_size: number of validation cases processed per iteration.
    :param warmup: iterations excluded from the timing statistics.
    :param report_interval: unused; kept for interface compatibility.
    :param n_batch: iteration count after which timing is printed and the
        loop stops.
    """
    validation_indices = pickle_load(validation_keys_file)
    model = load_old_model(model_file)
    data_file = tables.open_file(hdf5_file, "r")
    step = math.ceil(len(validation_indices) / batch_size)
    elapsed_time = 0
    elapsed_step = 0
    for i in range(step):
        print('iteration {} ...'.format(i))
        start_time = time.time()
        test_data_index = validation_indices[i * batch_size: (i + 1) * batch_size]
        test_data = []
        affine_dict = {}
        for tdi in test_data_index:
            # Initialize the per-case output directory.
            if 'subject_ids' in data_file.root:
                case_directory = os.path.join(output_dir, data_file.root.subject_ids[tdi].decode('utf-8'))
            else:
                case_directory = os.path.join(output_dir, "validation_case_{}".format(tdi))
            if not os.path.exists(case_directory):
                os.makedirs(case_directory)
            # Write the input modalities and ground truth to the case directory.
            affine = data_file.root.affine[tdi]
            affine_dict[tdi] = affine
            test_data_elem = np.asarray([data_file.root.data[tdi]])
            for index, modality in enumerate(training_modalities):
                image = nib.Nifti1Image(test_data_elem[0, index], affine)
                image.to_filename(os.path.join(case_directory, "data_{0}.nii.gz".format(modality)))
            test_truth = nib.Nifti1Image(data_file.root.truth[tdi][0], affine)
            test_truth.to_filename(os.path.join(case_directory, "truth.nii.gz"))
            test_data.append(data_file.root.data[tdi])
        test_data = np.asarray([test_data])
        patch_shape = tuple([int(dim) for dim in model.input.shape[-3:]])
        if patch_shape == test_data.shape[-3:]:
            # Whole-volume inference path.
            # NOTE(review): this branch leaves `indices` undefined, so the
            # reconstruction loop below would raise NameError; it looks
            # unexercised for patch-free models -- confirm before relying on it.
            # Fixed: int comparisons below used `is`, which is an identity
            # check and unreliable for numbers; use `==`.
            if test_data.ndim == 6:
                assert test_data.shape[0] == 1
                test_data = test_data[0]
            predictions = predict(model, test_data, permute=permute)
        else:
            # Patch-wise path: gather every patch of every case in the batch
            # into one flat list and run a single forward pass.
            predictions = []
            indices = compute_patch_indices(test_data.shape[-3:], patch_size=patch_shape, overlap=overlap)
            batch = []
            for b in range(test_data.shape[1]):
                for one_index in indices:
                    batch.append(get_patch_from_3d_data(test_data[0][b], patch_shape=patch_shape, patch_index=one_index))
            pred_start = time.time()
            prediction = predict(model, np.asarray(batch), permute=permute)
            pred_stop = time.time()
            print('pred time: {} ms'.format((pred_stop - pred_start) * 1000))
            # Split the flat patch predictions back into per-case groups.
            ps = prediction.shape
            assert ps[0] % test_data.shape[1] == 0
            prediction = np.reshape(prediction, (test_data.shape[1], int(ps[0] / test_data.shape[1]), ps[1], ps[2], ps[3], ps[4]))
            for batch_index, batch_prediction in enumerate(prediction):
                if len(predictions) < (batch_index + 1):
                    assert batch_index == len(predictions)
                    predictions.append([])
                for patch_index, predicted_patch in enumerate(batch_prediction):
                    predictions[batch_index].append(predicted_patch)
        output_shape = [int(model.output.shape[1])] + list(test_data.shape[-3:])
        # Reconstruct each case's volume from its patch predictions.
        reconstructed_predictions = []
        for pred in predictions:
            reconstructed_prediction = reconstruct_from_patches(pred, patch_indices=indices, data_shape=output_shape)[np.newaxis]
            reconstructed_predictions.append(reconstructed_prediction)
        # Convert each reconstructed volume to an image and save it.
        prediction_images = []
        for pred_index, pred in enumerate(reconstructed_predictions):
            rec_pred_index = test_data_index[pred_index]
            affine = affine_dict[rec_pred_index]
            prediction_image = prediction_to_image(pred, affine, label_map=output_label_map, threshold=threshold,
                                                   labels=labels)
            # Fixed: the original appended the list to itself
            # (`prediction_images.append(prediction_images)`).
            prediction_images.append(prediction_image)
            if 'subject_ids' in data_file.root:
                case_directory = os.path.join(output_dir, data_file.root.subject_ids[rec_pred_index].decode('utf-8'))
            else:
                case_directory = os.path.join(output_dir, "validation_case_{}".format(rec_pred_index))
            if isinstance(prediction_image, list):
                for image_index, image in enumerate(prediction_image):
                    image.to_filename(os.path.join(case_directory, "prediction_{0}.nii.gz".format(image_index + 1)))
            else:
                prediction_image.to_filename(os.path.join(case_directory, "prediction.nii.gz"))
        stop_time = time.time()
        if i >= warmup:
            elapsed_time += (stop_time - start_time)
            elapsed_step += 1
        if elapsed_step + warmup == n_batch:
            print('performance = {} img/s, count for {} steps and batch size is {}'.format(elapsed_step * batch_size / elapsed_time, elapsed_step, batch_size))
            print('latency = {} ms'.format(1000 * elapsed_time / elapsed_step))
            elapsed_time = 0
            elapsed_step = 0
            break
    data_file.close()
| [
"[email protected]"
] | |
87b61ab79ea2e0d19563e98bd2c50283f6e2da98 | 6c44aa08cdac167f150fa5e08aa3a2d0efd22fbd | /clearbyte/urls.py | 40735a8fec3c632267a9474a85807e6c02d9f487 | [] | no_license | sumansai14/clearbyte | acaca0d194dfbca0a542301a622f44dbc122a325 | f960a1a898ccbc79ed5cdc4cf213f09232f94921 | refs/heads/master | 2022-12-11T18:10:38.239032 | 2017-07-31T06:42:13 | 2017-07-31T06:42:13 | 98,814,570 | 1 | 0 | null | 2022-11-22T01:47:56 | 2017-07-30T17:44:43 | JavaScript | UTF-8 | Python | false | false | 571 | py | from django.conf.urls import include, url
from django.contrib import admin
from clearbyte.web.home import HomeView
from clearbyte.api.views.company import CompanyViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'api/company', CompanyViewSet)
urlpatterns = [
# Examples:
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^company/search/.*/$', HomeView.as_view(), name='adamantium-home'),
url(r'^$', HomeView.as_view(), name='adamantium-home'),
] + router.urls
| [
"[email protected]"
] | |
11b5ad683134ad8b05c87abf40a8a09968e574ae | bf0aa689b92be1df24100e8581caab59a74e31db | /src/GTCv3/equilibrium.py | b7b44d3cb1e7b03643192de3efb5baa4b70e4d3c | [
"MIT"
] | permissive | shmilee/gdpy3 | d7c689a70557534baa98595092cee0d737ea93cc | cdebb80dbb4a4d84ffa7115d8f18b5589fd40fb2 | refs/heads/master | 2023-08-19T22:42:40.305085 | 2023-08-15T02:11:15 | 2023-08-15T03:11:04 | 88,051,033 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 12,669 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019-2020 shmilee
'''
Source fortran code:
eqplot.F90, subroutine eqplot
1. first part, 1D radial plots, datap(lsp),data1d(lsp)
write(ieq,101)nplot,nrad
write(ieq,102)datap
!!! 1
write(ieq,102)data1d
...
!!! 29
write(ieq,102)data1d
2. second part, 2D contour plots on poloidal plane,
datax(mpsi/mskip+1,lst),dataz(mpsi/mskip+1,lst),data2d(mpsi/mskip+1,lst,5)
write(ieq,101)nplot,mpsi/mskip+1,lst
!0-1: mesh points on (X,Z)
write(ieq,102)datax,dataz
!2: b-field
write(ieq,102)data2d(:,:,1)
!3: Jacobian
write(ieq,102)data2d(:,:,2)
!4: icurrent
write(ieq,102)data2d(:,:,3)
!5: zeta2phi
write(ieq,102)data2d(:,:,4)
!6: delb
write(ieq,102)data2d(:,:,5)
'''
import numpy
from ..cores.converter import Converter, clog
from ..cores.digger import Digger, dlog
# Registry of the converter/digger class names this module exports.
_all_Converters = ['EquilibriumConverter']
_all_Diggers = ['EquilibriumPsi1DDigger', 'EquilibriumRadial1DDigger',
                'EquilibriumErro1DDigger', 'EquilibriumPoloidalDigger',
                'EquilibriumMeshDigger', 'EquilibriumThetaDigger']
__all__ = _all_Converters + _all_Diggers
class EquilibriumConverter(Converter):
    '''
    Equilibrium data

    1) first part, 1D radial plots. 'nplot-1d', 'nrad'. 'nplot-1d' + 1 = 30.
       Shape of '1d-data' is ('nplot-1d' + 1, nrad). 30 plots order:
       0'radial-axis-using-poloidal-flux-function',
       1'square-root-of-normalized-toroidal-flux-function',
       2'minor-radius', 3'major-radius',
       4'Te', 5'-d(ln(Te))/dr', 6'ne', 7'-d(ln(ne))/dr',
       8'Ti', 9'-d(ln(Ti))/dr', 10'ni', 11'-d(ln(ni))/dr',
       12'Tf', 13'-d(ln(Tf))/dr', 14'nf', 15'-d(ln(nf))/dr',
       16'zeff', 17'toroidal-rotation', 18'radial-electric-field',
       19'q-profile', 20'd(ln(q))/dpsi',
       21'gcurrent-profile', 22'pressure-profile',
       23'minor-radius', 24'toroidal-flux', 25'rgpsi', 26'psitor', 27'psirg',
       28'error-of-spline-cos', 29'error-of-spline-sin'.
    2) second part, 2D contour plots on poloidal plane.
       'nplot-2d', 'mpsi/mskip+1', 'lst'.
       Shape of 2D data is (mpsi/mskip+1, lst).
       'mesh-points-on-X', 'mesh-points-on-Z',
       'b-field', 'Jacobian', 'icurrent', 'zeta2phi', 'delb'.
    '''
    __slots__ = []
    nitems = '?'
    itemspattern = ['^(?P<section>equilibrium)\.out$',
                    '.*/(?P<section>equilibrium)\.out$']
    _datakeys = (
        # 1. first part, 1D
        'nplot-1d', 'nrad', '1d-data',
        # 2. second part, 2D
        'nplot-2d', 'mpsi-over-mskip+1', 'lst',
        'mesh-points-on-X', 'mesh-points-on-Z',
        'b-field', 'Jacobian', 'icurrent', 'zeta2phi', 'delb')
    def _convert(self):
        '''Read 'equilibrium.out'.'''
        with self.rawloader.get(self.files) as f:
            clog.debug("Read file '%s'." % self.files)
            outdata = f.readlines()
        sd = {}
        # 1. first part: two header ints, then (nplot-1d + 1) * nrad floats,
        # one value per line, reshaped row-major into the 1D plot table.
        clog.debug("Filling datakeys: %s ..." % str(self._datakeys[:3]))
        sd.update({'nplot-1d': int(outdata[0].strip()),
                   'nrad': int(outdata[1].strip())})
        size1 = (sd['nplot-1d'] + 1) * sd['nrad']
        shape1 = ((sd['nplot-1d'] + 1), sd['nrad'])
        data1 = numpy.array([float(n.strip()) for n in outdata[2:2 + size1]])
        data1 = data1.reshape(shape1, order='C')
        sd.update({'1d-data': data1})
        # 2. second part starts right after the 1D floats: three header ints,
        # then (nplot-2d + 2) planes of mpsi/mskip+1 x lst values.
        clog.debug("Filling datakeys: %s ..." % str(self._datakeys[3:6]))
        index2 = 2 + size1
        sd.update({'nplot-2d': int(outdata[index2].strip()),
                   'mpsi-over-mskip+1': int(outdata[index2 + 1].strip()),
                   'lst': int(outdata[index2 + 2].strip())})
        clog.debug("Filling datakeys: %s ..." % str(self._datakeys[6:]))
        size2 = (sd['nplot-2d'] + 2) * sd['mpsi-over-mskip+1'] * sd['lst']
        shape2 = ((sd['nplot-2d'] + 2), sd['mpsi-over-mskip+1'] * sd['lst'])
        data2 = numpy.array([float(n.strip())
                             for n in outdata[index2 + 3:index2 + 3 + size2]])
        data2 = data2.reshape(shape2, order='C')
        shape3 = (sd['mpsi-over-mskip+1'], sd['lst'])
        # Fortran writes each plane column-major, hence order='F' here.
        for i, key in enumerate(self._datakeys[6:]):
            sd.update({key: data2[i].reshape(shape3, order='F')})
        return sd
# Shared map: figure key -> plot title and its row index in the '1d-data'
# table written by EquilibriumConverter (see that class's docstring).
_1d_data_misc = {
    'minor_r': dict(
        title='inverse aspec-ratio from profile data', index=2),
    'major_r': dict(title='major radius from profile data', index=3),
    'Te': dict(title='Te', index=4),
    'L_Te-1': dict(title='-d(ln(Te))/dr', index=5),
    'ne': dict(title='ne', index=6),
    'L_ne-1': dict(title='-d(ln(ne))/dr', index=7),
    'Ti': dict(title='Ti', index=8),
    'L_Ti-1': dict(title='-d(ln(Ti))/dr', index=9),
    'ni': dict(title='ni', index=10),
    'L_ni-1': dict(title='-d(ln(ni))/dr', index=11),
    'Tf': dict(title='Tf', index=12),
    'L_Tf-1': dict(title='-d(ln(Tf))/dr', index=13),
    'nf': dict(title='nf', index=14),
    'L_nf-1': dict(title='-d(ln(nf))/dr', index=15),
    'zeff': dict(title=r'$Z_{eff}$', index=16),
    'tor_rotation': dict(title='toroidal rotation', index=17),
    'Er': dict(title=r'$E_r$', index=18),
    'q': dict(title='q profile', index=19),
    'shear': dict(title='shear d(ln(q))/dpsi', index=20),
    'gcurrent': dict(title='gcurrent profile', index=21),
    'pressure': dict(title='pressure profile', index=22),
    'tor_flux': dict(title='toroidal flux', index=24),
    'rg': dict(title='radial grid', index=25),
}
class EquilibriumPsi1DDigger(Digger):
    '''
    X -> psi of radius, Z_eff, rotation, E_r, q, shear, pressure ...
    and T, n of ion, electron, fastion
    '''
    __slots__ = ['_numseed']
    itemspattern = [r'^(?P<section>equilibrium)/1d-data$']
    # All shared 1D quantities, plus minor radius r as a function of psi.
    _misc = _1d_data_misc.copy()
    _misc['r'] = dict(title='minor radius r(psi)', index=23)
    numseeds = list(_misc.keys())
    post_template = 'tmpl_line'
    def _set_fignum(self, numseed=None):
        # One figure per quantity, e.g. 'q(psi)'.
        self._fignum = '%s(psi)' % numseed
        self._numseed = numseed
    def _dig(self, kwargs):
        '''Plot the selected 1D quantity against psi (row 0 of '1d-data').'''
        data = self.pckloader.get(self.srckeys[0])
        X = data[0]
        return dict(X=X, xlim=[min(X), max(X)],
                    Y=data[self._misc[self._numseed]['index']],
                    title=self._misc[self._numseed]['title']), {}
    def _post_dig(self, results):
        r = results
        return dict(LINE=[(r['X'], r['Y'])], title=r['title'],
                    xlabel=r'$\psi$', xlim=r['xlim'])
class EquilibriumRadial1DDigger(Digger):
    '''
    X -> r of psi, Z_eff, rotation, E_r, q, shear, pressure ...
    and T, n of ion, electron, fastion
    '''
    __slots__ = ['_numseed']
    itemspattern = [r'^(?P<section>equilibrium)/1d-data$']
    commonpattern = ['equilibrium/nrad']
    # All shared 1D quantities, plus psi as a function of r.
    _misc = _1d_data_misc.copy()
    _misc['psi'] = dict(title='psi(r)', index=0)
    numseeds = list(_misc.keys())
    post_template = 'tmpl_line'
    def _set_fignum(self, numseed=None):
        # One figure per quantity, e.g. 'q(r)'.
        self._fignum = '%s(r)' % numseed
        self._numseed = numseed
    def _dig(self, kwargs):
        '''Plot the selected quantity against minor radius normalized to its edge value.'''
        data, nrad = self.pckloader.get_many(*self.srckeys, *self.extrakeys)
        # Row 23 is minor radius r(psi); divide by the last grid point to get r/a.
        X = data[23] / data[23][nrad - 1]
        return dict(X=X, xlim=[min(X), max(X)],
                    Y=data[self._misc[self._numseed]['index']],
                    title=self._misc[self._numseed]['title']), {}
    def _post_dig(self, results):
        r = results
        return dict(LINE=[(r['X'], r['Y'])], title=r['title'],
                    xlabel=r'radius $r$', xlim=r['xlim'])
class EquilibriumErro1DDigger(Digger):
    '''X -> [0, pi/2], error of spline cos, sin'''
    __slots__ = ['_numseed']
    itemspattern = [r'^(?P<section>equilibrium)/1d-data$']
    commonpattern = ['equilibrium/nrad']
    # Rows 28/29 of '1d-data' hold the spline-fit errors.
    _misc = {'cos': dict(title='error of spline cos', index=28),
             'sin': dict(title='error of spline sin', index=29)}
    numseeds = list(_misc.keys())
    post_template = 'tmpl_line'
    def _set_fignum(self, numseed=None):
        self._fignum = 'error-%s' % numseed
        self._numseed = numseed
    def _dig(self, kwargs):
        '''Plot the spline error over nrad equal angle steps in (0, pi/2].'''
        data, nrad = self.pckloader.get_many(*self.srckeys, *self.extrakeys)
        X = numpy.array(range(1, nrad + 1)) * (numpy.pi / 2 / nrad)
        return dict(X=X, xlim=[min(X), max(X)],
                    Y=data[self._misc[self._numseed]['index']],
                    title=self._misc[self._numseed]['title']), {}
    def _post_dig(self, results):
        r = results
        return dict(LINE=[(r['X'], r['Y'])], title=r['title'],
                    xlabel=r'$\theta$', xlim=r['xlim'])
class EquilibriumPoloidalDigger(Digger):
    '''b-field, Jacobian, icurrent, zeta2phi, delb on poloidal'''
    __slots__ = []
    nitems = '+'
    # One 2D quantity plus the (X, Z) mesh coordinate arrays.
    itemspattern = [r'^(?P<section>equilibrium)/'
                    + '(?P<par>(?:b-field|Jacobian|icurrent|zeta2phi|delb))$',
                    r'^(?P<section>equilibrium)/mesh-points-on-(?:X|Z)$']
    post_template = 'tmpl_contourf'
    def _set_fignum(self, numseed=None):
        # Figure is named after the matched 2D quantity.
        self._fignum = self.section[1]
    def _dig(self, kwargs):
        '''Contour the matched quantity on the poloidal (X, Z) mesh.'''
        Z, X, Y = self.pckloader.get_many(*self.srckeys)
        return dict(X=X, Y=Y, Z=Z, title=self.fignum), {}
    def _post_dig(self, results):
        results.update(xlabel=r'$R(R_0)$', ylabel=r'$Z(R_0)$', aspect='equal')
        return results
class EquilibriumMeshDigger(Digger):
    '''poloidal mesh'''
    __slots__ = []
    nitems = '+'
    itemspattern = [r'^(?P<section>equilibrium)/mesh-points-on-(?:X|Z)$',
                    r'^(?P<section>equilibrium)/mpsi-over-mskip\+1',
                    r'^(?P<section>equilibrium)/lst']
    post_template = 'tmpl_line'
    def _set_fignum(self, numseed=None):
        self._fignum = 'poloidal_mesh'
    def _dig(self, kwargs):
        '''Collect flux-surface contours and radial lines of the poloidal mesh.'''
        X, Y, lsp, lst = self.pckloader.get_many(
            *self.srckeys, *self.extrakeys)
        # Flux-surface contours: repeat each surface's first point to close the loop.
        surf_lines = [(numpy.append(X[i], X[i][0]), numpy.append(Y[i], Y[i][0]))
                      for i in range(lsp)]
        # Radial lines: one per poloidal grid index.
        radial_lines = [(X[:, j], Y[:, j]) for j in range(lst)]
        return dict(LINEs1=numpy.array(surf_lines), LINEs2=numpy.array(radial_lines),
                    title='poloidal mesh', xlabel='R', ylabel='Z'), {}
    def _post_dig(self, results):
        lines = list(results['LINEs1']) + list(results['LINEs2'])
        return dict(
            LINE=lines, title=results['title'],
            xlabel=r'$R(R_0)$', ylabel=r'$Z(R_0)$', aspect='equal')
class EquilibriumThetaDigger(Digger):
    '''X -> theta of b-field, Jacobian, icurrent zeta2phi, delb at psi=isp'''
    __slots__ = []
    nitems = '?'
    itemspattern = [r'^(?P<section>equilibrium)/'
                    + '(?P<par>(?:b-field|Jacobian|icurrent|zeta2phi|delb'
                    + '|1d-data))$']  # for 'gq_plus_I/BB'
    commonpattern = ['equilibrium/mpsi-over-mskip\+1', 'equilibrium/lst']
    post_template = 'tmpl_line'
    def _set_fignum(self, numseed=None):
        self._fignum = '%s:theta' % self.section[1]
        # Matching '1d-data' selects the derived (gq+I)/B^2 figure instead.
        if self.section[1] == '1d-data':
            self._fignum = 'gq_plus_I/BB:theta'
        self.kwoptions = None
    def _dig(self, kwargs):
        '''
        kwargs
        ------
        *isp*: int
            fix psi=isp, default 'mpsi-over-mskip+1' - 1
        '''
        Z, lsp, lst = self.pckloader.get_many(*self.srckeys, *self.extrakeys)
        # Default to the outermost flux surface; accept a user isp in range.
        isp = lsp - 1
        acckwargs = {'isp': isp}
        if 'isp' in kwargs and isinstance(kwargs['isp'], int):
            if 1 < kwargs['isp'] < lsp - 1:
                isp = kwargs['isp']
                acckwargs['isp'] = isp
        if self.kwoptions is None:
            self.kwoptions = dict(
                isp=dict(
                    widget='IntSlider',
                    rangee=(0, lsp - 1, 1),
                    value=isp,
                    description='psi=isp:'))
        dlog.parm("fix psi=isp=%d. Maximal isp=%d." % (isp, lsp - 1))
        # Poloidal angle grid: lst+1 points covering [0, 2*pi].
        X = 2.0 * numpy.pi * numpy.array(range(lst + 1)) / lst
        if self.fignum == 'gq_plus_I/BB:theta':
            # g (row 21) and q (row 19) come from the matched '1d-data' table.
            g = Z[21][isp]
            q = Z[19][isp]
            # extrakeys, not check
            icurrent, bfield = self.pckloader.get_many(
                'equilibrium/icurrent', 'equilibrium/b-field')
            I, B = icurrent[isp], bfield[isp]
            Y = (g * q + I) / (B * B)
            title = '(gq+I)/B^2'
        else:
            Y = Z[isp]
            title = self.section[1]
        # Close the periodic curve by repeating the first theta point.
        Y = numpy.append(Y, Y[0])
        title = r'%s ($\theta$) at psi=isp=%d' % (title, isp)
        return dict(X=X, Y=Y, title=title, xlim=[min(X), max(X)]), acckwargs
_post_dig = EquilibriumErro1DDigger._post_dig
| [
"[email protected]"
] | |
e2c8bbd3422f404804ca4a5b2183623bd2d0fc02 | bbe7d6d59ef6d7364ff06377df9658367a19c425 | /coghq/DistributedBattleFactory.py | 9cbf5b835ad112cda6c82c2ab13e8a87022244ce | [
"Apache-2.0"
] | permissive | DedMemez/ODS-August-2017 | 1b45c912ad52ba81419c1596644d8db2a879bd9b | 5d6214732e3245f63bfa250e3e9c881cc2dc28ad | refs/heads/master | 2021-01-22T18:37:51.626942 | 2017-08-19T02:04:51 | 2017-08-19T02:04:51 | 100,762,513 | 0 | 8 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.coghq.DistributedBattleFactory
from panda3d.core import Filename
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleBase import *
from toontown.coghq import DistributedLevelBattle
from direct.directnotify import DirectNotifyGlobal
from toontown.toon import TTEmote
from otp.avatar import Emote
from toontown.battle import SuitBattleGlobals
from toontown.suit import SuitDNA
from direct.fsm import State
from direct.fsm import ClassicFSM, State
from toontown.toonbase import ToontownGlobals
from otp.nametag.NametagConstants import *
from otp.nametag import NametagGlobals
class DistributedBattleFactory(DistributedLevelBattle.DistributedLevelBattle):
    """Factory-level battle that adds a 'FactoryReward' state to the battle FSM.

    The reward state plays the end-of-battle reward movie (e.g. after the
    factory foreman boss fight) before resuming normal play.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleFactory')
    def __init__(self, cr):
        DistributedLevelBattle.DistributedLevelBattle.__init__(self, cr)
        # Register the reward state and allow entering it from 'Off' and
        # 'PlayMovie'; it can only exit to 'Resume'.
        self.fsm.addState(State.State('FactoryReward', self.enterFactoryReward, self.exitFactoryReward, ['Resume']))
        offState = self.fsm.getStateNamed('Off')
        offState.addTransition('FactoryReward')
        playMovieState = self.fsm.getStateNamed('PlayMovie')
        playMovieState.addTransition('FactoryReward')
        self.battleMusic = loader.loadMusic(self.getBattleMusicFilename())
    def getBattleMusicFilename(self):
        """Return the path of the factory battle background music."""
        return 'phase_9/audio/bgm/encntr_general_FACT_bg.ogg'
    def getBattleMusic(self):
        """Return the preloaded battle music object."""
        return self.battleMusic
    def enterFaceOff(self, ts):
        # Start the factory battle music on the place loader, then defer to
        # the base class face-off handling.
        base.cr.playGame.place.loader.battleMusic = self.getBattleMusic()
        base.cr.playGame.place.loader.battleMusic.play()
        DistributedLevelBattle.DistributedLevelBattle.enterFaceOff(self, ts)
    def enterFactoryReward(self, ts):
        """Play the reward movie; in a boss battle, announce the foreman encounter."""
        self.disableCollision()
        self.delayDeleteMembers()
        if self.hasLocalToon():
            NametagGlobals.setMasterArrowsOn(0)
        if self.bossBattle:
            messenger.send('localToonConfrontedForeman')
        self.movie.playReward(ts, self.uniqueName('building-reward'), self.__handleFactoryRewardDone, noSkip=True)
    def __handleFactoryRewardDone(self):
        # Movie finished: acknowledge the reward server-side for the local
        # toon and return the FSM to normal play.
        if self.hasLocalToon():
            self.d_rewardDone()
        self.movie.resetReward()
        self.fsm.request('Resume')
    def exitFactoryReward(self):
        self.movie.resetReward(finish=1)
        self._removeMembersKeep()
NametagGlobals.setMasterArrowsOn(1) | [
"[email protected]"
] | |
2d234c71ae3401e3c476c0145a739da0e171802a | 646ad63fc2274b85a219094b216b807c0bed1ede | /tests/core/commands/test_cmd_quota.py | 6c8784ea4c37a59ce12a55111ffffa52913c5f59 | [
"MIT"
] | permissive | Starz0r/pytuber | 9b252582b4ae296aee87faaf10a2eed9f541b7b0 | 5bb53edde6a39cedec48c4a8f41ba22db21d4727 | refs/heads/master | 2020-08-23T16:28:12.783690 | 2019-04-29T21:53:29 | 2019-04-29T21:53:29 | 216,662,810 | 0 | 0 | MIT | 2019-10-21T20:48:32 | 2019-10-21T20:48:31 | null | UTF-8 | Python | false | false | 914 | py | from datetime import datetime
from unittest import mock
from pytuber import cli
from pytuber.core.services import YouService
from tests.utils import CommandTestCase, ConfigFixture
class CommandQuotaTests(CommandTestCase):
@mock.patch.object(YouService, "get_quota_usage")
@mock.patch.object(YouService, "quota_date")
def test_run(self, quota_date, get_quota_usage):
ConfigFixture.youtube()
get_quota_usage.return_value = 9988
quota_date.return_value = datetime(
year=1970, month=1, day=1, hour=22, minute=22, second=11
)
result = self.runner.invoke(cli, ["quota"])
expected_output = (
"Provider: youtube",
" Limit: 1000000",
" Usage: 9988",
"Next reset: 1:37:49",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
| [
"[email protected]"
] | |
21f90cf761ed27de60b2f3a5a059e88edb7562ee | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_hatchet.py | 3ee3633a64a58ccd19ae48252680d868a128cd59 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py |
#calss header
class _HATCHET():
def __init__(self,):
self.name = "HATCHET"
self.definitions = [u'a small axe (= tool with a blade that cuts when you hit things with it)']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
6bd3295d27f377bb3ae91cf7f7f0ad42b8a46e60 | bd498cbbb28e33370298a84b693f93a3058d3138 | /Inspur/benchmarks/dlrm/implementations/implementation_closed/tests/buckle_embedding_test.py | 6f8033a42903ebcd748ea9664764bac2d035a03e | [
"Apache-2.0"
] | permissive | piyushghai/training_results_v0.7 | afb303446e75e3e9789b0f6c40ce330b6b83a70c | e017c9359f66e2d814c6990d1ffa56654a73f5b0 | refs/heads/master | 2022-12-19T16:50:17.372320 | 2020-09-24T01:02:00 | 2020-09-24T18:01:01 | 298,127,245 | 0 | 1 | Apache-2.0 | 2020-09-24T00:27:21 | 2020-09-24T00:27:21 | null | UTF-8 | Python | false | false | 2,042 | py | """Tests for buckle embedding"""
from absl.testing import absltest
import torch
from torch import nn
from dlrm.nn import BuckleEmbedding
# pylint:disable=missing-docstring, no-self-use
class DistEmbeddingBagTest(absltest.TestCase):
def test_smoke(self):
test_buckle_embedding = BuckleEmbedding([3, 5, 7, 11], 3, device="cpu")
test_buckle_embedding(torch.tensor([[1, 2, 3, 4], [2, 4, 6, 10]]))
def test_2embeddings_batch1(self):
test_sizes = [3, 5]
test_buckle_embedding = BuckleEmbedding(test_sizes, 3, device="cpu")
ref_embeddings = nn.ModuleList()
for size in test_sizes:
ref_embeddings.append(nn.Embedding(size, 3))
test_buckle_embedding.embedding.weight.data = torch.cat(
[embedding.weight for embedding in ref_embeddings]).clone()
test_indices = torch.tensor([[1, 3]])
embedding_out = test_buckle_embedding(test_indices)
ref_out = []
for embedding_id, embedding in enumerate(ref_embeddings):
ref_out.append(embedding(test_indices[:, embedding_id]))
ref_out = torch.cat(ref_out)
assert (ref_out == embedding_out).all()
def test_4embeddings_batch2(self):
test_sizes = [3, 5, 11, 13]
test_buckle_embedding = BuckleEmbedding(test_sizes, 3, device="cpu")
ref_embeddings = nn.ModuleList()
for size in test_sizes:
ref_embeddings.append(nn.Embedding(size, 3))
test_buckle_embedding.embedding.weight.data = torch.cat(
[embedding.weight for embedding in ref_embeddings]).clone()
test_indices = torch.tensor([[1, 3, 5, 7], [2, 4, 10, 12]])
embedding_out = test_buckle_embedding(test_indices)
ref_out = []
for embedding_id, embedding in enumerate(ref_embeddings):
ref_out.append(embedding(test_indices[:, embedding_id].unsqueeze(-1)))
ref_out = torch.cat(ref_out, dim=1)
assert (ref_out == embedding_out).all()
if __name__ == '__main__':
absltest.main()
| [
"[email protected]"
] | |
a8aa23bdab8f192dd86a7c7115adc91521b3a4d5 | 365609eab603486008b7eef9b2ede77adcac0d0c | /dataset/__init__.py | 05eefa22eb4c0ddb6b3f89e73ae9c2175d04729c | [] | no_license | dangchenyu/break_light-recognition | 8e78c0dd0ec332c3cf617caf679cdfa685cee3de | 293e8538df046acf684237caf7dd9611ebe79fb1 | refs/heads/master | 2020-08-03T12:59:09.296324 | 2019-09-30T02:34:02 | 2019-09-30T02:34:02 | 211,760,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19 | py | from .tail import * | [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.