| column | dtype | stats |
| --- | --- | --- |
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 5 to 283 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | sequence | lengths 0 to 41 |
| license_type | string | 2 classes |
| repo_name | string | lengths 7 to 96 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 58 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 12.7k to 662M, with nulls (⌀) |
| star_events_count | int64 | 0 to 35.5k |
| fork_events_count | int64 | 0 to 20.6k |
| gha_license_id | string | 11 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 43 classes |
| src_encoding | string | 9 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7 to 5.88M |
| extension | string | 30 classes |
| content | string | lengths 7 to 5.88M |
| authors | sequence | lengths 1 to 1 |
| author | string | lengths 0 to 73 |
8d8f89b65398b429c128f2f507f46338d6cda631 | 0acd186fcf8e7e83e28687baeb1d3fe0032c833d | /blogger/migrations/0008_auto_20210325_2212.py | 59644bca81956224ef2c11e4493990e64e5fc032 | [] | no_license | BaturinaAnna/DjangoWebsite | 1138d465a0b11f2db7c6e013b6a8c44e4175065a | aa788832261316b0c790fa9e94c3a36dcf51073c | refs/heads/main | 2023-03-29T02:46:48.056468 | 2021-03-31T09:45:31 | 2021-03-31T09:45:31 | 352,893,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | # Generated by Django 3.1.7 on 2021-03-25 19:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blogger', '0007_comment'),
]
operations = [
migrations.AddField(
model_name='post',
name='views',
field=models.PositiveIntegerField(default=0),
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blogger.post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
018772e43aa9380c33c9d2698efe503729a50777 | b584041c3ab574cad839523a87599192bf436aeb | /icsmp_project/img3d/migrations/0001_initial.py | dab2e319ca37764630afc77307b82bd73e8d8233 | [
"MIT"
] | permissive | ICSMP/icsmp | 4ee87f33a3138132c8ad743de0a976372b7dea0a | bf811f7457aa6d6e7251c209631d63496e561925 | refs/heads/main | 2023-01-07T17:09:41.463208 | 2020-11-01T17:51:35 | 2020-11-01T17:51:35 | 303,553,096 | 0 | 1 | MIT | 2020-11-01T17:51:36 | 2020-10-13T01:16:13 | JavaScript | UTF-8 | Python | false | false | 1,120 | py | # Generated by Django 3.1.2 on 2020-10-31 16:07
from django.db import migrations, models
import s3direct.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Img3d',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('nome', models.CharField(max_length=32, unique=True)),
('descricao', models.TextField(blank=True, null=True)),
('trabalhos_utilizados', models.TextField(blank=True, null=True)),
('slug', models.SlugField(blank=True, null=True, unique=True)),
('arquivo', s3direct.fields.S3DirectField()),
('url', models.CharField(blank=True, max_length=200, null=True)),
],
options={
'ordering': ('order',),
'abstract': False,
},
),
]
| [
"[email protected]"
] | |
bdd32549312bf8b9423f1b45a7a395269a4ee51f | 40245227fc1579f36f82c5ae5b87cb251f8902e1 | /practica1/build/software_arq_robots_course/thirdparty/yujin_ocs/yocs_keyop/catkin_generated/pkg.installspace.context.pc.py | 2f07a0ef85947d0e6db0a8c15733c4ed910a9f1a | [] | no_license | Veronica274/arq_software | 842d949fb36fa991fe852a385f9083be2ea5e4c2 | 6cb2da2109213208c122d4b999199880c25407e3 | refs/heads/main | 2023-03-04T10:21:53.084682 | 2021-02-13T16:45:25 | 2021-02-13T16:45:25 | 338,610,955 | 0 | 0 | null | 2021-02-13T16:45:25 | 2021-02-13T15:59:01 | Makefile | UTF-8 | Python | false | false | 387 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "yocs_keyop"
PROJECT_SPACE_DIR = "/home/ruben/arq_software/practica1/install"
PROJECT_VERSION = "0.12.1"
| [
"[email protected]"
] | |
0b039714da612a5ba9f5666fdfee22cd9cb5b7b6 | 02feb84c7775af145374d28f8071f1a31cfbd192 | /CAU_Mooc/apps/cau_college/migrations/0009_auto_20170427_2251.py | 40ae50b8ce11b7a596a22e6451e6210677353097 | [] | no_license | oceanlane/caux_online | e9ff126edf23a5ae571415ef784ed6ab754576b8 | 1320428a8c8b8870fc109b488275338e89ab510b | refs/heads/master | 2021-06-14T09:33:53.890557 | 2017-05-04T11:39:13 | 2017-05-04T11:39:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-27 22:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cau_college', '0008_auto_20170426_1914'),
]
operations = [
migrations.RenameField(
model_name='coursecollege',
old_name='courses_nums',
new_name='course_nums',
),
]
| [
"[email protected]"
] | |
b3bb0ceb361f78944310a278fe8e403fb8dccd4b | 280cafadda7e41608c132142aa82cb86a4dd9473 | /python_projects/07_UnitTests/Product.py | 22e4f9fe6710c415bb43d477f331f1dd177c31e9 | [] | no_license | sophiashuv/pythonProgramming-AMI23 | cd4ddea210b84c13b084008faa1a42be5c895318 | a6d87b24f855a14314f2cc52333d4df0d3678fac | refs/heads/master | 2023-04-14T16:28:15.308229 | 2021-04-27T07:35:56 | 2021-04-27T07:35:56 | 294,176,747 | 2 | 1 | null | 2021-04-05T14:50:04 | 2020-09-09T17:03:21 | Python | UTF-8 | Python | false | false | 2,228 | py | # PRODUCT class with fields: ID, title, image_url, price, created_at (date), updated_at (date), description.
from Validation import Validation
class Product(object):
"""Class for Product representation."""
def __init__(self, **kwargs):
for (prop, default) in kwargs.items():
setattr(self, prop, kwargs.get(prop, default))
@property
def title(self):
return self._title
@title.setter
@Validation.validateStr
def title(self, value):
self._title = value
@property
def price(self):
return self._price
@price.setter
@Validation.validatePrice
def price(self, value):
self._price = value
@property
def created_at(self):
return self._created_at
@created_at.setter
@Validation.validateDate
def created_at(self, value):
self._created_at = value
@property
def updated_at(self):
return self._updated_at
@updated_at.setter
@Validation.validateDate
@Validation.isBiggerDate
def updated_at(self, value):
self._updated_at = value
@property
def image_url(self):
return self._image_url
@image_url.setter
@Validation.validateImage_url
def image_url(self, value):
self._image_url = value
def __get_dictionary(self):
return dict((name, getattr(self, name)) for name in dir(self) if not name.startswith('__')
and not name.startswith('_') and name != "input_product")
@staticmethod
def input_product(*args):
d = dict((prop, input(prop + " : ")) for prop in args)
return d
def __str__(self):
"""
(Product)->(str)
returns a string representing Product.
"""
return "Product:\n" + '\n'.join("%s : %r" % (key2, str(val2)) for (key2, val2)
in self.__get_dictionary().items()) + "\n"
def __repr__(self):
"""
(Product)->(str)
returns a string representing Product.
"""
return "Product:\n" + '\n'.join("%s : %r" % (key2, str(val2)) for (key2, val2)
in self.__get_dictionary().items()) + "\n"
| [
"[email protected]"
] | |
d43d2613e687dbbcdb6fac3650e2b8ee65844455 | 9d3b8612d904f8f8e352ddc542ac809e78991a8b | /3.3-permissions/api_with_restrictions/advertisements/serializers.py | 725a907863b902e404c053584b30b48634d9449b | [] | no_license | andreyru02/dj-homeworks | bce08f4477665366eb3222f994d4f4b0d5c861d7 | 7cb50c8eb82eaef245d5baece9da5881289b7a5b | refs/heads/master | 2023-07-13T08:06:13.838743 | 2021-08-12T03:13:41 | 2021-08-12T03:13:41 | 383,150,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,720 | py | from django.contrib.auth.models import User
from rest_framework import serializers
from advertisements.models import Advertisement
class UserSerializer(serializers.ModelSerializer):
"""Serializer для пользователя."""
class Meta:
model = User
fields = ('id', 'username', 'first_name',
'last_name',)
class AdvertisementSerializer(serializers.ModelSerializer):
"""Serializer для объявления."""
creator = UserSerializer(
read_only=True,
)
class Meta:
model = Advertisement
fields = ('id', 'title', 'description', 'creator',
'status', 'created_at', )
def create(self, validated_data):
"""Метод для создания"""
# Простановка значения поля создатель по-умолчанию.
# Текущий пользователь является создателем объявления
# изменить или переопределить его через API нельзя.
# обратите внимание на `context` – он выставляется автоматически
# через методы ViewSet.
# само поле при этом объявляется как `read_only=True`
validated_data["creator"] = self.context["request"].user
return super().create(validated_data)
# def validate(self, data):
# """Validation method. Called on both create and update."""
# # TODO: add the required validation
#
# user = self.context['request'].user
# advertisement = Advertisement.objects.filter(creator=user).filter(status='OPEN')
# if 'CLOSED' in data.values():
# pass
# elif 'OPEN' in data.values() and advertisement.count() >= 10:
# raise serializers.ValidationError('The number of advertisements with status OPEN must not '
# 'exceed 10.')
# elif advertisement.count() >= 10:
# raise serializers.ValidationError('The number of advertisements with status OPEN must not '
# 'exceed 10.')
#
# return data
def validate(self, data):
user = self.context['request'].user
if Advertisement.objects.filter(status='OPEN', creator=user).count() >= 10:
raise serializers.ValidationError('A user may have at most 10 advertisements with status OPEN.')
return data
| [
"[email protected]"
] | |
a74f2055b2f3e9dbafa5d39afb4eba50f4c218fb | ffb627b58f0553fc8bf86c0d100db1dde2015cfe | /week 1/day4/14889.py | c05bc1edfcce6fb61fcb72b83d5dab0df9b9edfc | [] | no_license | DentiQ/CodeTest | a208bb1250e18fca9d336b93a5c2e4807c621866 | a8d21447ad2cefc583b45c437623647abde11d95 | refs/heads/master | 2023-06-04T06:12:33.540950 | 2021-06-30T17:00:24 | 2021-06-30T17:00:24 | 363,316,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | import itertools
n = int(input())
stat = []
team = list(range(0, n))
combinations = list(itertools.combinations(team, n // 2))
for i in range(n):
temp_array = []
temp_input = input().split()
for j in range(n):
temp_array.append(int(temp_input[j]))
stat.append(temp_array)
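# itertools.combinations emits teams in lexicographic order, so the complement
# of combinations[i] (the opposing team) is combinations[-i - 1]; iterating
# over the first half therefore visits every two-team split exactly once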
for i in range(len(combinations) // 2):
team1 = 0
team2 = 0
permutation1 = list(itertools.permutations(combinations[i], 2))
for j in permutation1:
team1 += stat[j[0]][j[1]]
permutation2 = list(itertools.permutations(combinations[-i - 1], 2))
for j in permutation2:
team2 += stat[j[0]][j[1]]
if i == 0:
ans = abs(team1 - team2)
else:
if ans > abs(team1 - team2):
ans = abs(team1 - team2)
print(ans)
| [
"[email protected]"
] | |
3d9c5234e9e162fc7690c8edac5ae8be3dbfdd35 | 0474dbdc0643439617e30654c3f46d1f38deb683 | /mj223vn_assign3/count_lines.py | b2eb6b22612ec3e270dbfa66e4b1e0ef16082bad | [] | no_license | makjohansson/PythonProgrammingExercises | ff806b9a7f727b1dbc91e1177635cd69fc5162be | 01ba9a3a3d79eef172b01a542f3ca2d9e97d847c | refs/heads/master | 2023-01-03T15:29:30.632507 | 2020-11-07T11:00:08 | 2020-11-07T11:00:08 | 310,820,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,462 | py | import os
"""
A python script counting all the non-empty lines in all python files written in the
course 1DV501
"""
def is_py_file(entity):
"""
Check if file is a python file
:param entity: a file
:return: true if file is python file
"""
return entity.name.endswith(".py")
def line_counter(file_path):
"""
Counts number of non blank lines in a python file
:param file_path: path to file
:return: amount of non blank lines
"""
non_blank_line = 0
try:
with open(file_path) as file:
for line in file:
if line.strip():
non_blank_line += 1
return non_blank_line
except IOError as e:
print(e)
def count_py_lines(file_path):
"""
Count number of non-empty lines in python files in the starting directory and its immediate subdirectories (the scan goes one level deep)
:param file_path: path to file
:return: amount of non-empty lines
"""
path_content = os.scandir(file_path)
counter = 0
for file in path_content:
if is_py_file(file):
counter += line_counter(file)
if file.is_dir() and not file.name.startswith("."):
sub_dir = os.scandir(file.path)
for the_file in sub_dir:
if is_py_file(the_file):
counter += line_counter(the_file)
return counter
path = "/Users/marcus/own_projects/1DV501"
print("Python Line Count:", count_py_lines(path))
| [
"[email protected]"
] | |
744aeeef469faa1b55771e9f025181cb9e2632e6 | 3c44c71f2006090b5ed3867c7275ba65cdd973d5 | /main.py | 45e8357a6b1152ae987ab013a0b8c5bc11dc643a | [] | no_license | moisesEzequiel1/Oscilloscope-Using-Atemga328p- | 6d24a8b9ab9416548bec094e2f287c069b71f0a8 | 3823d2994a0e7c2072c3c3b5b5706ff8da742c86 | refs/heads/main | 2023-08-06T12:54:52.437815 | 2021-09-21T19:54:05 | 2021-09-21T23:11:58 | 407,935,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py |
import struct
import serial
import time
con = serial.Serial("COM3", 115200, timeout=1)
time.sleep(2)
data = [0]*1024
con.write(b'r')
rawdata = con.read(1024)
data = list(struct.unpack('B'*1024,rawdata))
data = [(float(x)/255.0 - 0.5) * 20 for x in data]
print(data)
| [
"[email protected]"
] | |
65f66382a6ea279aa1d5bbe92137a21b4065a21a | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/PostenMapping/Model/Post060311414.py | 4966bb130cd0e2c9ab879eff260889ac75063f93 | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 3,114 | py | # coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060311414(StandaardPost):
def __init__(self):
super().__init__(
nummer='0603.11414',
beschrijving='Bestrating van in rijen te leggen kasseien volgens 6-3.2, vierkante kasseien 14 x 14 cm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanKassei',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcAfmetingBxlInCm.breedte',
dotnotation='afmetingVanBestratingselementBxl.breedte',
defaultWaarde='14',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.11414')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanKassei',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcAfmetingBxlInCm.lengte',
dotnotation='afmetingVanBestratingselementBxl.lengte',
defaultWaarde='14',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.11414')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanKassei',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotation='laagRol',
defaultWaarde='straatlaag',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.11414')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanKassei',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotation='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.11414')])
| [
"[email protected]"
] | |
70aa979a518ab1dce626787ba726cde9fdbbb32a | 7906cc1c5cfed3437773cd00df2e73d49d74352c | /sovet/migrations/0011_auto_20201008_2304.py | 97bde949b35a5dde8223ff0c5a63ed7aeab2c530 | [] | no_license | Svobodinang/legalro_backend | 31bb3e505874140a7382c56966975388e31aa8fc | b0cca70b26bbab9ea509f9ed9f5a7c29d1cc5c1f | refs/heads/master | 2023-08-10T16:06:16.519851 | 2020-10-21T20:05:46 | 2020-10-21T20:05:46 | 280,234,738 | 0 | 0 | null | 2021-09-22T19:45:37 | 2020-07-16T18:59:13 | JavaScript | UTF-8 | Python | false | false | 1,034 | py | # Generated by Django 3.1.1 on 2020-10-08 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sovet', '0010_service'),
]
operations = [
migrations.AlterField(
model_name='service',
name='accent',
field=models.CharField(blank=True, default='', max_length=5),
),
migrations.AlterField(
model_name='service',
name='text',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='service',
name='title',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='serviceblock',
name='text',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='serviceblock',
name='title2',
field=models.CharField(blank=True, default='', max_length=100),
),
]
| [
"[email protected]"
] | |
c1c4be2abb98899510f70cfebc42994e491f0191 | c2387cd5436ac0a0b4f6701efa3c11c858816ec5 | /_latex/ensae/td_note_2013_novembre_2012_exoS.py | fc172501fce87d0c064afc2ccee526b305fd0621 | [
"MIT"
] | permissive | sdpython/teachpyx | b29773fa0400224718ceb1a9135b0b0d69400bd5 | a6f722aec36648351cd2c6fb89240d15e0243a6a | refs/heads/main | 2023-08-31T02:10:40.885668 | 2023-08-30T17:18:56 | 2023-08-30T17:18:56 | 68,512,321 | 3 | 1 | MIT | 2023-09-13T22:24:48 | 2016-09-18T09:50:10 | Jupyter Notebook | UTF-8 | Python | false | false | 2,054 | py | # coding:utf-8
import math
# this function builds two interleaved spirals in an nb x nb matrix
# the result is returned as a list of lists
def construit_matrice(nb):
mat = [[0 for x in range(0, nb)] for y in range(0, nb)]
def pointij(nb, r, th, mat, c, phase):
i, j = r * th * math.cos(th + phase), r * th * math.sin(th + phase)
i, j = int(i * 100 / nb), int(j * 100 / nb)
i, j = (i + nb) // 2, (j + nb) // 2  # integer division: i and j are used as list indices
if 0 <= i < nb and 0 <= j < nb:
mat[i][j] = c
return i, j
r = 3.5
t = 0
for tinc in range(nb * 100000):
t += 1.0 * nb / 100000
th = t * math.pi * 2
i, j = pointij(nb, r, th, mat, 1, 0)
i, j = pointij(nb, r, th, mat, 1, math.pi)
if i >= nb and j >= nb:
break
return mat
# this function receives a matrix as a list of lists
# containing the integers 0, 1, 2
# each value is mapped to a color:
# 0 for white, 1 for blue, 2 for red
def dessin_matrice(matrice):
import pylab
colors = {1: "blue", 2: "red"}
for i in range(0, len(matrice)):
for j in range(0, len(matrice[i])):
if matrice[i][j] in colors:
pylab.plot(
[
i - 0.5,
i - 0.5,
i + 0.5,
i + 0.5,
i - 0.5,
i + 0.5,
i - 0.5,
i + 0.5,
],
[
j - 0.5,
j + 0.5,
j + 0.5,
j - 0.5,
j - 0.5,
j + 0.5,
j + 0.5,
j - 0.5,
],
colors[matrice[i][j]],
)
pylab.show()
if __name__ == "__main__":
matrice = construit_matrice(100)
dessin_matrice(matrice)
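# For reference: each pointij call above plots one point of the Archimedean
# spiral r(th) = 3.5 * th, i.e. (r * th * cos(th + phase), r * th * sin(th + phase))
# for phase 0 and pi, rescaled and recentred into the nb x nb grid.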
| [
"[email protected]"
] | |
a6c1839a32e152dfa35baa9d51a08456d055624f | f5a88a3dcd047359002819a4a1594f0e22751e37 | /mnist-classifier-pytorch-basic-gpu.py | 7ab24909d142b2eade0c9810d34e01229c67d7d2 | [] | no_license | sintobin/lightning-tests | faddf7aaea08007f5e8ef79f90edeae9c50277f0 | a05129f8464a5a3e2b62c086ec485bbb09922e61 | refs/heads/main | 2023-03-21T16:10:50.662797 | 2021-02-27T05:56:42 | 2021-02-27T05:56:42 | 342,783,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,531 | py | # https://www.youtube.com/watch?v=OMDn66kM9Qc
import torch
from torch import nn
from torch import optim
from torch.utils import data
from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
from torchvision import datasets, transforms
# define the model
model = nn.Sequential(
nn.Linear(28 * 28, 64),
nn.ReLU(),
nn.Linear(64, 64),
nn.ReLU(),
nn.Linear(64, 10)
).cuda()
# define the optimizer
params = model.parameters() # under-the-hood
optimizer = optim.SGD(params, lr=1e-2)
# define the loss
loss = nn.CrossEntropyLoss()
# data, train and val split
train_data = datasets.MNIST('data', train=True, download=True, transform=transforms.ToTensor())
train, val = random_split(train_data, [55000, 5000])
train_loader = DataLoader(train, batch_size=32)
val_loader = DataLoader(val, batch_size=32)
# training loop
nb_epochs = 5
for epoch in range(nb_epochs):
losses = [] # for logging
for batch in train_loader:
x, y = batch
# x: b x 1 x 28 x 28
b = x.size(0)
x = x.view(b, -1).cuda()
### 5 steps for supervised learning ###
# under-the-hood: gives the underlying idea, code will not work as is
# 1 forward
l = model(x) # logit
# 2 compute the objective function
J = loss(l, y.cuda())
# 3 cleaning the gradient
model.zero_grad()
# under-the-hood:
# params.grad._zero()
# 4 accumulate the partial derivatives of J wrt params
J.backward()
# under-the-hood:
# params.grad.add_(dJ/dparams)
# 5 step in opposite direction of the gradient
optimizer.step()
# under-the-hood
# with torch.no_grad(): params = params - eta * params.grad
losses.append(J.item())
print(f'Epoch {epoch+1}, training loss: {torch.tensor(losses).mean():.2f}')
losses = [] # for logging
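# A minimal extension (a sketch, not part of the original walkthrough): track
# validation accuracy next to the loss inside the loop below, e.g.
#   correct = (l.argmax(dim=1) == y.cuda()).sum().item()
#   accuracies.append(correct / b)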
for batch in val_loader:
x, y = batch
# x: b x 1 x 28 x 28
b = x.size(0)
x = x.view(b, -1).cuda()
### 5 steps for supervised learning ###
# under-the-hood: gives the underlying idea, code will not work as is
# 1 forward
with torch.no_grad():
l = model(x) # logit
# 2 compute the objective function
J = loss(l, y.cuda())
losses.append(J.item())
print(f'Epoch {epoch+1}, validation loss: {torch.tensor(losses).mean():.2f}')
| [
"[email protected]"
] | |
e93910b38f0c44b87885658713b90a86827289a2 | 53832bd168b797827be77bface570a2bea8952f1 | /routes/appointments.py | d9c0777cde3c8aff7f8b56968d69e1e0dd98bd3c | [] | no_license | Bryan1010/AidlyBackend | 840bbdf290d65577a37b17cc99fc1162177e0f02 | f0fd9215374713a077460876ab3069d5d556c66f | refs/heads/master | 2021-02-09T02:36:37.825383 | 2020-11-08T04:22:38 | 2020-11-08T04:22:38 | 244,229,180 | 2 | 0 | null | 2020-11-08T04:22:40 | 2020-03-01T21:50:21 | Python | UTF-8 | Python | false | false | 150 | py | from flask import Blueprint, Response, request, json, jsonify, make_response
from models.users import User
from models.appointments import Appointment
| [
"[email protected]"
] | |
7357c0f383ca7c5e5834f0c65588b9c3d65dda04 | fbc26ea6c43c9c700b9d22c705df493b094cc5a6 | /shadowdp/checker.py | ea4bbe867ec58bc921f0eb49cec560b36942cc1e | [
"MIT"
] | permissive | hengchu/shadowdp | 701477047fc792d301b7ea6fe0cc9cfae68a2d49 | e8f7717476a74fb6be04cbcb4cc5d962b7744cd4 | refs/heads/master | 2020-05-02T16:03:25.476358 | 2019-03-27T05:37:08 | 2019-03-27T05:37:08 | 178,059,003 | 1 | 0 | MIT | 2019-03-27T19:12:20 | 2019-03-27T19:12:20 | null | UTF-8 | Python | false | false | 5,013 | py | # MIT License
#
# Copyright (c) 2018-2019 Yuxin (Ryan) Wang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import OrderedDict
from queue import Queue
import os
import subprocess
import threading
import logging
import shutil
import re
logger = logging.getLogger(__name__)
def _thread_wait_for(results, name, process):
try:
# wait for 30 seconds
out, err = process.communicate(timeout=30)
if r'Verification result: TRUE' in str(out):
results.put((True, name, None, None))
else:
results.put((False, name, out, err))
except subprocess.TimeoutExpired:
results.put((False, '30 seconds Timeout', '', ''))
def check(checkerpath, path, funcname=None):
funcname = os.path.splitext(os.path.basename(path))[0] if funcname is None else funcname
logger.info('Start checking {} with multiple solvers(MathSat, Z3, SMT-Interpol)...'.format(path))
processes = OrderedDict()
processes['MathSat'] = subprocess.Popen(
[checkerpath + '/scripts/cpa.sh', '-predicateAnalysis', path, '-preprocess', '-entryfunction', funcname,
'-setprop', 'cpa.predicate.encodeFloatAs=RATIONAL', '-setprop', 'solver.nonLinearArithmetic=USE',
'-setprop', 'output.path=output-{}-MathSat'.format(funcname)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
processes['Z3'] = subprocess.Popen(
[checkerpath + '/scripts/cpa.sh', '-predicateAnalysis', path, '-preprocess', '-entryfunction', funcname,
'-setprop', 'solver.solver=z3', '-setprop', 'cpa.predicate.encodeFloatAs=RATIONAL',
'-setprop', 'solver.nonLinearArithmetic=USE',
'-setprop', 'output.path=output-{}-Z3'.format(funcname)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
processes['SMTInterpol'] = subprocess.Popen(
[checkerpath + '/scripts/cpa.sh', '-predicateAnalysis-linear', path, '-preprocess', '-entryfunction', funcname,
'-setprop', 'solver.solver=smtinterpol', '-setprop', 'output.path=output-{}-SMTInterpol'.format(funcname)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
# start threads to wait for results
results = Queue()
threads = set()
for name, proc in processes.items():
thread = threading.Thread(target=_thread_wait_for, args=(results, name, proc))
threads.add(thread)
thread.start()
# get the results
errors = set()
is_verified = False
for _ in range(len(processes)):
verified, name, out, err = results.get()
if verified:
logger.info('{} verified with {}.'.format(path, name))
# open and read report to find
with open('./output-{}-{}/Report.html'.format(funcname, name)) as report:
all_report = report.read()
time = re.search(r'Total time for CPAchecker[\s<>/a-zA-Z]*([0-9]+\.[0-9]+s)', all_report).groups()
logger.info('Verification finished in {}'.format(time[0]))
logger.info('CPA-Checker reports can be found at ./output-{}-{}'.format(funcname, name))
# remove failed solver output
for solver in ('MathSat', 'Z3', 'SMTInterpol'):
if solver != name:
shutil.rmtree('./output-{}-{}'.format(funcname, solver), ignore_errors=True)
is_verified = True
break
else:
# log the error if this solver fails
errors.add((name, out, err))
# clean up threads and processes
for proc in processes.values():
proc.kill()
proc.wait()
for thread in threads:
thread.join()
# if no solvers can verify the program
if not is_verified:
logger.warning('No solvers can verify the program, error messages shown below:')
for name, out, err in errors:
logger.warning('{}:\n\tout: {}\n\terr:{}'.format(name, out.decode('ascii'), err.decode('ascii')))
return is_verified
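# Hypothetical invocation (paths and entry function are placeholders, not from the repo):
# if __name__ == '__main__':
#     logging.basicConfig(level=logging.INFO)
#     check('/opt/cpachecker', 'out/noisymax_t.c', funcname='noisymax')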
| [
"[email protected]"
] | |
cc3be2083688e78730917a3fd8fe30188c82cb53 | 76133934b1dd287273a9bfa0c801d10d08a21b21 | /test/functional/test_framework/address.py | 5dc495f9adf84075a12eac33bd8c49e87930fa8c | [
"MIT"
] | permissive | kenfmcoin/kenfmcoin | d8783b34fcb3ae01067e8d1b33e3a73e3b82b1f9 | 1fa48487593233f2066757dc54f48b2349e2d9db | refs/heads/master | 2020-03-10T17:53:31.569229 | 2018-04-14T12:28:55 | 2018-04-14T12:28:55 | 129,511,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,248 | py | #!/usr/bin/env python3
# Copyright (c) 2016 The KenFMcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode BASE58, P2PKH and P2SH addresses."""
from .script import hash256, hash160, sha256, CScript, OP_0
from .util import bytes_to_hex_str, hex_str_to_bytes
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = bytes_to_hex_str(b)
str = bytes_to_hex_str(chr(version).encode('latin-1')) + str
checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str)))
str += checksum[:8]
value = int('0x'+str,0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
# TODO: def base58_decode
def keyhash_to_p2pkh(hash, main = False):
assert (len(hash) == 20)
version = 0 if main else 111
return byte_to_base58(hash, version)
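# Sanity check (a well-known constant, not part of the original file):
# keyhash_to_p2pkh(bytes(20), main=True) == '1111111111111111111114oLvT2',
# the mainnet "burn" address whose HASH160 is twenty zero bytes.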
def scripthash_to_p2sh(hash, main = False):
assert (len(hash) == 20)
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert(False)
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert(False)
| [
"[email protected]"
] | |
0b751a014be59e1ba3c7bf4a82a203ed9b8c5564 | 29432753db6ec21f2364dc9bc3d27e25c827aca9 | /src/route_planner.py | cabf277b2097cc32b7504c172eaf8b787f2d1c40 | [] | no_license | Finin-Quincey/Mechatronics-2 | d9358f2f75c79f91c17354a8f70b3efdc3652eb9 | 3c2c1dc662054feeaae22518d8ba2bae09c95197 | refs/heads/main | 2023-02-17T13:20:09.932631 | 2021-01-18T14:34:18 | 2021-01-18T14:34:18 | 311,111,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | ### Responsible for planning a route and monitoring/updating the Arduino whilst it is following the route
# A bunch of functions go here that interact with vision etc.
def start_route(target):
"""Starts moving the robot towards the given target"""
# Initialise route object, find objects, calculate route
class Route:
"""Instances of this class represent a route to be followed"""
def __init__(self, target):
"""Creates a new route that ends at the given target coordinates"""
self.target = target
self.current_step = 0 # Keeps track of which step in the route is currently being followed
self.steps = [] # A list of coordinates describing the route (excludes start point)
# Stuff
def calculate_steps(self, start_point):
# Work out the points along the route, fill in self.steps
# Just do a straight line for now
self.steps = [self.target]
def advance(self):
"""Move this route to the next step"""
self.current_step += 1
def get_current_destination(self):
"""Returns the point at the end of the current step of the route"""
return self.steps[self.current_step]
def is_finished(self):
"""Returns true if all steps have been completed, false otherwise"""
return self.current_step >= len(self.steps)
| [
"[email protected]"
] | |
be0a985190261ae88db8bfa97f741b9e86a9bb79 | 20d040a1a04d0a8cae892c8f90649cdf9101d512 | /RS_Fusion_Exp/Model/Segmentation/Train_Zurich1.py | 1e40eec036bc9bb32d1e3a17596546a493d002f9 | [] | no_license | PotentialPie/RS_Fusion_exp | add4634d526049ebfc56e51e314fe0a9839100fc | 6c3a9b3f9c7d6d5834b8115223cae06ae8e016ae | refs/heads/master | 2022-03-27T16:58:14.104059 | 2020-01-13T08:58:49 | 2020-01-13T08:58:49 | 233,552,876 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,377 | py | import keras
import argparse
import Models
import Z1_LoadBatches
from keras_fcn import FCN
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
parser = argparse.ArgumentParser()
parser.add_argument("--save_weights_path", type=str)
parser.add_argument("--train_images", type=str)
parser.add_argument("--train_annotations", type=str)
parser.add_argument("--n_classes", type=int)
parser.add_argument("--input_height", type=int, default=224)
parser.add_argument("--input_width", type=int, default=224)
parser.add_argument('--validate', action='store_false')
parser.add_argument("--val_images", type=str, default="")
parser.add_argument("--val_annotations", type=str, default="")
parser.add_argument("--epochs", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=2)
parser.add_argument("--val_batch_size", type=int, default=2)
parser.add_argument("--load_weights", type=str, default="")
parser.add_argument("--model_name", type=str, default="")
parser.add_argument("--optimizer_name", type=str, default="adadelta")
args = parser.parse_args()
train_images_path = args.train_images
train_segs_path = args.train_annotations
train_batch_size = args.batch_size
n_classes = args.n_classes
input_height = args.input_height
input_width = args.input_width
validate = args.validate
save_weights_path = args.save_weights_path
epochs = args.epochs
load_weights = args.load_weights
optimizer_name = args.optimizer_name
model_name = args.model_name
if validate:
val_images_path = args.val_images
val_segs_path = args.val_annotations
val_batch_size = args.val_batch_size
modelFns = {'vgg_segnet': Models.VGGSegnet.VGGSegnet, 'vgg_unet': Models.VGGUnet.VGGUnet, 'fcn': FCN,
'vgg_unet2': Models.VGGUnet.VGGUnet2, 'fcn8': Models.FCN8.FCN8, 'fcn32': Models.FCN32.FCN32}
modelFN = modelFns[model_name]
# m = modelFN(n_classes, input_height=input_height, input_width=input_width)
input_shape = (input_height, input_width, 3)
m = modelFN(input_shape, n_classes)
m.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.adam(lr=0.0001),
metrics=['accuracy'])
if len(load_weights) > 0:
m.load_weights(load_weights)
print("Model output shape", m.output_shape)
output_height = 224
output_width = 224
G = Z1_LoadBatches.imageSegmentationGenerator(train_images_path, train_segs_path, train_batch_size, n_classes,
input_height, input_width, output_height, output_width)
if validate:
G2 = Z1_LoadBatches.imageSegmentationGenerator(val_images_path, val_segs_path, val_batch_size, n_classes,
input_height,
input_width, output_height, output_width)
if not validate:
for ep in range(epochs):
print("The epochs is %d" % ep)
m.fit_generator(G, 512, epochs=1)
m.save_weights(save_weights_path + "." + str(ep))
# m.save(save_weights_path + ".model." + str(ep))
else:
for ep in range(epochs):
print("The epochs is %d" % ep)
m.fit_generator(G, 512, validation_data=G2, validation_steps=200, epochs=1)
m.save_weights(save_weights_path + "." + str(ep))
# m.save(save_weights_path + ".model." + str(ep))
print(1)
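# Hypothetical invocation (argument values are placeholders, not from the repo;
# the flags themselves are the ones defined by the argparse parser above):
# python Train_Zurich1.py --model_name vgg_segnet --n_classes 9 \
#     --train_images data/train/ --train_annotations data/annotations/ \
#     --save_weights_path weights/zurich --epochs 5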
| [
"[email protected]"
] | |
7e0bd4aa0d42e18e313d872a2d299a4df7241748 | 0184dd68ea868ce3e701caa74e75ab465fb22904 | /Cogs/ComicHelper.py | bf91852981efa3073c9aecfee87b871caee97ce4 | [
"MIT"
] | permissive | TheMasterGhost/CorpBot | b78ef20f647cb6de82aa1731603663c6e7da7420 | 3133d5b7fdfef09ac4b75fb42d91628b86d94ac0 | refs/heads/master | 2021-01-02T23:02:13.792470 | 2017-08-05T23:45:38 | 2017-08-05T23:45:38 | 99,449,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,903 | py | import random
import math
import datetime as dt
import urllib.request
import urllib
import requests
from pyquery import PyQuery as pq
try:
# Python 2.6-2.7
from HTMLParser import HTMLParser
except ImportError:
# Python 3
from html.parser import HTMLParser
# This module contains all the shit methods used for getting comic URLs... ugh.
def julianDate(my_date):
# Takes a date string MM-DD-YYYY and
# returns the Julian Day Number (Fliegel-Van Flandern formula;
# int() truncates toward zero, matching Fortran integer division)
date = my_date.split("-")
month = int(date[0])
day = int(date[1])
year = int(date[2])
a = int((month - 14) / 12)
year = year + 4800
JDate = (1461 * (year + a)) // 4 + (367 * (month - 2 - 12 * a)) // 12 - (3 * ((year + a + 100) // 100)) // 4 + day - 32075
return JDate
# Function from: https://gist.github.com/jiffyclub/1294443
def date_to_jd(year,month,day):
if month == 1 or month == 2:
yearp = year - 1
monthp = month + 12
else:
yearp = year
monthp = month
# this checks where we are in relation to October 15, 1582, the beginning
# of the Gregorian calendar.
if ((year < 1582) or
(year == 1582 and month < 10) or
(year == 1582 and month == 10 and day < 15)):
# before start of Gregorian calendar
B = 0
else:
# after start of Gregorian calendar
A = math.trunc(yearp / 100.)
B = 2 - A + math.trunc(A / 4.)
if yearp < 0:
C = math.trunc((365.25 * yearp) - 0.75)
else:
C = math.trunc(365.25 * yearp)
D = math.trunc(30.6001 * (monthp + 1))
jd = B + C + D + day + 1720994.5
return jd
# Function from: https://gist.github.com/jiffyclub/1294443
def jd_to_date(jd):
jd = jd + 0.5
F, I = math.modf(jd)
I = int(I)
A = math.trunc((I - 1867216.25)/36524.25)
if I > 2299160:
B = I + 1 + A - math.trunc(A / 4.)
else:
B = I
C = B + 1524
D = math.trunc((C - 122.1) / 365.25)
E = math.trunc(365.25 * D)
G = math.trunc((C - E) / 30.6001)
day = C - E + F - math.trunc(30.6001 * G)
if G < 13.5:
month = G - 1
else:
month = G - 13
if month > 2.5:
year = D - 4716
else:
year = D - 4715
return year, month, day
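# Quick sanity check (standard reference values, not from the original module):
# date_to_jd(2000, 1, 1.5) -> 2451545.0, the Julian Date of the J2000.0 epoch
# (2000 January 1, 12:00), and jd_to_date(2451545.0) round-trips to (2000, 1, 1.5).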
# Find string between 2 strings
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
def find_first_between( source, start_sep, end_sep ):
result=[]
tmp=source.split(start_sep)
for par in tmp:
if end_sep in par:
result.append(par.split(end_sep)[0])
if len(result) == 0:
return None
else:
return result[0]
def find_last_between( source, start_sep, end_sep ):
result=[]
tmp=source.split(start_sep)
for par in tmp:
if end_sep in par:
result.append(par.split(end_sep)[0])
if len(result) == 0:
return None
else:
return result[len(result)-1] # Return last item
def getImageHTML ( url, ua : str = '' ):
try:
req = urllib.request.Request(url, data=None,headers={'User-Agent': ua})
with urllib.request.urlopen(req) as f:
htmlSource = str(f.read())
return htmlSource
except Exception as e:
return None
def getImageURL ( html ):
imageURL = find_between( html, "data-image=", "data-date=" )
return imageURL.replace('"', '').strip()
def getImageTitle ( html ):
imageTitle = find_between( html, "data-title=", "data-tags=" )
h = HTMLParser()
imageTitle = h.unescape(imageTitle)
#print(h.unescape(imageTitle))
return imageTitle.replace('"', '').strip()
# C&H Methods
def getCHURL ( html, date ):
# YYYY.MM.DD format
# <a href="[comic url]">2005.01.31</a>
comicBlock = find_last_between( html, '<a href="', "\">" + date + "</a>")
if not comicBlock:
return None
else:
return comicBlock.replace('"', '').strip()
def getCHImageURL ( html ):
# comicBlock = find_last_between( html, 'div id="comic-container"', "</div>")
# if comicBlock == None:
# return None
imageURL = find_last_between( html, 'id="main-comic" src=', '>' )
if not imageURL:
return None
imageURL = imageURL.replace('"', '').strip()
imageURL = imageURL.split("?t=")[0]
if imageURL[0:2] == "//":
# Add http?
imageURL = "http:" + imageURL
if imageURL[-1:] == "/":
# Strip trailing /
return imageURL[0:-1]
else:
return imageURL
# XKCD Methods
def getNewestXKCD ( html ):
comicBlock = find_last_between( html, 'div id="middleContainer"', "</div>")
if not comicBlock:
return None
imageURL = find_first_between( comicBlock, "href=", " title=" )
imageURL = imageURL.replace('/', '').strip()
return imageURL.replace('"', '').strip()
def getXKCDURL ( html, date ):
# YYYY-M(M)-D(D) format
# <a href="/17/" title="2006-1-1">What If</a>
comicBlock = find_last_between( html, 'div id="comic"', "</div>")
if not comicBlock:
return None
imageURL = find_first_between( html, "href=", " title=\"" + date + "\"" )
if imageURL == None:
return None
else:
return imageURL.replace('"', '').strip()
def getXKCDImageURL ( html ):
comicBlock = find_last_between( html, 'div id="comic"', "</div>")
if not comicBlock:
return None
imageURL = find_last_between( comicBlock, "img src=", "title=" )
imageURL = imageURL.replace('"', '').strip()
if imageURL[0:2] == "//":
# Add http?
return "http:" + imageURL
else:
return imageURL
def getXKCDImageTitle ( html ):
comicBlock = find_last_between( html, 'div id="comic"', "</div>")
if not comicBlock:
return None
imageTitle = find_last_between( comicBlock, "alt=", ">" )
# Drop srcset= if there
imageTitle = imageTitle.split('srcset=')[0]
h = HTMLParser()
imageTitle = h.unescape(imageTitle)
imageTitle = imageTitle.replace('"', '').strip()
imageTitle = imageTitle.replace('/', '').strip()
return imageTitle
# Garfield Minus Garfield Methods
def getGMGImageURL ( html ):
if not html:
return None
comicBlock = find_last_between( html, 'div class="photo"', "</a>")
if not comicBlock:
return None
imageURL = find_last_between( comicBlock, "img src=", " alt=" )
imageURL = imageURL.replace('"', '').strip()
return imageURL
# Garfield Methods
def getGImageURL ( html ):
if not html:
return None
comicBlock = find_last_between( html, 'img class="img-responsive" src=', ' width')
if not comicBlock:
return None
imageURL = comicBlock.replace('"', '').strip()
return imageURL
# Peanuts Methods
def getPeanutsImageURL ( html ):
if not html:
return None
dom = pq(html)
pic = dom('picture.img-fluid.item-comic-image')
pic = str(pic).strip().replace('\r', '').replace('\n', ' ').replace('\t', ' ')
comicBlock = find_last_between( pic, 'src=', '/>')
if not comicBlock:
return None
imageURL = comicBlock.replace('"', '').strip()
return imageURL
| [
"[email protected]"
] | |
e7c5ad22260d7b4e3f25f02fe4b67b347c812a60 | 2b54b1fb1540ab73d6c83cae3acd5fdd58bdead5 | /Platinum_clusters_Project/final_images/Pt7_O2_DFTsorted/Ptoxides_zorderimage_new_13th.py | d15461bef2e27188b59925709e6afbf4c89e3930 | [] | no_license | sivachiriki/GOFEE_Pt_V_supported | 5787d44294262870075f35f2d31c096021b7ce20 | 6bd700dac1f3e7c58394b758d75246ac6e07eade | refs/heads/master | 2022-04-08T11:38:13.038455 | 2020-03-09T10:48:31 | 2020-03-09T10:48:31 | 226,359,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,473 | py | from __future__ import division
import matplotlib
#matplotlib.use('Agg') # Can also use 'tkagg' or 'webagg'
#from plot_neb_tio2 import *
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox
import matplotlib.patches as patches
from math import ceil, floor
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
import matplotlib.patches as mpatches
from ase.data.colors import jmol_colors
from decimal import Decimal
from pylab import *
from ase.data import covalent_radii as aradii
from matplotlib.patches import Circle
from math import atan2,pi
import matplotlib.gridspec as gridspec
#matplotlib.rcParams['xtick.direction'] = 'out'
#matplotlib.rcParams['ytick.direction'] = 'out'
#matplotlib.rc('font',**{'family':'sans-serif',
# 'sans-serif':['Helvetica'],
# 'size':14})
#matplotlib.rc('text',usetex=True)
#matplotlib.rcParams['text.latex.unicode']=False
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{bm}']
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{xfrac}']
#matplotlib.rcParams['mathtext.default'] = 'regular'
#matplotlib.rcParams['ps.usedistiller'] = 'xpdf'
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
def plot_atoms(ax, atoms, xyz, acols, alp, z):
ecols = [[0, 0, 0] for col in atoms]
indices = range(len(atoms))
for ia in indices:
acol = acols[ia]
ecol = ecols[ia]
# if atoms[ia].symbol == 'Ti':
# arad = aradii[atoms[ia].number] #* 0.9 * 0.5
# else:
arad = aradii[atoms[ia].number] #* 0.9
apos = atoms[ia].position
eps = arad
circ = Circle([apos[xyz[0]], apos[xyz[1]]],
fc = acol,
ec = ecol,
radius = arad,
lw = 0.5,
alpha = alp[ia],
zorder = 1 - apos[1]/1000
)
ax.add_patch(circ)
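# Note: zorder = 1 - apos[1]/1000 gives atoms with larger y a lower z-order,
# so they are drawn underneath nearer atoms -- a cheap painter's-algorithm
# depth sort along the axis left out of the 2D projection.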
def plot_conf(ax, atoms, colorlenth,rot=False):
colors = np.array([jmol_colors[atom.number] for atom in atoms])
positions =atoms.get_positions()
for i, atom in enumerate(atoms):
if (atom.number ==78):
colors[i] =[0.1, 0.6, 0.6]
if (atom.number ==6):
colors[i] =[0.1, 0.2, 0.9]
if (atom.number ==8 and positions[i,2]>12.2):
colors[i] =[102/255, 0/255, 0/255]
# if (atom.number ==8 and i >=colorlenth*5-8):
# colors[i] =[102/255, 0/255, 0/255]
# if (atom.number ==8 and i >= 135+colorlenth*2 and i <colorlenth*3 ):
# colors[i] =[102/255, 0/255, 0/255]
# if (atom.number ==8 and i >= 135+colorlenth*3 and i <colorlenth*4 ):
# colors[i] =[102/255, 0/255, 0/255]
# if (atom.number ==8 and i >= 135+colorlenth*4 and i <colorlenth*5 ):
# colors[i] =[102/255, 0/255, 0/255]
# if (atom.number ==8 and i >= 135+colorlenth*5 and i <colorlenth*6 ):
# colors[i] =[102/255, 0/255, 0/255]
alp = [None] * colors.shape[0]
for i,a in enumerate(atoms):
if a.symbol == 'Al' or a.symbol == 'O':
if a.position[2] < 9.7:
alp[i] = 0.3
if rot:
atoms.rotate('x',pi/2)
plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
data=read(sys.argv[1]+'@:')
energydif =np.zeros(len(data))
for j in range(len(data)):
GM_energy = data[0].get_potential_energy()
energydif[j] = (data[j].get_potential_energy() - GM_energy)
#print('{:3.3f}'.format(energydif[j]))
for j in range(13,65):
atoms = data[j]
colorlenth = len(atoms)
atoms =atoms*(3,3,1)
print(colorlenth)
# write('newimage.traj',atoms)
#exit()
a=atoms
del atoms[[atom.index for atom in atoms if atom.index <=colorlenth*5-8 or atom.index >=colorlenth*5]]
#view(atoms)
centreofmass = a.get_center_of_mass()
atoms = data[j]*(3,3,1)
a=atoms
del atoms[atoms.positions[:,0] >=centreofmass[0]+9.0]
del atoms[atoms.positions[:,0] <= centreofmass[0]-9.0]
del atoms[atoms.positions[:,1] >= centreofmass[1]+8.5]
del atoms[atoms.positions[:,1] <= centreofmass[1]-8.0]
colorlenth = len(atoms)
#view(atoms)
#exit()
plt.figure(figsize=(4.0,5.0))
gs = gridspec.GridSpec(2, 1,
height_ratios=[6.86,11.80])
cell = atoms.get_cell()
# 0 0
ax = plt.subplot(gs[0, 0])
img = atoms.copy()
plot_conf(ax, img,colorlenth)
ax.set_xlim([centreofmass[0]-8.0, centreofmass[0]+8.0])
ax.set_ylim([10.7, 20.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
# 0 1
ax = plt.subplot(gs[1, 0])
cell = atoms.get_cell()
img = atoms.copy()
plot_conf(ax, img,colorlenth, rot=True)
ax.set_xlim([centreofmass[0]-8.0, centreofmass[0]+8.0])
ax.set_ylim([centreofmass[1]-7.5, centreofmass[1]+8.5])
#name ='$\Delta E = {}$ eV'.format(math.ceil(energydif[j],3))
name ='$\Delta E = {:3.3f}$ eV'.format(energydif[j])
ax.text(0.1, -0.1, name, transform=ax.transAxes,fontsize=20)
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
gs.update(wspace=0.00,hspace=0.00)
plt.tight_layout()
name = sys.argv[2]
name =name+'_{}'.format(j)
savefig(name,bbox_inches='tight')
show()
exit()
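# Typical invocation (inferred from the sys.argv usage above; file names are examples):
#   python Ptoxides_zorderimage_new_13th.py sorted_structures.traj out_prefix
# where argv[1] is an ASE trajectory read with '@:' and argv[2] is the prefix
# for the saved figure files.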
| [
"[email protected]"
] | |
cb23d77c9d5aa51bc4b7c9bdfc2adf812601b646 | 352b07adca242665d0806e66414b596ffb6de63f | /application/services/__init__.py | 1fb16092cffb2e2673a033d71ead727473f60371 | [
"MIT"
] | permissive | Markcial/alembic | 03c89160b3d6fd3b0676ee956513b8c9fa669171 | 752cbb8038278d0b8c996a386c6632158a841299 | refs/heads/master | 2016-08-12T17:25:48.175522 | 2015-11-20T17:55:51 | 2015-11-20T17:55:51 | 45,428,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | from ..configuration import storage_folder
import os
from uuid import uuid4
def create_snapshot_directory():
folder = storage_folder + os.path.sep + str(uuid4())
os.makedirs(folder)
return folder
| [
"[email protected]"
] | |
6174a99a45b2e7980958866ecfbe22b20a773ce7 | 670a1d3904d5e572a1cd6e20e9c46e7a5d93e58d | /modules/gui/forms/main_window/input_module/input_sub_frames/input_lst_sw_frame.py | 1caf6b7f89df09045e04899433a2f5829947f10a | [] | no_license | bkleynhans/Landsat-Buoy-Calibration | aecce495ea41583c88e76692286b0a114e5e84f7 | f3c4d654c70ecc7c1333ba3fccf304768ceaac62 | refs/heads/master | 2022-12-21T00:20:38.561154 | 2020-05-26T20:36:50 | 2020-05-26T20:36:50 | 143,342,274 | 0 | 1 | null | 2022-12-08T01:49:46 | 2018-08-02T20:32:38 | Python | UTF-8 | Python | false | false | 9,505 | py | ###
#
# CIS Top of Atmosphere Radiance Calibration
#
# Program Description : The frame that displays and accepts user input when a Split Window process is run
# to calculate Land Surface Temperature using user supplied data and a landsat image.
# Created By : Benjamin Kleynhans
# Creation Date : July 16, 2019
# Authors : Benjamin Kleynhans
#
# Last Modified By : Benjamin Kleynhans
# Last Modified Date : September 16, 2019
# Filename : input_lst_sw_frame.py
#
###
# Imports
from tkinter import *
from tkinter import ttk
from datetime import date
from gui.forms.base_classes.gui_frame import Gui_Frame
from gui.forms.main_window.input_module.input_sub_frames.input_lst_sw_gb_frame import Input_Lst_Sw_Gb_Frame
from buoycalib import settings
import pdb
class Input_Lst_Sw_Frame(Gui_Frame):
# Settings Frame constructor
def __init__(self, master):
Gui_Frame.__init__(self, master, "input_lst_sw_frame")
# Add properties for gain and bias
master.frames[self.frame_name].input_values = {
'scene_id': '',
'lat': '',
'lon': '',
'emissivity_b10': '',
'emissivity_b11': '',
'add_gain_bias': False
}
self.create_lst_sw_frame(master)
# Create the actual frame as a separate window
def create_lst_sw_frame(self, master):
master.add(master.frames[self.frame_name], text = "Single")
master.tab(2, state = 'hidden')
self.create_scene_id(master)
self.create_lat(master)
self.create_lon(master)
self.create_emissivity(master)
# Create Input LandSufaceTemperature, SplitWindow GainBias frame
Input_Lst_Sw_Gb_Frame(master.frames[self.frame_name])
self.create_ask_gain_bias(master)
# Create Scene ID text entry area with examples in frame
def create_scene_id(self, master):
# Read in the scene id
ttk.Label(
master.frames[self.frame_name],
text = 'ID : ',
width = 20).grid(
row = 0,
column = 0,
padx = 10,
pady = 10,
sticky = 'nsew'
)
master.frames[self.frame_name].input_values['scene_id'] = ttk.Entry(master.frames[self.frame_name], width = 80)
master.frames[self.frame_name].input_values['scene_id'].grid(
row = 0,
column = 1,
columnspan = 3,
padx = 10,
pady = 10,
sticky = 'nsew'
)
# Header for examples
# note: .grid() returns None, so assigning its result would not keep
# a reference to the label; none is needed here
ttk.Label(
master.frames[self.frame_name],
text = 'Example IDs :',
width = 20).grid(
row = 1,
column = 0,
padx = 10,
pady = 10,
sticky = 'nsew'
)
# Valid format examples
ttk.Label(
master.frames[self.frame_name],
text = 'Scene Id :',
width = 30).grid(
row = 1,
column = 1,
padx = 10,
pady = 10,
sticky = 'nsew'
)
self.scene_id = Text(master.frames[self.frame_name], height = 1, width = 40, borderwidth = 0)
self.scene_id.insert(1.0, "LC80110312017350LGN00")
self.scene_id.configure(
state = 'disabled',
inactiveselectbackground = self.scene_id.cget('selectbackground'))
self.scene_id.grid(
row = 1,
column = 2,
columnspan = 2,
padx = 10,
pady = 10,
sticky = 'nsew'
)
ttk.Label(
master.frames[self.frame_name],
text = 'Landsat Product Identifier :',
width = 30).grid(
row = 2,
column = 1,
padx = 10,
pady = 10,
sticky = 'nsew'
)
self.scene_id = Text(master.frames[self.frame_name], height = 1, width = 40, borderwidth = 0)
self.scene_id.insert(1.0, "LC08_L1TP_017030_20160614_20170220_01_T1")
self.scene_id.configure(
state = 'disabled',
inactiveselectbackground = self.scene_id.cget('selectbackground'))
self.scene_id.grid(
row = 2,
column = 2,
columnspan = 2,
padx = 10,
pady = 10,
sticky = 'nsew'
)
# Create Latitude entry area in frame
def create_lat(self, master):
# Read in the latitude
ttk.Label(master.frames[self.frame_name], text = 'Latitude (dec) : ', width = 20).grid(row = 3, column = 0, padx = 10, pady = 10, sticky = 'w')
master.frames[self.frame_name].input_values['lat'] = ttk.Entry(master.frames[self.frame_name], width = 30)
master.frames[self.frame_name].input_values['lat'].grid(row = 3, column = 1, padx = 10, pady = 10, sticky = 'e')
# Create Longtitude entry area in frame
def create_lon(self, master):
# Read in the longtitude
ttk.Label(master.frames[self.frame_name], text = 'Longtitude (dec) : ', width = 20).grid(row = 4, column = 0, padx = 10, pady = 10, sticky = 'w')
master.frames[self.frame_name].input_values['lon'] = ttk.Entry(master.frames[self.frame_name], width = 30)
master.frames[self.frame_name].input_values['lon'].grid(row = 4, column = 1, padx = 10, pady = 10, sticky = 'e')
# Create Latitude entry area in frame
def create_emissivity(self, master):
# Read in the latitude
ttk.Label(master.frames[self.frame_name], text = 'Emissivity Band 10 : ', width = 20).grid(row = 3, column = 2, padx = 10, pady = 10, sticky = 'w')
master.frames[self.frame_name].input_values['emissivity_b10'] = ttk.Entry(master.frames[self.frame_name], width = 30)
master.frames[self.frame_name].input_values['emissivity_b10'].insert('end', settings.DEFAULT_EMIS_B10)
master.frames[self.frame_name].input_values['emissivity_b10'].grid(row = 3, column = 3, padx = 10, pady = 10, sticky = 'e')
# Read in the longtitude
ttk.Label(master.frames[self.frame_name], text = 'Emissivity Band 11 : ', width = 20).grid(row = 4, column = 2, padx = 10, pady = 10, sticky = 'w')
master.frames[self.frame_name].input_values['emissivity_b11'] = ttk.Entry(master.frames[self.frame_name], width = 30)
master.frames[self.frame_name].input_values['emissivity_b11'].insert('end', settings.DEFAULT_EMIS_B11)
master.frames[self.frame_name].input_values['emissivity_b11'].grid(row = 4, column = 3, padx = 10, pady = 10, sticky = 'e')
# Create checkbox to ask for usage of gain and bias
def create_ask_gain_bias(self, master):
add_gain_bias = ttk.Checkbutton(
master.frames[self.frame_name],
text = "Add Gain and Bias to Calculation",
variable = master.frames[self.frame_name].input_values['add_gain_bias'],
command = lambda: self.change_frame_state(master))
add_gain_bias.grid(row = 5, column = 0, padx = 10, pady = (10, 0), sticky = 'w')
# Change the state of the gain and bias properties
def change_frame_state(self, master):
master.frames[self.frame_name].input_values['add_gain_bias'] = not master.frames[self.frame_name].input_values['add_gain_bias']
if master.frames[self.frame_name].input_values['add_gain_bias']:
master.frames[self.frame_name].frames['input_lst_sw_gb_frame'].input_values['gain_b10'].configure(state = 'normal')
master.frames[self.frame_name].frames['input_lst_sw_gb_frame'].input_values['gain_b11'].configure(state = 'normal')
master.frames[self.frame_name].frames['input_lst_sw_gb_frame'].input_values['bias_b10'].configure(state = 'normal')
master.frames[self.frame_name].frames['input_lst_sw_gb_frame'].input_values['bias_b11'].configure(state = 'normal')
else:
master.frames[self.frame_name].frames['input_lst_sw_gb_frame'].input_values['gain_b10'].configure(state = 'disabled')
master.frames[self.frame_name].frames['input_lst_sw_gb_frame'].input_values['gain_b11'].configure(state = 'disabled')
master.frames[self.frame_name].frames['input_lst_sw_gb_frame'].input_values['bias_b10'].configure(state = 'disabled')
master.frames[self.frame_name].frames['input_lst_sw_gb_frame'].input_values['bias_b11'].configure(state = 'disabled')
| [
"[email protected]"
] | |
4b0cee2e9b8ac0dffcd8fcc2d27ddcf7f9e291fe | dfdd59d51c13adb6a9dde46fa1b184dde016bcad | /data/feature_extraction_test.py | cc95b57eab1323a39007febc86b022c464a1d375 | [
"Apache-2.0"
] | permissive | googleinterns/cl_analysis | bf0e64e79d704e9fb7bba44751efffd6084fe80e | 686cf49cd57ce61cba3cc11f0574b2a2cec596be | refs/heads/master | 2022-12-16T03:56:34.473083 | 2020-09-26T06:44:12 | 2020-09-26T06:44:12 | 277,630,242 | 5 | 3 | Apache-2.0 | 2020-09-26T00:01:06 | 2020-07-06T19:26:14 | Python | UTF-8 | Python | false | false | 7,211 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from data.feature_extraction import *
from data.test_constants import *
import numpy as np
import pandas as pd
class FeatureExtractionTest(unittest.TestCase):
def test_compute_count(self):
self.assertEqual(FeatureExtractor.compute_count(LIST1), 7)
def test_compute_count_on_nan_input(self):
self.assertEqual(FeatureExtractor.compute_count(np.nan), 0)
def test_compute_count_on_empty_lst(self):
self.assertEqual(FeatureExtractor.compute_count(EMPTY_LIST), 0)
def test_compute_avg(self):
self.assertEqual(FeatureExtractor.compute_avg(LIST1), 20/7)
def test_compute_avg_on_nan_input(self):
self.assertEqual(FeatureExtractor.compute_avg(np.nan), 0)
def test_compute_avg_on_empty_lst(self):
self.assertEqual(FeatureExtractor.compute_avg(EMPTY_LIST), 0)
def test_compute_sum(self):
self.assertEqual(FeatureExtractor.compute_sum(LIST1), 20)
def test_compute_sum_on_nan_input(self):
self.assertEqual(FeatureExtractor.compute_sum(np.nan), 0)
def test_compute_sum_on_empty_lst(self):
self.assertEqual(FeatureExtractor.compute_sum(EMPTY_LIST), 0)
def test_compute_nonzero_count(self):
self.assertEqual(
FeatureExtractor.compute_nonzero_count(LIST2), 5)
def test_compute_nonzero_count_on_nan_input(self):
self.assertEqual(FeatureExtractor.compute_nonzero_count(np.nan), 0)
def test_compute_nonzero_count_on_empty_lst(self):
self.assertEqual(FeatureExtractor.compute_nonzero_count(EMPTY_LIST), 0)
def test_compute_nonzero_avg(self):
self.assertEqual(
FeatureExtractor.compute_nonzero_avg(LIST2), 12/5)
def test_compute_nonzero_avg_on_nan_input(self):
self.assertEqual(FeatureExtractor.compute_nonzero_avg(np.nan), 0)
def test_compute_nonzero_avg_on_empty_lst(self):
self.assertEqual(FeatureExtractor.compute_nonzero_avg(EMPTY_LIST), 0)
def test_compute_nonzero_sum(self):
self.assertEqual(
FeatureExtractor.compute_nonzero_sum(LIST2), 12)
def test_compute_nonzero_sum_on_nan_input(self):
self.assertEqual(FeatureExtractor.compute_nonzero_sum(np.nan), 0)
def test_compute_nonzero_sum_on_empty_lst(self):
self.assertEqual(FeatureExtractor.compute_nonzero_sum(EMPTY_LIST), 0)
def test_compute_avg_count(self):
self.assertEqual(
FeatureExtractor.compute_avg_count(
COMMENT_LIST, SERIES), 2/5)
def test_compute_avg_count_on_nan_input(self):
self.assertEqual(
FeatureExtractor.compute_avg_count(np.nan, SERIES),0)
def test_compute_avg_count_on_empty_lst(self):
self.assertEqual(
FeatureExtractor.compute_avg_count(EMPTY_LIST, SERIES),0)
def test_compute_avg_count_on_empty_series(self):
self.assertEqual(
FeatureExtractor.compute_avg_count(EMPTY_LIST, pd.Series([])), 0)
def test_compute_total_check_runs(self):
self.assertEqual(
FeatureExtractor.compute_total_check_runs(CHECK_RUNS1, 0), 8)
self.assertEqual(
FeatureExtractor.compute_total_check_runs(CHECK_RUNS1, 1), 11)
def test_compute_total_check_runs_on_nan_input(self):
self.assertEqual(
FeatureExtractor.compute_total_check_runs(np.nan, 0), 0)
self.assertEqual(
FeatureExtractor.compute_total_check_runs(np.nan, 1), 0)
def test_compute_total_check_runs_on_empty_lst(self):
self.assertEqual(
FeatureExtractor.compute_total_check_runs(EMPTY_LIST, 0), 0)
self.assertEqual(
FeatureExtractor.compute_total_check_runs(EMPTY_LIST, 1), 0)
def test_compute_avg_check_runs(self):
self.assertEqual(
FeatureExtractor.compute_avg_check_runs(CHECK_RUNS2, 0), 8/4)
self.assertEqual(
FeatureExtractor.compute_avg_check_runs(CHECK_RUNS2, 1), 11/4)
def test_compute_avg_check_runs_on_nan_input(self):
self.assertEqual(
FeatureExtractor.compute_avg_check_runs(np.nan, 0), 0)
self.assertEqual(
FeatureExtractor.compute_avg_check_runs(np.nan, 1), 0)
def test_compute_avg_check_runs_on_empty_lst(self):
self.assertEqual(
FeatureExtractor.compute_avg_check_runs(EMPTY_LIST, 0), 0)
self.assertEqual(
FeatureExtractor.compute_avg_check_runs(EMPTY_LIST, 1), 0)
    def test_compute_total_file_changes(self):
        # assumes FeatureExtractor exposes compute_total_file_changes /
        # compute_avg_file_changes mirroring the check-run helpers
        self.assertEqual(
            FeatureExtractor.compute_total_file_changes(FILES_CHANGES, 0), 260)
        self.assertEqual(
            FeatureExtractor.compute_total_file_changes(FILES_CHANGES, 1), 100)
        self.assertEqual(
            FeatureExtractor.compute_total_file_changes(FILES_CHANGES, 2), 360)
    def test_compute_total_file_changes_on_nan_input(self):
        self.assertEqual(
            FeatureExtractor.compute_total_file_changes(np.nan, 0), 0)
        self.assertEqual(
            FeatureExtractor.compute_total_file_changes(np.nan, 1), 0)
        self.assertEqual(
            FeatureExtractor.compute_total_file_changes(np.nan, 2), 0)
    def test_compute_total_file_changes_on_empty_lst(self):
        self.assertEqual(
            FeatureExtractor.compute_total_file_changes(EMPTY_LIST, 0), 0)
        self.assertEqual(
            FeatureExtractor.compute_total_file_changes(EMPTY_LIST, 1), 0)
        self.assertEqual(
            FeatureExtractor.compute_total_file_changes(EMPTY_LIST, 2), 0)
    def test_compute_avg_file_changes(self):
        self.assertEqual(
            FeatureExtractor.compute_avg_file_changes(FILES_CHANGES, 0), 260/3)
        self.assertEqual(
            FeatureExtractor.compute_avg_file_changes(FILES_CHANGES, 1), 100/3)
        self.assertEqual(
            FeatureExtractor.compute_avg_file_changes(FILES_CHANGES, 2), 360/3)
    def test_compute_avg_file_changes_on_nan_input(self):
        self.assertEqual(
            FeatureExtractor.compute_avg_file_changes(np.nan, 0), 0)
        self.assertEqual(
            FeatureExtractor.compute_avg_file_changes(np.nan, 1), 0)
        self.assertEqual(
            FeatureExtractor.compute_avg_file_changes(np.nan, 2), 0)
    def test_compute_avg_file_changes_on_empty_lst(self):
        self.assertEqual(
            FeatureExtractor.compute_avg_file_changes(EMPTY_LIST, 0), 0)
        self.assertEqual(
            FeatureExtractor.compute_avg_file_changes(EMPTY_LIST, 1), 0)
        self.assertEqual(
            FeatureExtractor.compute_avg_file_changes(EMPTY_LIST, 2), 0)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5800e259f76e16f79660fddf9ca78e2a4c7e1f9b | ea59d89f144c3cf741033f895d5ab47e79651b29 | /SQL Alchemy and Flask/controller.py | 06815112f428eb8249e84a976d3ed60a8e41302f | [] | no_license | Sanojdon/projects-at-byte | 70d5a0f34246af1e6c7920a001785091ca5bfa1d | f4d57f0d841bb23352f200efcd486b78c2cdd82a | refs/heads/master | 2021-09-04T11:46:48.105469 | 2018-01-18T10:57:12 | 2018-01-18T10:57:12 | 111,078,079 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | import model
import wrapper
def registration(name, age, phone, addr, email, uname, pword):
res = model.check_username(uname)
if (res is "Valid"):
status = model.add_user(name, age, phone, addr, email, uname, pword)
return status
else:
return "Exist"
def login(uname, pword):
uid, name, pw = model.get_details(uname)
if(uid is not None and pw is not None):
return uid, name, pw
else:
return (None, None, None)
def view_portfolio(uid):
re, result, res = model.portfolio(uid)
return re, result, res
def company_search(comp):
check = wrapper.Markit()
data = check.company_search(comp)
    if data is not None:
        return data
    return False
def get_quote(sym):
quote = wrapper.Markit()
data = quote.get_quote(sym)
    if data is not None:
        return data
    return False
def buy_shares(sym, no, uid):
    data = get_quote(sym)
    if data is False:
        return (0, 0, "Connect")
    last_price = data["LastPrice"]
    total = no * last_price
    model.buy_shares(sym, no, last_price, total, uid)
    return (total, last_price, True)
def sell_shares(sym, no, uid):
    data = get_quote(sym)
    if data is False:
        return (0, 0, "Connect")
    last_price = data["LastPrice"]
    total = no * last_price
    model.sell_shares(sym, no, last_price, total, uid)
    return (total, last_price, True)
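
# Flow sketch (illustrative; 'AAPL' and user_id are placeholder values):
# a buy goes quote lookup -> price -> persistence via the model layer:
#   total, price, ok = buy_shares('AAPL', 10, user_id)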
| [
"[email protected]"
] | |
ae707fb0acca12ab92e09fd061f213bda1ddcbc3 | 352f91a6de8977d969d0f5b1241f694d2d3e1dfd | /manage.py | 44db58646f80adfc9b41242ab110793e84109bbd | [
"MIT"
] | permissive | macfire/flask-base | a1a5d91c6b049d7f5542132e953be9d547482520 | 4cda948ab3fce26f8e339e08e20ddecaebf950da | refs/heads/master | 2016-09-08T00:38:30.723685 | 2014-08-26T17:55:30 | 2014-08-26T17:55:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | #!/usr/bin/env python
import os
from app import create_app, db
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    return dict(app=app, db=db)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
| [
"[email protected]"
] | |
1a14307f9e7ccc379056848ece64a4c1c3316104 | d5ee678fd4eeb1b5e01987ea5ac03eafc3ec6afb | /commonutils/BooleanUtils.py | 24dea300e722fde53dc9379eb38bf33a601b511d | [] | no_license | deepakcern/ExoPieUtils | 91cd2e9e0e454d645a028ef84bc485fc0e0ad1a8 | d4deebbc94795454e8d2ee62ca35a09608261d9f | refs/heads/master | 2021-12-27T23:33:49.718670 | 2020-02-06T09:07:17 | 2020-02-06T09:07:17 | 207,745,649 | 0 | 0 | null | 2019-09-11T07:04:22 | 2019-09-11T07:04:22 | null | UTF-8 | Python | false | false | 1,233 | py | import os
import sys
import optparse
import numpy
import pandas
import math
from root_pandas import read_root
from pandas import DataFrame
from pandas import Series
from ROOT import TLorentzVector, TMath
def logical_AND(all_booleans):
return (len(all_booleans) == all_booleans.count(True))
def logical_OR(all_booleans):
return (all_booleans.count(True) > 0)
def logical_AND_List2(a, b):
return (numpy.array(a) & numpy.array(b))
def logical_AND_List3(a, b, c):
return ( logical_AND_List2( logical_AND_List2(a,b), c) )
def logical_AND_List4(a, b, c, d):
return logical_AND_List2 ( logical_AND_List2(a,b) , logical_AND_List2(c,d) )
def logical_AND_List5(a, b, c, d, e):
return logical_AND_List2( logical_AND_List3(a, b, c), logical_AND_List2(d, e) )
def logical_AND_List6(a, b, c, d, e, f):
return logical_AND_List2( logical_AND_List3(a, b, c), logical_AND_List3(d, e, f) )
def logical_AND_List7(a, b, c, d, e, f, g):
return logical_AND_List3( logical_AND_List3(a, b, c), logical_AND_List3(d, e, f), g )
def WhereIsTrue(testList_, nCut_=0):
pass_index=[]
if len(testList_)>=nCut_:
pass_index = numpy.where(testList_)[0]
return pass_index
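
# Minimal usage sketch (illustrative; not part of the original module):
# the logical_AND_List* helpers AND equal-length boolean lists element-wise,
# and WhereIsTrue returns the indices holding True.
if __name__ == '__main__':
    print(logical_AND_List2([True, False, True], [True, True, False]))  # [ True False False]
    print(WhereIsTrue([False, True, True]))                             # [1 2]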
| [
"[email protected]"
] | |
7a6c3c754af69d31a48002434e8a4bd47fa81991 | 47719bc52ee997efed6a9c92684a06227ac84abf | /loveLetter/imgame.py | 2d081d99f7ccd55626e8c95a7b98c7aafd2bafb6 | [] | no_license | soyo42/imGames | 216885b8d2d12773be0cbe0d1ccde3057493e706 | 2b3ab2a6f550156e010b57c10844771812290138 | refs/heads/master | 2021-01-19T07:35:33.134160 | 2014-03-30T10:29:47 | 2014-03-30T10:29:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,297 | py | # python module imgame
from abc import ABCMeta, abstractmethod
import sys
import json
import random
import pickle
import os
import logging
CARDS = {1:'strazna', 2:'knaz', 3:'baron', 4:'komorna', 5:'princ', 6:'kral', 7:'hrabenka', 8:'princezna'}
CARDS_REQUIRING_TARGET = (1, 2, 3, 5, 6)
class Dispatcher:
'''holds configuration, responsible for dispatching game action to appropriate handler'''
_LOGGER = logging.getLogger('imgame.Dispatcher')
STATE_INIT = 'init'
STATE_TURN = 'turn'
STATE_INTERACTION = 'interaction'
def __init__(self, localStore):
self.localStore = localStore
def dispatch(self, action, data):
Dispatcher._LOGGER.debug('dispatching action: \033[35;1m{0}\033[0m'.format(action))
heapBox = HeapBox(data)
handler = None
if action == 'init':
handler = InitHandler(self)
elif action == 'turn':
handler = TurnHandler(self)
        elif action == 'interac':
            # no separate InteractHandler exists in this module; TurnHandler
            # services interaction requests through its interact() branch
            handler = TurnHandler(self)
        elif action == 'inspect':
            pass
        if handler is None:
            raise Exception('unsupported action: {0}'.format(action))
        handler.handle(heapBox)
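
# Usage sketch (illustrative; 'state.pkl' is a placeholder store path):
#   Dispatcher('state.pkl').dispatch('init', None)
# bootstraps a new game; the serialised heapBox written to the 'out' file
# is then handed to the next player, who dispatches 'turn' on it.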
class LocalContext:
'''local data store'''
def __init__(self):
self.hand = []
self.heapInitStash = None
self.name = None
class HeapBox:
'''mobile data store, exchanged among users'''
_PLAYER_LIST = 'playerList'
_ACTIVE_PLAYER = 'activePlayer'
_TARGET_PLAYER = 'targetPlayer'
_HEAP = 'heap'
_STATE = 'state'
_HISTORY = 'history'
def __init__(self, rawData):
self._data = {}
self._data[HeapBox._PLAYER_LIST] = []
self._data[HeapBox._ACTIVE_PLAYER] = None
self._data[HeapBox._TARGET_PLAYER] = None
self._data[HeapBox._HEAP] = []
self._data[HeapBox._STATE] = 'init'
self._data[HeapBox._HISTORY] = []
        self.isEmpty = rawData is None or rawData.strip() == ''
if not self.isEmpty:
self._data = self._decode(rawData)
def addPlayer(self, playerName):
if playerName in self._data[HeapBox._PLAYER_LIST]:
raise Exception('\033[31;1mplayer already added -> {}\033[0m'.format(playerName))
self._data[HeapBox._PLAYER_LIST].append(playerName)
def getPlayerList(self):
return self._data[HeapBox._PLAYER_LIST]
def setActivePlayer(self, playerName):
self._data[HeapBox._ACTIVE_PLAYER] = playerName
def getActivePlayer(self):
return self._data[HeapBox._ACTIVE_PLAYER]
def setTargetPlayer(self, playerName):
self._data[HeapBox._TARGET_PLAYER] = playerName
def getTargetPlayer(self):
return self._data[HeapBox._TARGET_PLAYER]
def getNextPlayer(self):
active = self.getActivePlayer()
allPlayers = self.getPlayerList()
nextIndex = allPlayers.index(active) + 1
if nextIndex >= len(allPlayers):
nextIndex = 0
return allPlayers[nextIndex]
def setState(self, state):
self._data[HeapBox._STATE] = state
def getState(self):
return self._data[HeapBox._STATE]
def getHeap(self):
return self._data[HeapBox._HEAP]
def addToHistory(self, player, card, target):
historyItem = HistoryItem()
historyItem.setPlayer(player)
historyItem.setCard(card)
historyItem.setTarget(target)
self._data[HeapBox._HISTORY].append(historyItem.toMap())
    def _decode(self, rawData):
        # intended pipeline: base64-decode -> gunzip -> unmarshal JSON;
        # only the JSON step is implemented so far. The payload carries
        # the heap (cards left), last round history, players list,
        # active player and game state.
        return json.loads(rawData)
    def encode(self):
        # intended pipeline: marshal JSON -> gzip -> base64-encode;
        # only the JSON step is implemented so far
        return json.dumps(self._data)
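
# Round-trip sketch (illustrative; 'alice' is a placeholder name): the
# encoded JSON string is what travels between players and rebuilds an
# equivalent HeapBox on the far side.
#   box = HeapBox(None); box.addPlayer('alice')
#   box2 = HeapBox(box.encode())   # box2.getPlayerList() == ['alice']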
class HistoryItem:
PLAYER = 'player'
CARD = 'card'
TARGET = 'target'
def __init__(self):
self._data = {}
def setPlayer(self, player):
self._data[HistoryItem.PLAYER] = player
def setCard(self, card):
self._data[HistoryItem.CARD] = card
def setTarget(self, target):
self._data[HistoryItem.TARGET] = target
def getPlayer(self):
return self._data[HistoryItem.PLAYER]
def getCard(self):
return self._data[HistoryItem.CARD]
def getTarget(self):
return self._data[HistoryItem.TARGET]
def toMap(self):
return self._data
class BaseHandler():
__metaclass__ = ABCMeta
_LOG = logging.getLogger('imgame.BaseHandler')
def __init__(self, config):
self._localStore = config.localStore
if os.path.isfile(self._localStore):
with open(self._localStore, 'rb') as storageFile:
self._localContext = pickle.load(storageFile)
if self._localContext.heapInitStash:
BaseHandler._LOG.debug('HEAP STASH in use - you are the firestarter!')
BaseHandler._LOG.debug('you are: {}'.format(self._localContext.name))
else:
self._localContext = None
def storeLocalContext(self):
with open(self._localStore, 'wb') as storageFile:
pickle.dump(self._localContext, storageFile)
def drawCard(self, heapBox):
if len(self._localContext.hand) > 1:
raise Exception('hand full already: {}'.format(self._localContext.hand))
random.shuffle(heapBox.getHeap())
self._localContext.hand.append(heapBox.getHeap().pop(0))
BaseHandler._LOG.info('on hand:{}'.format(self._localContext.hand))
def dumpHeapBox(self, heapBox):
out = heapBox.encode()
BaseHandler._LOG.debug('\n{0}\n{1}\n{0}'.format(10*'-', out))
with open('out', 'w') as msgOut:
msgOut.write(out)
@staticmethod
def askForChoice(question, options):
print '\033[33;1m{}\033[0m'.format(question)
choice = None
while choice == None:
for i in range(len(options)):
print '{}. {}'.format(i+1, options[i])
idx = raw_input('Enter choice number: ')
try:
choiceIdx = int(idx) - 1
justCheckRange = options[choiceIdx]
choice = choiceIdx
except Exception as e:
BaseHandler._LOG.warn('answer not understood: {}'.format(e))
return choice
@abstractmethod
def handle(self, heapBox):
pass
class InitHandler(BaseHandler):
'''Provisioning of game:
- player name
- draw first card
'''
_LOG = logging.getLogger('imgame.InitHandler')
def __init__(self, localstore):
super(InitHandler, self).__init__(localstore)
self._stashSize = 8
def handle(self, heapBox):
if heapBox.isEmpty:
InitHandler._LOG.debug('creating heapBox from SCRATCH')
else:
InitHandler._LOG.debug('init with EXISTING heapBox')
if heapBox.getState() != Dispatcher.STATE_INIT:
raise Exception('incorrect state occured: {}, expected: {}'.format(heapBox.getState(), Dispatcher.STATE_INIT))
# get player name from localContext
if self._localContext == None:
self._localContext = LocalContext()
defaultName = self._localContext.name
# detection of point where the heap stash must be moved back to heapBox
if defaultName and not heapBox.isEmpty and self._localContext.heapInitStash:
# move stashed heap back to heapBox
heapBox.getHeap().extend(self._localContext.heapInitStash)
InitHandler._LOG.debug('\033[35mHEAP STASH moved back to heapBox (+{})\033[0m'.format(len(self._localContext.heapInitStash)))
self._localContext.heapInitStash = None
heapBox.setActivePlayer(defaultName)
heapBox.setState(Dispatcher.STATE_TURN)
InitHandler._LOG.info('\033[32;1mgame initialized, let\'s play\033[0m')
else:
# get user name from stdin
playerName = InitHandler.askForPlayerName(defaultName)
heapBox.addPlayer(playerName)
heapBox.setActivePlayer(playerName)
self._localContext.name = playerName
if heapBox.isEmpty:
heapBox.getHeap().extend(InitHandler.createHeap())
heapBox.setState(Dispatcher.STATE_INIT)
# stash half of cards from heap
self._localContext.heapInitStash = heapBox.getHeap()[0:self._stashSize]
del(heapBox.getHeap()[0:self._stashSize])
InitHandler._LOG.debug('\033[35mHEAP STASH created ({})\033[0m'.format(len(self._localContext.heapInitStash)))
# draw first card
del(self._localContext.hand[:])
self.drawCard(heapBox)
self.dumpHeapBox(heapBox)
self.storeLocalContext()
@staticmethod
def askForPlayerName(defaultName):
nameQuery = 'Enter player name [{}]: '.format(defaultName)
playerName = None
while not playerName:
playerName = raw_input(nameQuery)
if not playerName and defaultName != None:
playerName = defaultName
return playerName
@staticmethod
def createHeap():
heap = [1,1,1,1,1, 2,2, 3,3, 4,4, 5,5, 6, 7, 8]
InitHandler._LOG.debug('heap created: {}({})'.format(heap, len(heap)))
random.shuffle(heap)
return heap
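    # Deck note: 5x guard(1), 2x priest(2), 2x baron(3), 2x handmaid(4),
    # 2x prince(5), 1x king(6), 1x countess(7), 1x princess(8) -- the
    # standard 16-card Love Letter deck, matching the CARDS map above.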
class TurnHandler(BaseHandler):
'''Pulling through a turn:
- draw card
- dump card + choose target, if feasible
Or interact as target:
- reply upon request
'''
_LOG = logging.getLogger('imgame.TurnHandler')
def handle(self, heapBox):
if heapBox.getState() == Dispatcher.STATE_TURN:
self.prepareNextTurn(heapBox)
elif heapBox.getState() == Dispatcher.STATE_INTERACTION:
self.interact(heapBox)
else:
TurnHandler._LOG.error('unexpected game state: {}'.format(heapBox.getState()))
def prepareNextTurn(self, heapBox):
# simple check of order
nextPlayer = heapBox.getNextPlayer()
if nextPlayer != self._localContext.name:
raise Exception('wrong player, expected: {}'.format(nextPlayer))
        heapBox.setActivePlayer(nextPlayer)
self.drawCard(heapBox)
# ask user to choose card
choice = BaseHandler.askForChoice('Which card would you like to use?',
['{} [{}]'.format(CARDS[i], i) for i in self._localContext.hand])
cardToUse = self._localContext.hand[choice]
TurnHandler._LOG.debug('chosen: {0} -> [{1}]'.format(cardToUse, CARDS[cardToUse]))
del(self._localContext.hand[choice])
# optional: offer target
if TurnHandler.needsTarget(cardToUse):
choice = BaseHandler.askForChoice('Choose target player?',
heapBox.getPlayerList())
targetPlayer = heapBox.getPlayerList()[choice]
TurnHandler._LOG.debug('target player: \'{0}\''.format(targetPlayer))
if targetPlayer != self._localContext.name:
# add to heapBox
heapBox.setTargetPlayer(targetPlayer)
heapBox.setState(Dispatcher.STATE_INTERACTION)
# log draw to heapBox
self.logAction(heapBox, cardToUse, heapBox.getTargetPlayer())
# send heapBox to next player or target
nextPlayer = None
if heapBox.getTargetPlayer() != None:
nextPlayer = heapBox.getTargetPlayer()
else:
nextPlayer = heapBox.getNextPlayer()
TurnHandler._LOG.info('now you need to deliver heapBox to: {0}'.format(nextPlayer))
# dump
self.storeLocalContext()
self.dumpHeapBox(heapBox)
def interact(self, heapBox):
print 'interacting...'
def logAction(self, heapBox, card, target):
# user -> card [-> target ]
heapBox.addToHistory(self._localContext.name, card, target)
@staticmethod
def needsTarget(card):
        return card in CARDS_REQUIRING_TARGET
if __name__ == '__main__':
raise Exception('Package invoked, but it does not contain any standalone code.')
| [
"[email protected]"
] | |
c1fba2d97391f3c3ce83fd6a93450a9a84a04fad | 9ad8eba7f7335404a5c403af1f69f2fd9fa5d606 | /ssq.py | ffa9c8edb2bfba2976ef18d4a8c73e5312aed080 | [] | no_license | renhl/learn | 2ea743c172acfaa8c880d3e1ec8a3b19c002b106 | b27aab9a2885b76c0c09e1f82ab56829ef04df98 | refs/heads/master | 2020-06-25T16:28:09.264396 | 2018-01-19T10:14:34 | 2018-01-19T10:14:34 | 96,976,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018-01-02 18:20
# @Author : renhl
# @Site :
# @File : ssq.py
# @Software: PyCharm Community Edition
import random
h = random.sample(range(1, 34), 6)  # draw 6 distinct red balls from 1-33
h.sort()                            # sort the red balls
l = random.sample(range(1, 17), 1)  # draw 1 blue ball from 1-16
h1 = ', '.join(str(n) for n in h)   # render the red balls as a string
l1 = str(l[0])
print("Red balls:", h1, "Blue ball:", l1)
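# random.sample draws without replacement, so the six red balls are always
# distinct. Example output (values are random):
#   Red balls: 3, 7, 12, 19, 25, 31 Blue ball: 9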
"[email protected]"
] | |
05f04c656adb5e55c8aa4be8872f7e96431b7d25 | e3825f7912d6fa38c4878b42afd2a3e643ebd2f6 | /src/run.py | 45ad32e7502824e2fa0e28bd89be92c709dff925 | [] | no_license | winnieliuzzz/Whale_of_Deal | 8c5078f68e15ef7654acafea6f80083d496c96f2 | 5641af2b595846a25caa756f2463f9fb96e73be2 | refs/heads/master | 2020-03-22T12:27:42.132516 | 2018-08-06T04:19:08 | 2018-08-06T04:19:08 | 140,041,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from src.app import app
if __name__ == '__main__':
app.run(debug=app.config['DEBUG'], port=4990) | [
"[email protected]"
] | |
a8175e7a8d2f0309b301e0e1f1ed7ac348b75786 | 9fc18966198c4122f2d82d5e05ee8b4542a2c347 | /Fattree.py | 2764c8c0d1e924d7d71b84df4deaa9e71bf7e73f | [] | no_license | hyili/SDN_multicast_replication | 63c2c35a82309bafd4a1d9959e073b1406eedc54 | 6b6edb2c8ae697c81501ffbb0818ac0dba9ef05c | refs/heads/master | 2021-01-20T21:23:52.863153 | 2015-07-22T04:31:18 | 2015-07-22T04:31:18 | 39,134,247 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,480 | py | #!/usr/bin/python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
from mininet.node import RemoteController
from mininet.cli import CLI
#class MyTopo( Topo ):
# "Simple topology example."
#
# def __init__( net ):
# "Create custom topo."
#
# # Initialize topology
# Topo.__init__( net )
def perfTest():
"Create network and run simple performance test"
#topo = MyTopo()
net = Mininet( topo=None, link=TCLink, controller=RemoteController )
net.addController('c', controller=RemoteController, ip='127.0.0.1', port=6653)
# Add hosts and switches
Pod_host = []
Pod_switch = []
Aggr_switch = []
Core_switch = []
for i in range(1001, 1005, 1):
t = net.addSwitch( str(i) )
Core_switch.append( t )
for i in range(2001, 2009, 1):
t = net.addSwitch( str(i) )
Aggr_switch.append( t )
for i in range(3001, 3009, 1):
t = net.addSwitch( str(i) )
Pod_switch.append( t )
for i in range(0, 16, 1):
Pod_host.append( net.addHost( str(i) ) )
net.addLink(Pod_host[0], Pod_switch[0], bw=100)
net.addLink(Pod_host[1], Pod_switch[0], bw=100)
net.addLink(Pod_host[2], Pod_switch[1], bw=100)
net.addLink(Pod_host[3], Pod_switch[1], bw=100)
net.addLink(Pod_host[4], Pod_switch[2], bw=100)
net.addLink(Pod_host[5], Pod_switch[2], bw=100)
net.addLink(Pod_host[6], Pod_switch[3], bw=100)
net.addLink(Pod_host[7], Pod_switch[3], bw=100)
net.addLink(Pod_host[8], Pod_switch[4], bw=100)
net.addLink(Pod_host[9], Pod_switch[4], bw=100)
net.addLink(Pod_host[10], Pod_switch[5], bw=100)
net.addLink(Pod_host[11], Pod_switch[5], bw=100)
net.addLink(Pod_host[12], Pod_switch[6], bw=100)
net.addLink(Pod_host[13], Pod_switch[6], bw=100)
net.addLink(Pod_host[14], Pod_switch[7], bw=100)
net.addLink(Pod_host[15], Pod_switch[7], bw=100)
net.addLink(Pod_switch[0], Aggr_switch[0], bw=100)
net.addLink(Pod_switch[0], Aggr_switch[1], bw=100)
net.addLink(Pod_switch[1], Aggr_switch[0], bw=100)
net.addLink(Pod_switch[1], Aggr_switch[1], bw=100)
net.addLink(Pod_switch[2], Aggr_switch[2], bw=100)
net.addLink(Pod_switch[2], Aggr_switch[3], bw=100)
net.addLink(Pod_switch[3], Aggr_switch[2], bw=100)
net.addLink(Pod_switch[3], Aggr_switch[3], bw=100)
net.addLink(Pod_switch[4], Aggr_switch[4], bw=100)
net.addLink(Pod_switch[4], Aggr_switch[5], bw=100)
net.addLink(Pod_switch[5], Aggr_switch[4], bw=100)
net.addLink(Pod_switch[5], Aggr_switch[5], bw=100)
net.addLink(Pod_switch[6], Aggr_switch[6], bw=100)
net.addLink(Pod_switch[6], Aggr_switch[7], bw=100)
net.addLink(Pod_switch[7], Aggr_switch[6], bw=100)
net.addLink(Pod_switch[7], Aggr_switch[7], bw=100)
net.addLink(Core_switch[0], Aggr_switch[0], bw=1000)
net.addLink(Core_switch[0], Aggr_switch[2], bw=1000)
net.addLink(Core_switch[0], Aggr_switch[4], bw=1000)
net.addLink(Core_switch[0], Aggr_switch[6], bw=1000)
net.addLink(Core_switch[1], Aggr_switch[1], bw=1000)
net.addLink(Core_switch[1], Aggr_switch[3], bw=1000)
net.addLink(Core_switch[1], Aggr_switch[5], bw=1000)
net.addLink(Core_switch[1], Aggr_switch[7], bw=1000)
net.addLink(Core_switch[2], Aggr_switch[0], bw=1000)
net.addLink(Core_switch[2], Aggr_switch[2], bw=1000)
net.addLink(Core_switch[2], Aggr_switch[4], bw=1000)
net.addLink(Core_switch[2], Aggr_switch[6], bw=1000)
net.addLink(Core_switch[3], Aggr_switch[1], bw=1000)
net.addLink(Core_switch[3], Aggr_switch[3], bw=1000)
net.addLink(Core_switch[3], Aggr_switch[5], bw=1000)
net.addLink(Core_switch[3], Aggr_switch[7], bw=1000)
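
    # Topology recap: a 4-pod fat-tree -- 16 hosts, 8 edge (Pod) switches,
    # 8 aggregation switches and 4 core switches. Each edge switch serves
    # 2 hosts and uplinks to both aggregation switches in its pod; each
    # core switch connects to one aggregation switch in every pod.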
net.start()
    # enable STP on every bridge, using each switch's own name ('1001',
    # '2001', ...); the bare loop index does not match any bridge name
    for t in Core_switch + Aggr_switch + Pod_switch:
        t.cmd('ovs-vsctl set bridge ' + t.name + ' stp-enable=true')
# print "*** Dumping host connections ***"
# dumpNodeConnections( net.hosts )
# print "*** Testing network connectivity ***"
# net.pingAll()
# print "*** Testing bandwidth between h1 and h2 ***"
# h0, h1, h2 = net.get( '0', '1', '15' )
# h1.popen('iperf -s -u -i 1 > SamePod', shell=True)
# h2.popen('iperf -s -u -i 1 > DiffPod', shell=True)
# h0.cmdPrint('iperf -c '+h1.IP()+' -u -t 10 -i 1 -b 100m')
# h0.cmdPrint('iperf -c '+h2.IP()+' -u -t 10 -i 1 -b 100m')
print "*** Starting CLI ***"
CLI(net)
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
perfTest()
| [
"[email protected]"
] | |
8edeb38ba9eac708dc2867083639618cc2864801 | e2a4f7620535a0919b7dee9674e17207d164590a | /Hospital/urls.py | d50a9e8ae4b8b375f5cf35c76fd5747cb3db3ec5 | [] | no_license | pydjamolpomaji/SunriseMultispecialityHospital | 2a1f8990845f732e19dbd752312ebe5e5d2dbabd | c09a122e4347ebe3d0ff2fa980a554279211cf77 | refs/heads/master | 2023-03-06T08:16:11.407573 | 2021-02-17T10:10:48 | 2021-02-17T10:10:48 | 339,680,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | from django.urls import path
from Hospital import views
app_name = 'Hospital'
urlpatterns = [
    path('', views.home, name='home'),
path('contact/', views.contact, name='contact'),
path('about/', views.about, name='about'),
path('doctors/', views.doctors, name='doctors'),
path('tpa_insurances/', views.tpa_insurances, name='tpa_insurances'),
# Speciality
path('general_medicine/', views.general_medicine, name='general_medicine'),
path('pediatrician_neonatologist/', views.pediatrician_neonatologist, name='pediatrician_neonatologist'),
path('obstetrician_gynecologist/', views.obstetrician_gynecologist, name='obstetrician_gynecologist'),
path('general_laparoscopic_surgery/', views.general_laparoscopic_surgery, name='general_laparoscopic_surgery'),
path('orthopedics/', views.orthopedics, name='orthopedics'),
path('dermatology_skin_vd/', views.dermatology_skin_vd, name='dermatology_skin_vd'),
path('ent/', views.ent, name='ent'),
path('psychiatry/', views.psychiatry, name='psychiatry'),
path('psychology_and_counselling/', views.psychology_and_counselling, name='psychology_and_counselling'),
path('physiotherapist/', views.physiotherapist, name='physiotherapist'),
path('dietitian_nutritionist/', views.dietitian_and_nutritionist, name='dietitian_nutritionist'),
# Super Speciality
path('neurologist/', views.neurologist, name='neurologist'),
path('nephrology/', views.nephrology, name='nephrology'),
path('haematology_oncology/', views.haematology_oncology, name='haematology_oncology'),
path('urology/', views.urology, name='urology'),
path('pediatric_surgery/', views.pediatric_surgery, name='pediatric_surgery'),
path('plastic_surgeon/', views.plastic_surgeon, name='plastic_surgeon'),
path('dietitian_and_nutrition/', views.dietitian_and_nutrition, name='dietitian_and_nutrition'),
path('testmail/', views.testmail, name='testmail'),
]
| [
"[email protected]"
] | |
8cf1ea00facabb67b4c522ae197458c031ebd077 | 792f7d6b8fd0cedf1a5a6967a9a463eae30e7de9 | /sfftkrw/schema/v0_5_8.py | 70d11885c15894bd84a2591c3529f3e694235035 | [
"Apache-2.0"
] | permissive | emdb-empiar/sfftk-rw | d4053418dde11e88b37dde7e4a2ba3a237861854 | b96389faa20214b970aeb58c36412cbf691d87da | refs/heads/master | 2023-02-15T21:34:11.005361 | 2023-01-27T13:23:20 | 2023-01-27T13:23:20 | 214,158,597 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224,971 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Oct 20 11:49:30 2016 by generateDS.py version 2.22a.
#
# Command line options:
# ('-o', 'sfftkrw/sfftkrw/schema/emdb_sff.py')
#
# Command line arguments:
# /Users/pkorir/Documents/workspace/schema/trunk/DA_schema/segmentation_da.xsd
#
# Command line:
# /usr/local/bin/generateDS.py -o "sfftkrw/sfftkrw/schema/emdb_sff.py" /Users/pkorir/Documents/workspace/schema/trunk/DA_schema/segmentation_da.xsd
#
# Current working directory (os.getcwd()):
# trunk
#
import base64
import datetime as datetime_
import re as re_
import sys
import warnings as warnings_
from lxml import etree as etree_
Validate_simpletypes_ = True
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
parser = etree_.ETCompatXMLParser()
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node=None, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node=None, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (time_parts[0], micro_seconds, )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns. We should:
# - AND the outer elements
# - OR the inner elements
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
if re_.search(patterns2, target) is not None:
found2 = True
break
if not found2:
found1 = False
break
return found1
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
return instring.encode(ExternalEncoding)
else:
return instring
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
def quote_xml_aux(inStr):
s1 = inStr.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class segmentation(GeneratedsSuper):
"""Link to assembly or component id. Version History 0.2, 2015/3/15,
Ardan Patwardhan: 1) Added support for color 2) Added support
for triangle representation of segment - this is in support of
Amira segmentations 0.3, 2015/10/21, Paul Korir: 1) Added
voxelTransform 2) Added filePath for segmentLinks to HDF5
(Segger) segmentations 3) Added segmentObjects with contours and
meshes for IMOD 0.4, 2015/11/04, Paul Korir: 1) Added alpha for
rgb color (transparency) 0.5, 2015/12/14, Paul Korir: 1) Removed
'voxelTransform' 2) Moved 'numberOfCopies' as
'numberOfInstances' to be a sequence element within
'biologicalAnnotation' 3) Made 'color' a choice between 'name'
(X11/HTML4 sRGB colour space names) and 'rgba' 4) Added a
'transform' element, which is a possibly empty list of
transformation matrices referenced within the document by a non-
negative identifier 5) Renamed 'segmentLink' to
'voxelSegmentation' and added a 'transformId' element 6) Renamed
'color' element to 'colour' and 'colorType' to 'colourType' 7)
Structured 'shapePrimitive' to a list of 'shapePrimitive'
(substitution group for 'ellipsoid', 'cuboid', 'pyramid',
'cylinder') 8) Identified and labeled deprecated data types 9)
Renamed shapePrimitive 'pyramid' to 'cone' and added tentative
definition 10) Removed 'origin' and 'viewVectorRotation'
elements from shape primitives (cuboid, ellipsoid, cone,
cylinder) and replaced with 'transformId' element 11) Renamed
references to 'triangles' to 'polygons' e.g. 'triangleListType'
now 'polygonListType' 0.5.1, 2015/12/23, Paul Korir: 1) Change
colourType to have rgbaType minOccurs=1 2) Added 'attribute'
element (e.g. for FOM) to shapePrimitive 3) Deleted comment:
'Are the CCP4 format masks going to be allowed? If so, we could
represent the information of the mask (mapType) here.' from
'voxelSegmentation'. 4) Added segment 'format' element to
'voxelSegmentation' restricted to values 'runLengthEncoding',
'MRC', or 'Segger' 5) Renamed 'voxelSegmentation' to
'threeDVolume' and deprecated 'subTomogramAverage'. 6)
Restructured 'transforms' into a list ('transformList') of
'transform', which is a substitutionGroup of
'transformationMatrix', 'canonicalEulerAngles' or
'viewVectorRotation'. 7) Added an 'angleDegreeType' to restrict
angle range in degrees [0,360]. 0.5.2, 2016/01/04, Paul Korir:
1) Made 'transformList' optional 2) Added 'id' attributes for
'canonicalEulerAngle' and 'viewVectorRotation' 3) Annotations
for 'filePath' 4) Added 'other' to types for 'externalReference'
and optional 'otherTypes' attribute 5) Changed type of 'name'
element in 'colour' from 'xs:string' to 'xs:token' 6) Created
'threeDVolumeType' to be used by 'threeDVolume' and
'subtomogramAverage' 7) Added 'id' attributes for
'shapePrimitive' and 'threeDVolumeType' 8) Renamed 'contours' to
'contourList' 9) Created 'meshList' as a sequence of 'mesh'.
Each 'mesh' has an 'id' 0.5.3, 2016/01/06, Paul Korir: 1)
Renamed 'transformationMatrixType' attribute from 'transformId'
to 'id' 2) Created 'dimensionType' for all 'shapePrimitive'
dimensions 0.5.4, 2016/01/06, Paul Korir: 1) Added element
'primaryDescriptor' of type restricted string to 'threeDVolume',
'shapePrimitiveList', 'contourList', 'meshList' 0.5.5,
2016/01/22, Paul Korir: 1) Changed 'vertices' and 'polygons' to
'vertexList' and 'polygonList', respectively. 2) Added attribute
'designation' restricted to string 'vertex' (default) and
'normal' to 'vertex' element. 0.5.6, 2016/01/25, Paul Korir: 1)
Changed attribute 'polygonID' from 'positiveInteger' to
'nonNegativeInteger' 0.5.7, 2016/01/26, Paul Korir: 1) Change
element names to save disk space: 'vertex' to 'v', 'vertexID' to
'vID', 'point' to 'p', 'polygon' to 'P', 'polygonID' to 'PID' 2)
Changed 'vertexType' to have ID of type 'xs:nonNegativeInteger'
(from 'xs:positiveInteger') 0.5.8, 2016/04/26, Paul Korir: 1)
Added 'version' element at the root. 0.5.8, 2016/08/08, Paul
Korir: 2) Made 'shapePrimitive' element 'attribute (not present
in 'subtomogramAverage' optional (minOccurs=0) 0.5.8,
2016/08/17, Paul Korir: 3) Added a 'transformId' element to
'contourList' and to 'mesh'. 0.5.8, 2016/10/04, Paul Korir: 4)
Simplified shape primitives by eliminating internal layering
e.g. cone/dimensions/height to cone/height 0.5.8, 2016/10/14,
Paul Korir: 5) (Finally) renamed 'surfaceType' to 'meshType' for
consistency. 0.5.8, 2016/10/20, Paul Korir: 6) 'macromolecule'
and 'complex' changed from 'positiveInteger' to 'string'"""
subclass = None
superclass = None
def __init__(self, schemaVersion='0.5.8', version=None, name=None, software=None, transformList=None, filePath=None, primaryDescriptor=None, segmentList=None, details=None):
self.original_tagname_ = None
self.schemaVersion = _cast(None, schemaVersion)
self.version = version
self.name = name
self.software = software
self.transformList = transformList
self.filePath = filePath
self.primaryDescriptor = primaryDescriptor
self.validate_primaryDescriptorType(self.primaryDescriptor)
self.segmentList = segmentList
self.details = details
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, segmentation)
if subclass is not None:
return subclass(*args_, **kwargs_)
if segmentation.subclass:
return segmentation.subclass(*args_, **kwargs_)
else:
return segmentation(*args_, **kwargs_)
factory = staticmethod(factory)
def get_version(self): return self.version
def set_version(self, version): self.version = version
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_software(self): return self.software
def set_software(self, software): self.software = software
def get_transformList(self): return self.transformList
def set_transformList(self, transformList): self.transformList = transformList
def get_filePath(self): return self.filePath
def set_filePath(self, filePath): self.filePath = filePath
def get_primaryDescriptor(self): return self.primaryDescriptor
def set_primaryDescriptor(self, primaryDescriptor): self.primaryDescriptor = primaryDescriptor
def get_segmentList(self): return self.segmentList
def set_segmentList(self, segmentList): self.segmentList = segmentList
def get_details(self): return self.details
def set_details(self, details): self.details = details
def get_schemaVersion(self): return self.schemaVersion
def set_schemaVersion(self, schemaVersion): self.schemaVersion = schemaVersion
def validate_primaryDescriptorType(self, value):
# Validate type primaryDescriptorType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['threeDVolume', 'shapePrimitiveList', 'contourList', 'meshList']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on primaryDescriptorType' % {"value" : value.encode("utf-8")} )
def hasContent_(self):
if (
self.version is not None or
self.name is not None or
self.software is not None or
self.transformList is not None or
self.filePath is not None or
self.primaryDescriptor is not None or
self.segmentList is not None or
self.details is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='segmentation', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='segmentation')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='segmentation', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='segmentation'):
if self.schemaVersion != "0.5.8" and 'schemaVersion' not in already_processed:
already_processed.add('schemaVersion')
outfile.write(' schemaVersion=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.schemaVersion), input_name='schemaVersion')), ))
def exportChildren(self, outfile, level, namespace_='', name_='segmentation', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.version is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sversion>%s</%sversion>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.version), input_name='version')), namespace_, eol_))
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.name), input_name='name')), namespace_, eol_))
if self.software is not None:
self.software.export(outfile, level, namespace_, name_='software', pretty_print=pretty_print)
if self.transformList is not None:
self.transformList.export(outfile, level, namespace_, name_='transformList', pretty_print=pretty_print)
if self.filePath is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfilePath>%s</%sfilePath>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.filePath), input_name='filePath')), namespace_, eol_))
if self.primaryDescriptor is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sprimaryDescriptor>%s</%sprimaryDescriptor>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.primaryDescriptor), input_name='primaryDescriptor')), namespace_, eol_))
if self.segmentList is not None:
self.segmentList.export(outfile, level, namespace_, name_='segmentList', pretty_print=pretty_print)
if self.details is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdetails>%s</%sdetails>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.details), input_name='details')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('schemaVersion', node)
if value is not None and 'schemaVersion' not in already_processed:
already_processed.add('schemaVersion')
self.schemaVersion = value
self.schemaVersion = ' '.join(self.schemaVersion.split())
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'version':
version_ = child_.text
version_ = self.gds_validate_string(version_, node, 'version')
self.version = version_
elif nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'software':
obj_ = softwareType.factory()
obj_.build(child_)
self.software = obj_
obj_.original_tagname_ = 'software'
elif nodeName_ == 'transformList':
obj_ = transformListType.factory()
obj_.build(child_)
self.transformList = obj_
obj_.original_tagname_ = 'transformList'
elif nodeName_ == 'filePath':
filePath_ = child_.text
filePath_ = re_.sub(String_cleanup_pat_, " ", filePath_).strip()
filePath_ = self.gds_validate_string(filePath_, node, 'filePath')
self.filePath = filePath_
elif nodeName_ == 'primaryDescriptor':
primaryDescriptor_ = child_.text
primaryDescriptor_ = self.gds_validate_string(primaryDescriptor_, node, 'primaryDescriptor')
self.primaryDescriptor = primaryDescriptor_
# validate type primaryDescriptorType
self.validate_primaryDescriptorType(self.primaryDescriptor)
elif nodeName_ == 'segmentList':
obj_ = segmentListType.factory()
obj_.build(child_)
self.segmentList = obj_
obj_.original_tagname_ = 'segmentList'
elif nodeName_ == 'details':
details_ = child_.text
details_ = self.gds_validate_string(details_, node, 'details')
self.details = details_
# end class segmentation
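# Illustrative usage sketch (not part of the generated bindings). It assumes
# the usual generateDS all-keyword constructor for 'segmentation'; the name
# and tool values below are hypothetical.
def _example_export_segmentation():
    import sys
    seg = segmentation()
    seg.set_name('example segmentation')
    # primaryDescriptor must be one of the xsd enumeration values:
    # 'threeDVolume', 'shapePrimitiveList', 'contourList' or 'meshList'.
    seg.set_primaryDescriptor('threeDVolume')
    seg.set_software(softwareType(name='someTool', version='1.0'))
    seg.export(sys.stdout, 0)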
class softwareType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, version=None, processingDetails=None):
self.original_tagname_ = None
self.name = name
self.version = version
self.processingDetails = processingDetails
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, softwareType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if softwareType.subclass:
return softwareType.subclass(*args_, **kwargs_)
else:
return softwareType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_version(self): return self.version
def set_version(self, version): self.version = version
def get_processingDetails(self): return self.processingDetails
def set_processingDetails(self, processingDetails): self.processingDetails = processingDetails
def hasContent_(self):
if (
self.name is not None or
self.version is not None or
self.processingDetails is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='softwareType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='softwareType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='softwareType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='softwareType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='softwareType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.name), input_name='name')), namespace_, eol_))
if self.version is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sversion>%s</%sversion>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.version), input_name='version')), namespace_, eol_))
if self.processingDetails is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sprocessingDetails>%s</%sprocessingDetails>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.processingDetails), input_name='processingDetails')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'name':
name_ = child_.text
name_ = re_.sub(String_cleanup_pat_, " ", name_).strip()
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'version':
version_ = child_.text
version_ = re_.sub(String_cleanup_pat_, " ", version_).strip()
version_ = self.gds_validate_string(version_, node, 'version')
self.version = version_
elif nodeName_ == 'processingDetails':
processingDetails_ = child_.text
processingDetails_ = self.gds_validate_string(processingDetails_, node, 'processingDetails')
self.processingDetails = processingDetails_
# end class softwareType
class transformationMatrixType(GeneratedsSuper):
"""data is a space-separated sequence of numbers to be converted to
floats. The number of items in 'data' is equivalent to the
product of 'rows' and 'cols'."""
subclass = None
superclass = None
def __init__(self, id=None, rows=None, cols=None, data=None):
self.original_tagname_ = None
self.id = _cast(int, id)
self.rows = rows
self.cols = cols
self.data = data
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, transformationMatrixType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if transformationMatrixType.subclass:
return transformationMatrixType.subclass(*args_, **kwargs_)
else:
return transformationMatrixType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_rows(self): return self.rows
def set_rows(self, rows): self.rows = rows
def get_cols(self): return self.cols
def set_cols(self, cols): self.cols = cols
def get_data(self): return self.data
def set_data(self, data): self.data = data
def get_id(self): return self.id
def set_id(self, id): self.id = id
def hasContent_(self):
if (
self.rows is not None or
self.cols is not None or
self.data is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='transformationMatrixType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='transformationMatrixType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='transformationMatrixType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='transformationMatrixType'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='transformationMatrixType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.rows is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%srows>%s</%srows>%s' % (namespace_, self.gds_format_integer(self.rows, input_name='rows'), namespace_, eol_))
if self.cols is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scols>%s</%scols>%s' % (namespace_, self.gds_format_integer(self.cols, input_name='cols'), namespace_, eol_))
if self.data is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdata>%s</%sdata>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.data), input_name='data')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'rows':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'rows')
self.rows = ival_
elif nodeName_ == 'cols':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'cols')
self.cols = ival_
elif nodeName_ == 'data':
data_ = child_.text
data_ = self.gds_validate_string(data_, node, 'data')
self.data = data_
# end class transformationMatrixType
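# Illustrative sketch: a 3x4 affine transform. 'data' holds rows*cols
# space-separated numbers (here an identity rotation with zero translation);
# the assertion restates the docstring's constraint.
def _example_transformation_matrix():
    tx = transformationMatrixType(
        id=0, rows=3, cols=4,
        data='1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0',
    )
    assert tx.get_rows() * tx.get_cols() == len(tx.get_data().split())
    return tx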
class canonicalEulerAnglesType(GeneratedsSuper):
"""Euler angler according to Heymann et al. 2005
(doi:10.1016/j.jsb.2005.06.001)Angles are specified in degrees
together with their bounds."""
subclass = None
superclass = None
def __init__(self, id=None, phi=None, theta=None, psi=None):
self.original_tagname_ = None
self.id = _cast(int, id)
self.phi = phi
self.validate_angleDegreeType(self.phi)
self.theta = theta
self.validate_angleDegreeType(self.theta)
self.psi = psi
self.validate_angleDegreeType(self.psi)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, canonicalEulerAnglesType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if canonicalEulerAnglesType.subclass:
return canonicalEulerAnglesType.subclass(*args_, **kwargs_)
else:
return canonicalEulerAnglesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_phi(self): return self.phi
def set_phi(self, phi): self.phi = phi
def get_theta(self): return self.theta
def set_theta(self, theta): self.theta = theta
def get_psi(self): return self.psi
def set_psi(self, psi): self.psi = psi
def get_id(self): return self.id
def set_id(self, id): self.id = id
def validate_angleDegreeType(self, value):
# Validate type angleDegreeType, a restriction on xs:float.
if value is not None and Validate_simpletypes_:
if value < 0:
warnings_.warn('Value "%(value)s" does not match xsd minInclusive restriction on angleDegreeType' % {"value" : value} )
if value > 360:
warnings_.warn('Value "%(value)s" does not match xsd maxInclusive restriction on angleDegreeType' % {"value" : value} )
def hasContent_(self):
if (
self.phi is not None or
self.theta is not None or
self.psi is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='canonicalEulerAnglesType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='canonicalEulerAnglesType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='canonicalEulerAnglesType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='canonicalEulerAnglesType'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='canonicalEulerAnglesType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.phi is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sphi>%s</%sphi>%s' % (namespace_, self.gds_format_float(self.phi, input_name='phi'), namespace_, eol_))
if self.theta is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stheta>%s</%stheta>%s' % (namespace_, self.gds_format_float(self.theta, input_name='theta'), namespace_, eol_))
if self.psi is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spsi>%s</%spsi>%s' % (namespace_, self.gds_format_float(self.psi, input_name='psi'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'phi':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'phi')
self.phi = fval_
# validate type angleDegreeType
self.validate_angleDegreeType(self.phi)
elif nodeName_ == 'theta':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'theta')
self.theta = fval_
# validate type angleDegreeType
self.validate_angleDegreeType(self.theta)
elif nodeName_ == 'psi':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'psi')
self.psi = fval_
# validate type angleDegreeType
self.validate_angleDegreeType(self.psi)
# end class canonicalEulerAnglesType
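# Illustrative sketch: canonical Euler angles (Heymann convention) are given
# in degrees; validate_angleDegreeType only warns (it does not raise) when a
# value falls outside [0, 360].
def _example_euler_angles():
    return canonicalEulerAnglesType(id=1, phi=30.0, theta=45.0, psi=60.0)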
class viewVectorRotationType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None, x=None, y=None, z=None, r=None):
self.original_tagname_ = None
self.id = _cast(int, id)
self.x = x
self.y = y
self.z = z
self.r = r
self.validate_angleDegreeType(self.r)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, viewVectorRotationType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if viewVectorRotationType.subclass:
return viewVectorRotationType.subclass(*args_, **kwargs_)
else:
return viewVectorRotationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_x(self): return self.x
def set_x(self, x): self.x = x
def get_y(self): return self.y
def set_y(self, y): self.y = y
def get_z(self): return self.z
def set_z(self, z): self.z = z
def get_r(self): return self.r
def set_r(self, r): self.r = r
def get_id(self): return self.id
def set_id(self, id): self.id = id
def validate_angleDegreeType(self, value):
# Validate type angleDegreeType, a restriction on xs:float.
if value is not None and Validate_simpletypes_:
if value < 0:
warnings_.warn('Value "%(value)s" does not match xsd minInclusive restriction on angleDegreeType' % {"value" : value} )
if value > 360:
warnings_.warn('Value "%(value)s" does not match xsd maxInclusive restriction on angleDegreeType' % {"value" : value} )
def hasContent_(self):
if (
self.x is not None or
self.y is not None or
self.z is not None or
self.r is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='viewVectorRotationType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='viewVectorRotationType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='viewVectorRotationType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='viewVectorRotationType'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='viewVectorRotationType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.x is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sx>%s</%sx>%s' % (namespace_, self.gds_format_float(self.x, input_name='x'), namespace_, eol_))
if self.y is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sy>%s</%sy>%s' % (namespace_, self.gds_format_float(self.y, input_name='y'), namespace_, eol_))
if self.z is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sz>%s</%sz>%s' % (namespace_, self.gds_format_float(self.z, input_name='z'), namespace_, eol_))
if self.r is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sr>%s</%sr>%s' % (namespace_, self.gds_format_float(self.r, input_name='r'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'x':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'x')
self.x = fval_
elif nodeName_ == 'y':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'y')
self.y = fval_
elif nodeName_ == 'z':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'z')
self.z = fval_
elif nodeName_ == 'r':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'r')
self.r = fval_
# validate type angleDegreeType
self.validate_angleDegreeType(self.r)
# end class viewVectorRotationType
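# Illustrative sketch: an axis-angle rotation of r degrees about the vector
# (x, y, z); only 'r' is range-checked against the [0, 360] degree bounds.
def _example_view_vector_rotation():
    return viewVectorRotationType(id=2, x=0.0, y=0.0, z=1.0, r=90.0)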
class segmentType(GeneratedsSuper):
"""For the root segment: parentSegmentID = 0"""
subclass = None
superclass = None
def __init__(self, id=None, parentID=None, biologicalAnnotation=None, complexesAndMacromolecules=None, colour=None, threeDVolume=None, shapePrimitiveList=None, contourList=None, meshList=None):
self.original_tagname_ = None
self.id = _cast(int, id)
self.parentID = _cast(int, parentID)
self.biologicalAnnotation = biologicalAnnotation
self.complexesAndMacromolecules = complexesAndMacromolecules
self.colour = colour
self.threeDVolume = threeDVolume
self.shapePrimitiveList = shapePrimitiveList
self.contourList = contourList
self.meshList = meshList
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, segmentType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if segmentType.subclass:
return segmentType.subclass(*args_, **kwargs_)
else:
return segmentType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_biologicalAnnotation(self): return self.biologicalAnnotation
def set_biologicalAnnotation(self, biologicalAnnotation): self.biologicalAnnotation = biologicalAnnotation
def get_complexesAndMacromolecules(self): return self.complexesAndMacromolecules
def set_complexesAndMacromolecules(self, complexesAndMacromolecules): self.complexesAndMacromolecules = complexesAndMacromolecules
def get_colour(self): return self.colour
def set_colour(self, colour): self.colour = colour
def get_threeDVolume(self): return self.threeDVolume
def set_threeDVolume(self, threeDVolume): self.threeDVolume = threeDVolume
def get_shapePrimitiveList(self): return self.shapePrimitiveList
def set_shapePrimitiveList(self, shapePrimitiveList): self.shapePrimitiveList = shapePrimitiveList
def get_contourList(self): return self.contourList
def set_contourList(self, contourList): self.contourList = contourList
def get_meshList(self): return self.meshList
def set_meshList(self, meshList): self.meshList = meshList
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_parentID(self): return self.parentID
def set_parentID(self, parentID): self.parentID = parentID
def hasContent_(self):
if (
self.biologicalAnnotation is not None or
self.complexesAndMacromolecules is not None or
self.colour is not None or
self.threeDVolume is not None or
self.shapePrimitiveList is not None or
self.contourList is not None or
self.meshList is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='segmentType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='segmentType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='segmentType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='segmentType'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
if self.parentID is not None and 'parentID' not in already_processed:
already_processed.add('parentID')
outfile.write(' parentID="%s"' % self.gds_format_integer(self.parentID, input_name='parentID'))
def exportChildren(self, outfile, level, namespace_='', name_='segmentType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.biologicalAnnotation is not None:
self.biologicalAnnotation.export(outfile, level, namespace_, name_='biologicalAnnotation', pretty_print=pretty_print)
if self.complexesAndMacromolecules is not None:
self.complexesAndMacromolecules.export(outfile, level, namespace_, name_='complexesAndMacromolecules', pretty_print=pretty_print)
if self.colour is not None:
self.colour.export(outfile, level, namespace_, name_='colour', pretty_print=pretty_print)
if self.threeDVolume is not None:
self.threeDVolume.export(outfile, level, namespace_, name_='threeDVolume', pretty_print=pretty_print)
if self.shapePrimitiveList is not None:
self.shapePrimitiveList.export(outfile, level, namespace_, name_='shapePrimitiveList', pretty_print=pretty_print)
if self.contourList is not None:
self.contourList.export(outfile, level, namespace_, name_='contourList', pretty_print=pretty_print)
if self.meshList is not None:
self.meshList.export(outfile, level, namespace_, name_='meshList', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
value = find_attr_value_('parentID', node)
if value is not None and 'parentID' not in already_processed:
already_processed.add('parentID')
try:
self.parentID = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.parentID < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'biologicalAnnotation':
obj_ = biologicalAnnotationType.factory()
obj_.build(child_)
self.biologicalAnnotation = obj_
obj_.original_tagname_ = 'biologicalAnnotation'
elif nodeName_ == 'complexesAndMacromolecules':
obj_ = macromoleculesAndComplexesType.factory()
obj_.build(child_)
self.complexesAndMacromolecules = obj_
obj_.original_tagname_ = 'complexesAndMacromolecules'
elif nodeName_ == 'colour':
obj_ = colourType.factory()
obj_.build(child_)
self.colour = obj_
obj_.original_tagname_ = 'colour'
elif nodeName_ == 'threeDVolume':
obj_ = threeDVolumeType.factory()
obj_.build(child_)
self.threeDVolume = obj_
obj_.original_tagname_ = 'threeDVolume'
elif nodeName_ == 'shapePrimitiveList':
obj_ = shapePrimitiveListType.factory()
obj_.build(child_)
self.shapePrimitiveList = obj_
obj_.original_tagname_ = 'shapePrimitiveList'
elif nodeName_ == 'contourList':
obj_ = contourListType.factory()
obj_.build(child_)
self.contourList = obj_
obj_.original_tagname_ = 'contourList'
elif nodeName_ == 'meshList':
obj_ = meshListType.factory()
obj_.build(child_)
self.meshList = obj_
obj_.original_tagname_ = 'meshList'
# end class segmentType
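# Illustrative sketch: a two-level segment tree. The root segment carries
# parentID=0; children reference their parent's id. Segment ids must be
# positive, per buildAttributes above.
def _example_segment_tree():
    root = segmentType(id=1, parentID=0)
    child = segmentType(id=2, parentID=1)
    return [root, child]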
class macromoleculesAndComplexesType(GeneratedsSuper):
"""Depending on the problem, one can either reference the sample on the
macromolecule level or the complex level."""
subclass = None
superclass = None
def __init__(self, macromolecule=None, complex=None):
self.original_tagname_ = None
self.macromolecule = macromolecule
self.complex = complex
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, macromoleculesAndComplexesType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if macromoleculesAndComplexesType.subclass:
return macromoleculesAndComplexesType.subclass(*args_, **kwargs_)
else:
return macromoleculesAndComplexesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_macromolecule(self): return self.macromolecule
def set_macromolecule(self, macromolecule): self.macromolecule = macromolecule
def get_complex(self): return self.complex
def set_complex(self, complex): self.complex = complex
def hasContent_(self):
if (
self.macromolecule is not None or
self.complex is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='macromoleculesAndComplexesType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='macromoleculesAndComplexesType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='macromoleculesAndComplexesType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='macromoleculesAndComplexesType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='macromoleculesAndComplexesType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.macromolecule is not None:
self.macromolecule.export(outfile, level, namespace_, name_='macromolecule', pretty_print=pretty_print)
if self.complex is not None:
self.complex.export(outfile, level, namespace_, name_='complex', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'macromolecule':
obj_ = macromoleculeType.factory()
obj_.build(child_)
self.macromolecule = obj_
obj_.original_tagname_ = 'macromolecule'
elif nodeName_ == 'complex':
obj_ = complexType.factory()
obj_.build(child_)
self.complex = obj_
obj_.original_tagname_ = 'complex'
# end class macromoleculesAndComplexesType
class threeDVolumeType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None, file=None, objectPath=None, contourLevel=None, transformId=None, format=None):
self.original_tagname_ = None
self.id = _cast(int, id)
self.file = file
self.objectPath = objectPath
self.contourLevel = contourLevel
self.transformId = transformId
self.format = format
self.validate_formatType(self.format)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, threeDVolumeType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if threeDVolumeType.subclass:
return threeDVolumeType.subclass(*args_, **kwargs_)
else:
return threeDVolumeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_file(self): return self.file
def set_file(self, file): self.file = file
def get_objectPath(self): return self.objectPath
def set_objectPath(self, objectPath): self.objectPath = objectPath
def get_contourLevel(self): return self.contourLevel
def set_contourLevel(self, contourLevel): self.contourLevel = contourLevel
def get_transformId(self): return self.transformId
def set_transformId(self, transformId): self.transformId = transformId
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_id(self): return self.id
def set_id(self, id): self.id = id
def validate_formatType(self, value):
# Validate type formatType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['runLengthEncoding', 'MRC', 'Segger']
            if value not in enumerations:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on formatType' % {"value": value})
def hasContent_(self):
if (
self.file is not None or
self.objectPath is not None or
self.contourLevel is not None or
self.transformId is not None or
self.format is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='threeDVolumeType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='threeDVolumeType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='threeDVolumeType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='threeDVolumeType'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='threeDVolumeType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.file is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfile>%s</%sfile>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.file), input_name='file')), namespace_, eol_))
if self.objectPath is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sobjectPath>%s</%sobjectPath>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.objectPath), input_name='objectPath')), namespace_, eol_))
if self.contourLevel is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scontourLevel>%s</%scontourLevel>%s' % (namespace_, self.gds_format_float(self.contourLevel, input_name='contourLevel'), namespace_, eol_))
if self.transformId is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stransformId>%s</%stransformId>%s' % (namespace_, self.gds_format_integer(self.transformId, input_name='transformId'), namespace_, eol_))
if self.format is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sformat>%s</%sformat>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.format), input_name='format')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'file':
file_ = child_.text
file_ = re_.sub(String_cleanup_pat_, " ", file_).strip()
file_ = self.gds_validate_string(file_, node, 'file')
self.file = file_
elif nodeName_ == 'objectPath':
objectPath_ = child_.text
objectPath_ = re_.sub(String_cleanup_pat_, " ", objectPath_).strip()
objectPath_ = self.gds_validate_string(objectPath_, node, 'objectPath')
self.objectPath = objectPath_
elif nodeName_ == 'contourLevel':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'contourLevel')
self.contourLevel = fval_
elif nodeName_ == 'transformId':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'transformId')
self.transformId = ival_
elif nodeName_ == 'format':
format_ = child_.text
format_ = self.gds_validate_string(format_, node, 'format')
self.format = format_
# validate type formatType
self.validate_formatType(self.format)
# end class threeDVolumeType
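# Illustrative sketch: a volume descriptor referencing an external map file
# (the file name is hypothetical); 'format' must be one of
# 'runLengthEncoding', 'MRC' or 'Segger', otherwise validation warns.
def _example_three_d_volume():
    return threeDVolumeType(id=0, file='map.mrc', contourLevel=1.5, format='MRC')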
class meshType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None, vertexList=None, polygonList=None, transformId=None):
self.original_tagname_ = None
self.id = _cast(int, id)
self.vertexList = vertexList
self.polygonList = polygonList
self.transformId = transformId
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, meshType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if meshType.subclass:
return meshType.subclass(*args_, **kwargs_)
else:
return meshType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_vertexList(self): return self.vertexList
def set_vertexList(self, vertexList): self.vertexList = vertexList
def get_polygonList(self): return self.polygonList
def set_polygonList(self, polygonList): self.polygonList = polygonList
def get_transformId(self): return self.transformId
def set_transformId(self, transformId): self.transformId = transformId
def get_id(self): return self.id
def set_id(self, id): self.id = id
def hasContent_(self):
if (
self.vertexList is not None or
self.polygonList is not None or
self.transformId is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='meshType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='meshType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='meshType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='meshType'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='meshType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.vertexList is not None:
self.vertexList.export(outfile, level, namespace_, name_='vertexList', pretty_print=pretty_print)
if self.polygonList is not None:
self.polygonList.export(outfile, level, namespace_, name_='polygonList', pretty_print=pretty_print)
if self.transformId is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stransformId>%s</%stransformId>%s' % (namespace_, self.gds_format_integer(self.transformId, input_name='transformId'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'vertexList':
obj_ = vertexListType.factory()
obj_.build(child_)
self.vertexList = obj_
obj_.original_tagname_ = 'vertexList'
elif nodeName_ == 'polygonList':
obj_ = polygonListType.factory()
obj_.build(child_)
self.polygonList = obj_
obj_.original_tagname_ = 'polygonList'
elif nodeName_ == 'transformId':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'transformId')
self.transformId = ival_
# end class meshType
class vertexListType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, numVertices=None, v=None):
self.original_tagname_ = None
self.numVertices = _cast(int, numVertices)
if v is None:
self.v = []
else:
self.v = v
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, vertexListType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if vertexListType.subclass:
return vertexListType.subclass(*args_, **kwargs_)
else:
return vertexListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_v(self): return self.v
def set_v(self, v): self.v = v
def add_v(self, value): self.v.append(value)
def insert_v_at(self, index, value): self.v.insert(index, value)
def replace_v_at(self, index, value): self.v[index] = value
def get_numVertices(self): return self.numVertices
def set_numVertices(self, numVertices): self.numVertices = numVertices
def hasContent_(self):
if (
self.v
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='vertexListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='vertexListType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='vertexListType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='vertexListType'):
if self.numVertices is not None and 'numVertices' not in already_processed:
already_processed.add('numVertices')
outfile.write(' numVertices="%s"' % self.gds_format_integer(self.numVertices, input_name='numVertices'))
def exportChildren(self, outfile, level, namespace_='', name_='vertexListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for v_ in self.v:
v_.export(outfile, level, namespace_, name_='v', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('numVertices', node)
if value is not None and 'numVertices' not in already_processed:
already_processed.add('numVertices')
try:
self.numVertices = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.numVertices <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'v':
obj_ = vertexType.factory()
obj_.build(child_)
self.v.append(obj_)
obj_.original_tagname_ = 'v'
# end class vertexListType
class polygonListType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, numPolygons=None, P=None):
self.original_tagname_ = None
self.numPolygons = _cast(int, numPolygons)
if P is None:
self.P = []
else:
self.P = P
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, polygonListType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if polygonListType.subclass:
return polygonListType.subclass(*args_, **kwargs_)
else:
return polygonListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_P(self): return self.P
def set_P(self, P): self.P = P
def add_P(self, value): self.P.append(value)
def insert_P_at(self, index, value): self.P.insert(index, value)
def replace_P_at(self, index, value): self.P[index] = value
def get_numPolygons(self): return self.numPolygons
def set_numPolygons(self, numPolygons): self.numPolygons = numPolygons
def hasContent_(self):
if (
self.P
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='polygonListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='polygonListType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='polygonListType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='polygonListType'):
if self.numPolygons is not None and 'numPolygons' not in already_processed:
already_processed.add('numPolygons')
outfile.write(' numPolygons="%s"' % self.gds_format_integer(self.numPolygons, input_name='numPolygons'))
def exportChildren(self, outfile, level, namespace_='', name_='polygonListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for P_ in self.P:
P_.export(outfile, level, namespace_, name_='P', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('numPolygons', node)
if value is not None and 'numPolygons' not in already_processed:
already_processed.add('numPolygons')
try:
self.numPolygons = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.numPolygons <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'P':
obj_ = polygonType.factory()
obj_.build(child_)
self.P.append(obj_)
obj_.original_tagname_ = 'P'
# end class polygonListType
class vertexType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, vID=None, designation='vertex', x=None, y=None, z=None):
self.original_tagname_ = None
self.vID = _cast(int, vID)
self.designation = _cast(None, designation)
self.x = x
self.y = y
self.z = z
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, vertexType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if vertexType.subclass:
return vertexType.subclass(*args_, **kwargs_)
else:
return vertexType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_x(self): return self.x
def set_x(self, x): self.x = x
def get_y(self): return self.y
def set_y(self, y): self.y = y
def get_z(self): return self.z
def set_z(self, z): self.z = z
def get_vID(self): return self.vID
def set_vID(self, vID): self.vID = vID
def get_designation(self): return self.designation
def set_designation(self, designation): self.designation = designation
def hasContent_(self):
if (
self.x is not None or
self.y is not None or
self.z is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='vertexType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='vertexType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='vertexType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='vertexType'):
if self.vID is not None and 'vID' not in already_processed:
already_processed.add('vID')
outfile.write(' vID="%s"' % self.gds_format_integer(self.vID, input_name='vID'))
if self.designation != "vertex" and 'designation' not in already_processed:
already_processed.add('designation')
outfile.write(' designation=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.designation), input_name='designation')), ))
def exportChildren(self, outfile, level, namespace_='', name_='vertexType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.x is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sx>%s</%sx>%s' % (namespace_, self.gds_format_float(self.x, input_name='x'), namespace_, eol_))
if self.y is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sy>%s</%sy>%s' % (namespace_, self.gds_format_float(self.y, input_name='y'), namespace_, eol_))
if self.z is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sz>%s</%sz>%s' % (namespace_, self.gds_format_float(self.z, input_name='z'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('vID', node)
if value is not None and 'vID' not in already_processed:
already_processed.add('vID')
try:
self.vID = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.vID < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('designation', node)
if value is not None and 'designation' not in already_processed:
already_processed.add('designation')
self.designation = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'x':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'x')
self.x = fval_
elif nodeName_ == 'y':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'y')
self.y = fval_
elif nodeName_ == 'z':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'z')
self.z = fval_
# end class vertexType
class polygonType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, PID=None, v=None):
self.original_tagname_ = None
self.PID = _cast(int, PID)
if v is None:
self.v = []
else:
self.v = v
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, polygonType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if polygonType.subclass:
return polygonType.subclass(*args_, **kwargs_)
else:
return polygonType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_v(self): return self.v
def set_v(self, v): self.v = v
def add_v(self, value): self.v.append(value)
def insert_v_at(self, index, value): self.v.insert(index, value)
def replace_v_at(self, index, value): self.v[index] = value
def get_PID(self): return self.PID
def set_PID(self, PID): self.PID = PID
def hasContent_(self):
if (
self.v
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='polygonType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='polygonType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='polygonType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='polygonType'):
if self.PID is not None and 'PID' not in already_processed:
already_processed.add('PID')
outfile.write(' PID="%s"' % self.gds_format_integer(self.PID, input_name='PID'))
def exportChildren(self, outfile, level, namespace_='', name_='polygonType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for v_ in self.v:
showIndent(outfile, level, pretty_print)
outfile.write('<%sv>%s</%sv>%s' % (namespace_, self.gds_format_integer(v_, input_name='v'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('PID', node)
if value is not None and 'PID' not in already_processed:
already_processed.add('PID')
try:
self.PID = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.PID < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'v':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'v')
self.v.append(ival_)
# end class polygonType
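# Example (sketch, not part of the generated bindings): a polygonType carries
# non-negative vertex indices in its 'v' children and an integer 'PID'
# attribute. The values below are illustrative only.
#
#     import sys
#     tri = polygonType(PID=7, v=[0, 1, 2])    # triangle over vertices 0, 1, 2
#     tri.export(sys.stdout, 0, name_='polygon')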
class colourType(GeneratedsSuper):
"""The 'name' corresponds to the sRGB colour space name and maps
precisely to an RGB value. When this happens, the alpha channel
should be set to '1' (opaque). Requires the use of an external
Python module (webcolors.py)."""
subclass = None
superclass = None
def __init__(self, name=None, rgba=None):
self.original_tagname_ = None
self.name = name
self.rgba = rgba
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, colourType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if colourType.subclass:
return colourType.subclass(*args_, **kwargs_)
else:
return colourType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_rgba(self): return self.rgba
def set_rgba(self, rgba): self.rgba = rgba
def hasContent_(self):
if (
self.name is not None or
self.rgba is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='colourType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='colourType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='colourType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='colourType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='colourType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.name), input_name='name')), namespace_, eol_))
if self.rgba is not None:
self.rgba.export(outfile, level, namespace_, name_='rgba', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'name':
name_ = child_.text
name_ = re_.sub(String_cleanup_pat_, " ", name_).strip()
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'rgba':
obj_ = rgbaType.factory()
obj_.build(child_)
self.rgba = obj_
obj_.original_tagname_ = 'rgba'
# end class colourType
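# Example (sketch): a colour may be given as an sRGB name, an explicit rgba
# quadruple, or both; rgbaType's alpha defaults to 1.0 (opaque). Values are
# illustrative only.
#
#     import sys
#     c = colourType(name='red')
#     c.set_rgba(rgbaType(red=1.0, green=0.0, blue=0.0))  # alpha stays 1.0
#     c.export(sys.stdout, 0, name_='colour')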
class rgbaType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, red=None, green=None, blue=None, alpha=1.0):
self.original_tagname_ = None
self.red = red
self.validate_colourChannelType(self.red)
self.green = green
self.validate_colourChannelType(self.green)
self.blue = blue
self.validate_colourChannelType(self.blue)
self.alpha = alpha
self.validate_colourChannelType(self.alpha)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, rgbaType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if rgbaType.subclass:
return rgbaType.subclass(*args_, **kwargs_)
else:
return rgbaType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_red(self): return self.red
def set_red(self, red): self.red = red
def get_green(self): return self.green
def set_green(self, green): self.green = green
def get_blue(self): return self.blue
def set_blue(self, blue): self.blue = blue
def get_alpha(self): return self.alpha
def set_alpha(self, alpha): self.alpha = alpha
def validate_colourChannelType(self, value):
# Validate type colourChannelType, a restriction on xs:float.
if value is not None and Validate_simpletypes_:
if value < 0.0:
warnings_.warn('Value "%(value)s" does not match xsd minInclusive restriction on colourChannelType' % {"value" : value} )
if value > 1.0:
warnings_.warn('Value "%(value)s" does not match xsd maxInclusive restriction on colourChannelType' % {"value" : value} )
def hasContent_(self):
if (
self.red is not None or
self.green is not None or
self.blue is not None or
self.alpha != 1.0
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='rgbaType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='rgbaType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='rgbaType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='rgbaType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='rgbaType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.red is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sred>%s</%sred>%s' % (namespace_, self.gds_format_float(self.red, input_name='red'), namespace_, eol_))
if self.green is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sgreen>%s</%sgreen>%s' % (namespace_, self.gds_format_float(self.green, input_name='green'), namespace_, eol_))
if self.blue is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sblue>%s</%sblue>%s' % (namespace_, self.gds_format_float(self.blue, input_name='blue'), namespace_, eol_))
if self.alpha != 1.0:
showIndent(outfile, level, pretty_print)
outfile.write('<%salpha>%s</%salpha>%s' % (namespace_, self.gds_format_float(self.alpha, input_name='alpha'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'red':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'red')
self.red = fval_
# validate type colourChannelType
self.validate_colourChannelType(self.red)
elif nodeName_ == 'green':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'green')
self.green = fval_
# validate type colourChannelType
self.validate_colourChannelType(self.green)
elif nodeName_ == 'blue':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'blue')
self.blue = fval_
# validate type colourChannelType
self.validate_colourChannelType(self.blue)
elif nodeName_ == 'alpha':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'alpha')
self.alpha = fval_
# validate type colourChannelType
self.validate_colourChannelType(self.alpha)
# end class rgbaType
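# Note (sketch): channel validation is advisory only. With the module-level
# Validate_simpletypes_ flag enabled, an out-of-range channel emits a warning
# but the value is still stored:
#
#     rgbaType(red=1.5, green=0.0, blue=0.0)   # warns: maxInclusive is 1.0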
class contourType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None, p=None):
self.original_tagname_ = None
self.id = _cast(int, id)
if p is None:
self.p = []
else:
self.p = p
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, contourType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if contourType.subclass:
return contourType.subclass(*args_, **kwargs_)
else:
return contourType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_p(self): return self.p
def set_p(self, p): self.p = p
def add_p(self, value): self.p.append(value)
def insert_p_at(self, index, value): self.p.insert(index, value)
def replace_p_at(self, index, value): self.p[index] = value
def get_id(self): return self.id
def set_id(self, id): self.id = id
def hasContent_(self):
if (
self.p
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='contourType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='contourType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='contourType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='contourType'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='contourType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for p_ in self.p:
p_.export(outfile, level, namespace_, name_='p', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'p':
obj_ = floatVectorType.factory()
obj_.build(child_)
self.p.append(obj_)
obj_.original_tagname_ = 'p'
# end class contourType
class floatVectorType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, x=None, y=None, z=None):
self.original_tagname_ = None
self.x = _cast(float, x)
self.y = _cast(float, y)
self.z = _cast(float, z)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, floatVectorType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if floatVectorType.subclass:
return floatVectorType.subclass(*args_, **kwargs_)
else:
return floatVectorType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_x(self): return self.x
def set_x(self, x): self.x = x
def get_y(self): return self.y
def set_y(self, y): self.y = y
def get_z(self): return self.z
def set_z(self, z): self.z = z
    def hasContent_(self):
        # This type has no child elements; everything is carried in attributes.
        return False
def export(self, outfile, level, namespace_='', name_='floatVectorType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='floatVectorType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='floatVectorType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='floatVectorType'):
if self.x is not None and 'x' not in already_processed:
already_processed.add('x')
outfile.write(' x="%s"' % self.gds_format_float(self.x, input_name='x'))
if self.y is not None and 'y' not in already_processed:
already_processed.add('y')
outfile.write(' y="%s"' % self.gds_format_float(self.y, input_name='y'))
if self.z is not None and 'z' not in already_processed:
already_processed.add('z')
outfile.write(' z="%s"' % self.gds_format_float(self.z, input_name='z'))
def exportChildren(self, outfile, level, namespace_='', name_='floatVectorType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('x', node)
if value is not None and 'x' not in already_processed:
already_processed.add('x')
try:
self.x = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (x): %s' % exp)
value = find_attr_value_('y', node)
if value is not None and 'y' not in already_processed:
already_processed.add('y')
try:
self.y = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (y): %s' % exp)
value = find_attr_value_('z', node)
if value is not None and 'z' not in already_processed:
already_processed.add('z')
try:
self.z = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (z): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class floatVectorType
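# Example (sketch): a contourType is an ordered list of floatVectorType
# points, each serialised as x/y/z attributes on a 'p' element. Values are
# illustrative only.
#
#     import sys
#     c = contourType(id=0)
#     for x, y in [(0.0, 0.0), (1.0, 0.0), (0.5, 1.0)]:
#         c.add_p(floatVectorType(x=x, y=y, z=0.0))
#     c.export(sys.stdout, 0, name_='contour')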
class transform(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, transform)
if subclass is not None:
return subclass(*args_, **kwargs_)
if transform.subclass:
return transform.subclass(*args_, **kwargs_)
else:
return transform(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        # Empty placeholder type: no child elements.
        return False
def export(self, outfile, level, namespace_='', name_='transform', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='transform')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='transform', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='transform'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='transform', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class transform
class shapePrimitive(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, shapePrimitive)
if subclass is not None:
return subclass(*args_, **kwargs_)
if shapePrimitive.subclass:
return shapePrimitive.subclass(*args_, **kwargs_)
else:
return shapePrimitive(*args_, **kwargs_)
factory = staticmethod(factory)
    def hasContent_(self):
        # Abstract base for shapes: no child elements of its own.
        return False
def export(self, outfile, level, namespace_='', name_='shapePrimitive', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='shapePrimitive')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='shapePrimitive', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='shapePrimitive'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='shapePrimitive', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class shapePrimitive
class cuboid(GeneratedsSuper):
"""3D rectangle'attribute' could refer to figure of merit (FOM)"""
subclass = None
superclass = None
def __init__(self, id=None, x=None, y=None, z=None, transformId=None, attribute=None):
self.original_tagname_ = None
self.id = _cast(int, id)
self.x = x
self.validate_dimensionType(self.x)
self.y = y
self.validate_dimensionType(self.y)
self.z = z
self.validate_dimensionType(self.z)
self.transformId = transformId
self.attribute = attribute
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, cuboid)
if subclass is not None:
return subclass(*args_, **kwargs_)
if cuboid.subclass:
return cuboid.subclass(*args_, **kwargs_)
else:
return cuboid(*args_, **kwargs_)
factory = staticmethod(factory)
def get_x(self): return self.x
def set_x(self, x): self.x = x
def get_y(self): return self.y
def set_y(self, y): self.y = y
def get_z(self): return self.z
def set_z(self, z): self.z = z
def get_transformId(self): return self.transformId
def set_transformId(self, transformId): self.transformId = transformId
def get_attribute(self): return self.attribute
def set_attribute(self, attribute): self.attribute = attribute
def get_id(self): return self.id
def set_id(self, id): self.id = id
def validate_dimensionType(self, value):
# Validate type dimensionType, a restriction on xs:float.
if value is not None and Validate_simpletypes_:
if value < 0:
warnings_.warn('Value "%(value)s" does not match xsd minInclusive restriction on dimensionType' % {"value" : value} )
def hasContent_(self):
if (
self.x is not None or
self.y is not None or
self.z is not None or
self.transformId is not None or
self.attribute is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='cuboid', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cuboid')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='cuboid', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cuboid'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='cuboid', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.x is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sx>%s</%sx>%s' % (namespace_, self.gds_format_float(self.x, input_name='x'), namespace_, eol_))
if self.y is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sy>%s</%sy>%s' % (namespace_, self.gds_format_float(self.y, input_name='y'), namespace_, eol_))
if self.z is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sz>%s</%sz>%s' % (namespace_, self.gds_format_float(self.z, input_name='z'), namespace_, eol_))
if self.transformId is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stransformId>%s</%stransformId>%s' % (namespace_, self.gds_format_integer(self.transformId, input_name='transformId'), namespace_, eol_))
if self.attribute is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sattribute>%s</%sattribute>%s' % (namespace_, self.gds_format_float(self.attribute, input_name='attribute'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'x':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'x')
self.x = fval_
# validate type dimensionType
self.validate_dimensionType(self.x)
elif nodeName_ == 'y':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'y')
self.y = fval_
# validate type dimensionType
self.validate_dimensionType(self.y)
elif nodeName_ == 'z':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'z')
self.z = fval_
# validate type dimensionType
self.validate_dimensionType(self.z)
elif nodeName_ == 'transformId':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'transformId')
self.transformId = ival_
elif nodeName_ == 'attribute':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'attribute')
self.attribute = fval_
# end class cuboid
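# Example (sketch): cuboid, ellipsoid, cylinder and cone all follow the same
# pattern: non-negative dimensions, an optional transformId referring to a
# transform defined elsewhere in the document, and an optional float
# 'attribute' (e.g. a figure of merit). Values are illustrative only.
#
#     import sys
#     box = cuboid(id=1, x=10.0, y=20.0, z=5.0, attribute=0.87)
#     box.export(sys.stdout, 0)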
class ellipsoid(GeneratedsSuper):
"""3D sphere"""
subclass = None
superclass = None
def __init__(self, id=None, x=None, y=None, z=None, transformId=None, attribute=None):
self.original_tagname_ = None
self.id = _cast(int, id)
self.x = x
self.validate_dimensionType(self.x)
self.y = y
self.validate_dimensionType(self.y)
self.z = z
self.validate_dimensionType(self.z)
self.transformId = transformId
self.attribute = attribute
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ellipsoid)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ellipsoid.subclass:
return ellipsoid.subclass(*args_, **kwargs_)
else:
return ellipsoid(*args_, **kwargs_)
factory = staticmethod(factory)
def get_x(self): return self.x
def set_x(self, x): self.x = x
def get_y(self): return self.y
def set_y(self, y): self.y = y
def get_z(self): return self.z
def set_z(self, z): self.z = z
def get_transformId(self): return self.transformId
def set_transformId(self, transformId): self.transformId = transformId
def get_attribute(self): return self.attribute
def set_attribute(self, attribute): self.attribute = attribute
def get_id(self): return self.id
def set_id(self, id): self.id = id
def validate_dimensionType(self, value):
# Validate type dimensionType, a restriction on xs:float.
if value is not None and Validate_simpletypes_:
if value < 0:
warnings_.warn('Value "%(value)s" does not match xsd minInclusive restriction on dimensionType' % {"value" : value} )
def hasContent_(self):
if (
self.x is not None or
self.y is not None or
self.z is not None or
self.transformId is not None or
self.attribute is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ellipsoid', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ellipsoid')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ellipsoid', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ellipsoid'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='ellipsoid', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.x is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sx>%s</%sx>%s' % (namespace_, self.gds_format_float(self.x, input_name='x'), namespace_, eol_))
if self.y is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sy>%s</%sy>%s' % (namespace_, self.gds_format_float(self.y, input_name='y'), namespace_, eol_))
if self.z is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sz>%s</%sz>%s' % (namespace_, self.gds_format_float(self.z, input_name='z'), namespace_, eol_))
if self.transformId is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stransformId>%s</%stransformId>%s' % (namespace_, self.gds_format_integer(self.transformId, input_name='transformId'), namespace_, eol_))
if self.attribute is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sattribute>%s</%sattribute>%s' % (namespace_, self.gds_format_float(self.attribute, input_name='attribute'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'x':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'x')
self.x = fval_
# validate type dimensionType
self.validate_dimensionType(self.x)
elif nodeName_ == 'y':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'y')
self.y = fval_
# validate type dimensionType
self.validate_dimensionType(self.y)
elif nodeName_ == 'z':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'z')
self.z = fval_
# validate type dimensionType
self.validate_dimensionType(self.z)
elif nodeName_ == 'transformId':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'transformId')
self.transformId = ival_
elif nodeName_ == 'attribute':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'attribute')
self.attribute = fval_
# end class ellipsoid
class cylinder(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None, height=None, diameter=None, transformId=None, attribute=None):
self.original_tagname_ = None
self.id = _cast(int, id)
self.height = height
self.validate_dimensionType(self.height)
self.diameter = diameter
self.validate_dimensionType(self.diameter)
self.transformId = transformId
self.attribute = attribute
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, cylinder)
if subclass is not None:
return subclass(*args_, **kwargs_)
if cylinder.subclass:
return cylinder.subclass(*args_, **kwargs_)
else:
return cylinder(*args_, **kwargs_)
factory = staticmethod(factory)
def get_height(self): return self.height
def set_height(self, height): self.height = height
def get_diameter(self): return self.diameter
def set_diameter(self, diameter): self.diameter = diameter
def get_transformId(self): return self.transformId
def set_transformId(self, transformId): self.transformId = transformId
def get_attribute(self): return self.attribute
def set_attribute(self, attribute): self.attribute = attribute
def get_id(self): return self.id
def set_id(self, id): self.id = id
def validate_dimensionType(self, value):
# Validate type dimensionType, a restriction on xs:float.
if value is not None and Validate_simpletypes_:
if value < 0:
warnings_.warn('Value "%(value)s" does not match xsd minInclusive restriction on dimensionType' % {"value" : value} )
def hasContent_(self):
if (
self.height is not None or
self.diameter is not None or
self.transformId is not None or
self.attribute is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='cylinder', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cylinder')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='cylinder', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cylinder'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='cylinder', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.height is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sheight>%s</%sheight>%s' % (namespace_, self.gds_format_float(self.height, input_name='height'), namespace_, eol_))
if self.diameter is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdiameter>%s</%sdiameter>%s' % (namespace_, self.gds_format_float(self.diameter, input_name='diameter'), namespace_, eol_))
if self.transformId is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stransformId>%s</%stransformId>%s' % (namespace_, self.gds_format_integer(self.transformId, input_name='transformId'), namespace_, eol_))
if self.attribute is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sattribute>%s</%sattribute>%s' % (namespace_, self.gds_format_float(self.attribute, input_name='attribute'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'height':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'height')
self.height = fval_
# validate type dimensionType
self.validate_dimensionType(self.height)
elif nodeName_ == 'diameter':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'diameter')
self.diameter = fval_
# validate type dimensionType
self.validate_dimensionType(self.diameter)
elif nodeName_ == 'transformId':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'transformId')
self.transformId = ival_
elif nodeName_ == 'attribute':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'attribute')
self.attribute = fval_
# end class cylinder
class cone(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None, height=None, bottomRadius=None, transformId=None, attribute=None):
self.original_tagname_ = None
self.id = _cast(int, id)
self.height = height
self.validate_dimensionType(self.height)
self.bottomRadius = bottomRadius
self.validate_dimensionType(self.bottomRadius)
self.transformId = transformId
self.attribute = attribute
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, cone)
if subclass is not None:
return subclass(*args_, **kwargs_)
if cone.subclass:
return cone.subclass(*args_, **kwargs_)
else:
return cone(*args_, **kwargs_)
factory = staticmethod(factory)
def get_height(self): return self.height
def set_height(self, height): self.height = height
def get_bottomRadius(self): return self.bottomRadius
def set_bottomRadius(self, bottomRadius): self.bottomRadius = bottomRadius
def get_transformId(self): return self.transformId
def set_transformId(self, transformId): self.transformId = transformId
def get_attribute(self): return self.attribute
def set_attribute(self, attribute): self.attribute = attribute
def get_id(self): return self.id
def set_id(self, id): self.id = id
def validate_dimensionType(self, value):
# Validate type dimensionType, a restriction on xs:float.
if value is not None and Validate_simpletypes_:
if value < 0:
warnings_.warn('Value "%(value)s" does not match xsd minInclusive restriction on dimensionType' % {"value" : value} )
def hasContent_(self):
if (
self.height is not None or
self.bottomRadius is not None or
self.transformId is not None or
self.attribute is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='cone', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cone')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='cone', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cone'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, outfile, level, namespace_='', name_='cone', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.height is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sheight>%s</%sheight>%s' % (namespace_, self.gds_format_float(self.height, input_name='height'), namespace_, eol_))
if self.bottomRadius is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sbottomRadius>%s</%sbottomRadius>%s' % (namespace_, self.gds_format_float(self.bottomRadius, input_name='bottomRadius'), namespace_, eol_))
if self.transformId is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stransformId>%s</%stransformId>%s' % (namespace_, self.gds_format_integer(self.transformId, input_name='transformId'), namespace_, eol_))
if self.attribute is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sattribute>%s</%sattribute>%s' % (namespace_, self.gds_format_float(self.attribute, input_name='attribute'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.id < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'height':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'height')
self.height = fval_
# validate type dimensionType
self.validate_dimensionType(self.height)
elif nodeName_ == 'bottomRadius':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'bottomRadius')
self.bottomRadius = fval_
# validate type dimensionType
self.validate_dimensionType(self.bottomRadius)
elif nodeName_ == 'transformId':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'transformId')
self.transformId = ival_
elif nodeName_ == 'attribute':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'attribute')
self.attribute = fval_
# end class cone
class meshPointType(GeneratedsSuper):
"""A mesh is a sequence of trianglesDeprecated"""
subclass = None
superclass = None
def __init__(self, index=None, designation=None, point=None):
self.original_tagname_ = None
self.index = _cast(int, index)
self.designation = _cast(None, designation)
if point is None:
self.point = []
else:
self.point = point
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, meshPointType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if meshPointType.subclass:
return meshPointType.subclass(*args_, **kwargs_)
else:
return meshPointType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_point(self): return self.point
def set_point(self, point): self.point = point
def add_point(self, value): self.point.append(value)
def insert_point_at(self, index, value): self.point.insert(index, value)
def replace_point_at(self, index, value): self.point[index] = value
def get_index(self): return self.index
def set_index(self, index): self.index = index
def get_designation(self): return self.designation
def set_designation(self, designation): self.designation = designation
def hasContent_(self):
if (
self.point
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='meshPointType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='meshPointType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='meshPointType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='meshPointType'):
if self.index is not None and 'index' not in already_processed:
already_processed.add('index')
outfile.write(' index="%s"' % self.gds_format_integer(self.index, input_name='index'))
if self.designation is not None and 'designation' not in already_processed:
already_processed.add('designation')
outfile.write(' designation=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.designation), input_name='designation')), ))
def exportChildren(self, outfile, level, namespace_='', name_='meshPointType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for point_ in self.point:
point_.export(outfile, level, namespace_, name_='point', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('index', node)
if value is not None and 'index' not in already_processed:
already_processed.add('index')
try:
self.index = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.index < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('designation', node)
if value is not None and 'designation' not in already_processed:
already_processed.add('designation')
self.designation = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'point':
obj_ = floatVectorType.factory()
obj_.build(child_)
self.point.append(obj_)
obj_.original_tagname_ = 'point'
# end class meshPointType
class hsvType(GeneratedsSuper):
"""Deprecated"""
subclass = None
superclass = None
def __init__(self, hue=None, saturation=None, value=None):
self.original_tagname_ = None
self.hue = hue
self.saturation = saturation
self.value = value
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, hsvType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if hsvType.subclass:
return hsvType.subclass(*args_, **kwargs_)
else:
return hsvType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_hue(self): return self.hue
def set_hue(self, hue): self.hue = hue
def get_saturation(self): return self.saturation
def set_saturation(self, saturation): self.saturation = saturation
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
self.hue is not None or
self.saturation is not None or
self.value is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='hsvType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='hsvType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='hsvType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='hsvType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='hsvType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.hue is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%shue>%s</%shue>%s' % (namespace_, self.gds_format_float(self.hue, input_name='hue'), namespace_, eol_))
if self.saturation is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%ssaturation>%s</%ssaturation>%s' % (namespace_, self.gds_format_float(self.saturation, input_name='saturation'), namespace_, eol_))
if self.value is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%svalue>%s</%svalue>%s' % (namespace_, self.gds_format_float(self.value, input_name='value'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'hue':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'hue')
self.hue = fval_
elif nodeName_ == 'saturation':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'saturation')
self.saturation = fval_
elif nodeName_ == 'value':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'value')
self.value = fval_
# end class hsvType
class runLengthEncodingType(GeneratedsSuper):
"""Deprecated"""
subclass = None
superclass = None
def __init__(self, string=None, offset=None, boxSize=None):
self.original_tagname_ = None
self.string = string
self.validate_stringType(self.string)
self.offset = offset
self.boxSize = boxSize
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, runLengthEncodingType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if runLengthEncodingType.subclass:
return runLengthEncodingType.subclass(*args_, **kwargs_)
else:
return runLengthEncodingType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_string(self): return self.string
def set_string(self, string): self.string = string
def get_offset(self): return self.offset
def set_offset(self, offset): self.offset = offset
def get_boxSize(self): return self.boxSize
def set_boxSize(self, boxSize): self.boxSize = boxSize
def validate_stringType(self, value):
# Validate type stringType, a restriction on xs:token.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_stringType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_stringType_patterns_, ))
validate_stringType_patterns_ = [['^\\d+(,\\d+)+$']]
def hasContent_(self):
if (
self.string is not None or
self.offset is not None or
self.boxSize is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='runLengthEncodingType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='runLengthEncodingType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='runLengthEncodingType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='runLengthEncodingType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='runLengthEncodingType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.string is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sstring>%s</%sstring>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.string), input_name='string')), namespace_, eol_))
if self.offset is not None:
self.offset.export(outfile, level, namespace_, name_='offset', pretty_print=pretty_print)
if self.boxSize is not None:
self.boxSize.export(outfile, level, namespace_, name_='boxSize', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'string':
string_ = child_.text
string_ = re_.sub(String_cleanup_pat_, " ", string_).strip()
string_ = self.gds_validate_string(string_, node, 'string')
self.string = string_
# validate type stringType
self.validate_stringType(self.string)
elif nodeName_ == 'offset':
obj_ = integerVectorType.factory()
obj_.build(child_)
self.offset = obj_
obj_.original_tagname_ = 'offset'
elif nodeName_ == 'boxSize':
obj_ = positiveIntegerVectorType.factory()
obj_.build(child_)
self.boxSize = obj_
obj_.original_tagname_ = 'boxSize'
# end class runLengthEncodingType
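# Note (sketch): the 'string' element must match ^\d+(,\d+)+$, i.e. two or
# more comma-separated non-negative integers, e.g. "0,5,1,3". A run-length
# encoding plausibly alternates (value, count) pairs, but the schema only
# constrains the format, and the type is deprecated.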
class integerVectorType(GeneratedsSuper):
"""Deprecated"""
subclass = None
superclass = None
def __init__(self, x=None, y=None, z=None):
self.original_tagname_ = None
self.x = x
self.y = y
self.z = z
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, integerVectorType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if integerVectorType.subclass:
return integerVectorType.subclass(*args_, **kwargs_)
else:
return integerVectorType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_x(self): return self.x
def set_x(self, x): self.x = x
def get_y(self): return self.y
def set_y(self, y): self.y = y
def get_z(self): return self.z
def set_z(self, z): self.z = z
def hasContent_(self):
if (
self.x is not None or
self.y is not None or
self.z is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='integerVectorType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='integerVectorType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='integerVectorType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='integerVectorType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='integerVectorType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.x is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sx>%s</%sx>%s' % (namespace_, self.gds_format_integer(self.x, input_name='x'), namespace_, eol_))
if self.y is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sy>%s</%sy>%s' % (namespace_, self.gds_format_integer(self.y, input_name='y'), namespace_, eol_))
if self.z is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sz>%s</%sz>%s' % (namespace_, self.gds_format_integer(self.z, input_name='z'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'x':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'x')
self.x = ival_
elif nodeName_ == 'y':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'y')
self.y = ival_
elif nodeName_ == 'z':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'z')
self.z = ival_
# end class integerVectorType
class positiveIntegerVectorType(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, x=None, y=None, z=None):
        self.original_tagname_ = None
        self.x = x
        self.y = y
        self.z = z
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, positiveIntegerVectorType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if positiveIntegerVectorType.subclass:
return positiveIntegerVectorType.subclass(*args_, **kwargs_)
else:
return positiveIntegerVectorType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_x(self): return self.x
def set_x(self, x): self.x = x
def get_y(self): return self.y
def set_y(self, y): self.y = y
def get_z(self): return self.z
def set_z(self, z): self.z = z
def hasContent_(self):
if (
self.x is not None or
self.y is not None or
            self.z is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='positiveIntegerVectorType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='positiveIntegerVectorType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='positiveIntegerVectorType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='positiveIntegerVectorType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='positiveIntegerVectorType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.x is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sx>%s</%sx>%s' % (namespace_, self.gds_format_integer(self.x, input_name='x'), namespace_, eol_))
if self.y is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sy>%s</%sy>%s' % (namespace_, self.gds_format_integer(self.y, input_name='y'), namespace_, eol_))
        if self.z is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sz>%s</%sz>%s' % (namespace_, self.gds_format_integer(self.z, input_name='z'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'x':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'x')
self.x = ival_
elif nodeName_ == 'y':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'y')
self.y = ival_
        elif nodeName_ == 'z':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            if ival_ <= 0:
                raise_parse_error(child_, 'requires positiveInteger')
            ival_ = self.gds_validate_integer(ival_, node, 'z')
            self.z = ival_
# end class positiveIntegerVectorType
class transformListType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, transform=None):
self.original_tagname_ = None
if transform is None:
self.transform = []
else:
self.transform = transform
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, transformListType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if transformListType.subclass:
return transformListType.subclass(*args_, **kwargs_)
else:
return transformListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_transform(self): return self.transform
def set_transform(self, transform): self.transform = transform
def add_transform(self, value): self.transform.append(value)
def insert_transform_at(self, index, value): self.transform.insert(index, value)
def replace_transform_at(self, index, value): self.transform[index] = value
def hasContent_(self):
if (
self.transform
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='transformListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='transformListType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='transformListType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='transformListType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='transformListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for transform_ in self.transform:
transform_.export(outfile, level, namespace_, name_='transform', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'transform':
obj_ = transform.factory()
obj_.build(child_)
self.transform.append(obj_)
obj_.original_tagname_ = 'transform'
elif nodeName_ == 'transformationMatrix':
obj_ = transformationMatrixType.factory()
obj_.build(child_)
self.transform.append(obj_)
obj_.original_tagname_ = 'transformationMatrix'
elif nodeName_ == 'canonicalEulerAngles':
obj_ = canonicalEulerAnglesType.factory()
obj_.build(child_)
self.transform.append(obj_)
obj_.original_tagname_ = 'canonicalEulerAngles'
elif nodeName_ == 'viewVectorRotation':
obj_ = viewVectorRotationType.factory()
obj_.build(child_)
self.transform.append(obj_)
obj_.original_tagname_ = 'viewVectorRotation'
# end class transformListType
class segmentListType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, segment=None):
self.original_tagname_ = None
if segment is None:
self.segment = []
else:
self.segment = segment
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, segmentListType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if segmentListType.subclass:
return segmentListType.subclass(*args_, **kwargs_)
else:
return segmentListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_segment(self): return self.segment
def set_segment(self, segment): self.segment = segment
def add_segment(self, value): self.segment.append(value)
def insert_segment_at(self, index, value): self.segment.insert(index, value)
def replace_segment_at(self, index, value): self.segment[index] = value
def hasContent_(self):
if (
self.segment
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='segmentListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='segmentListType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='segmentListType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='segmentListType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='segmentListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for segment_ in self.segment:
segment_.export(outfile, level, namespace_, name_='segment', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'segment':
obj_ = segmentType.factory()
obj_.build(child_)
self.segment.append(obj_)
obj_.original_tagname_ = 'segment'
# end class segmentListType
class biologicalAnnotationType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, description=None, externalReferences=None, numberOfInstances=None):
self.original_tagname_ = None
self.description = description
if externalReferences is None:
self.externalReferences = []
else:
self.externalReferences = externalReferences
self.numberOfInstances = numberOfInstances
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, biologicalAnnotationType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if biologicalAnnotationType.subclass:
return biologicalAnnotationType.subclass(*args_, **kwargs_)
else:
return biologicalAnnotationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_description(self): return self.description
def set_description(self, description): self.description = description
def get_externalReferences(self): return self.externalReferences
def set_externalReferences(self, externalReferences): self.externalReferences = externalReferences
def add_externalReferences(self, value): self.externalReferences.append(value)
def insert_externalReferences_at(self, index, value): self.externalReferences.insert(index, value)
def replace_externalReferences_at(self, index, value): self.externalReferences[index] = value
def get_numberOfInstances(self): return self.numberOfInstances
def set_numberOfInstances(self, numberOfInstances): self.numberOfInstances = numberOfInstances
def hasContent_(self):
if (
self.description is not None or
self.externalReferences or
self.numberOfInstances is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='biologicalAnnotationType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='biologicalAnnotationType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='biologicalAnnotationType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='biologicalAnnotationType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='biologicalAnnotationType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.description is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdescription>%s</%sdescription>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), namespace_, eol_))
for externalReferences_ in self.externalReferences:
externalReferences_.export(outfile, level, namespace_, name_='externalReferences', pretty_print=pretty_print)
if self.numberOfInstances is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snumberOfInstances>%s</%snumberOfInstances>%s' % (namespace_, self.gds_format_integer(self.numberOfInstances, input_name='numberOfInstances'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'description':
description_ = child_.text
description_ = self.gds_validate_string(description_, node, 'description')
self.description = description_
elif nodeName_ == 'externalReferences':
obj_ = externalReferencesType.factory()
obj_.build(child_)
self.externalReferences.append(obj_)
obj_.original_tagname_ = 'externalReferences'
elif nodeName_ == 'numberOfInstances':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'numberOfInstances')
self.numberOfInstances = ival_
# end class biologicalAnnotationType
class description(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, description)
if subclass is not None:
return subclass(*args_, **kwargs_)
if description.subclass:
return description.subclass(*args_, **kwargs_)
else:
return description(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='description', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='description')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='description', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='description'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='description', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class description
class externalReferencesType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, type_=None, otherType=None, valueOf_=None):
self.original_tagname_ = None
self.type_ = _cast(None, type_)
self.otherType = _cast(None, otherType)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, externalReferencesType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if externalReferencesType.subclass:
return externalReferencesType.subclass(*args_, **kwargs_)
else:
return externalReferencesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_otherType(self): return self.otherType
def set_otherType(self, otherType): self.otherType = otherType
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
1 if type(self.valueOf_) in [int,float] else self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='externalReferencesType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='externalReferencesType')
if self.hasContent_():
outfile.write('>')
outfile.write((quote_xml(self.valueOf_) if type(self.valueOf_) is str else self.gds_encode(str(self.valueOf_))))
self.exportChildren(outfile, level + 1, namespace_='', name_='externalReferencesType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='externalReferencesType'):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), ))
if self.otherType is not None and 'otherType' not in already_processed:
already_processed.add('otherType')
outfile.write(' otherType=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.otherType), input_name='otherType')), ))
def exportChildren(self, outfile, level, namespace_='', name_='externalReferencesType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
self.type_ = ' '.join(self.type_.split())
value = find_attr_value_('otherType', node)
if value is not None and 'otherType' not in already_processed:
already_processed.add('otherType')
self.otherType = value
self.otherType = ' '.join(self.otherType.split())
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class externalReferencesType
class shapePrimitiveListType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, shapePrimitive=None):
self.original_tagname_ = None
if shapePrimitive is None:
self.shapePrimitive = []
else:
self.shapePrimitive = shapePrimitive
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, shapePrimitiveListType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if shapePrimitiveListType.subclass:
return shapePrimitiveListType.subclass(*args_, **kwargs_)
else:
return shapePrimitiveListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_shapePrimitive(self): return self.shapePrimitive
def set_shapePrimitive(self, shapePrimitive): self.shapePrimitive = shapePrimitive
def add_shapePrimitive(self, value): self.shapePrimitive.append(value)
def insert_shapePrimitive_at(self, index, value): self.shapePrimitive.insert(index, value)
def replace_shapePrimitive_at(self, index, value): self.shapePrimitive[index] = value
def hasContent_(self):
if (
self.shapePrimitive
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='shapePrimitiveListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='shapePrimitiveListType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='shapePrimitiveListType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='shapePrimitiveListType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='shapePrimitiveListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for shapePrimitive_ in self.shapePrimitive:
shapePrimitive_.export(outfile, level, namespace_, name_='shapePrimitive', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'shapePrimitive':
obj_ = shapePrimitive.factory()
obj_.build(child_)
self.shapePrimitive.append(obj_)
obj_.original_tagname_ = 'shapePrimitive'
elif nodeName_ == 'cuboid':
obj_ = cuboid.factory()
obj_.build(child_)
self.shapePrimitive.append(obj_)
obj_.original_tagname_ = 'cuboid'
elif nodeName_ == 'ellipsoid':
obj_ = ellipsoid.factory()
obj_.build(child_)
self.shapePrimitive.append(obj_)
obj_.original_tagname_ = 'ellipsoid'
elif nodeName_ == 'cylinder':
obj_ = cylinder.factory()
obj_.build(child_)
self.shapePrimitive.append(obj_)
obj_.original_tagname_ = 'cylinder'
elif nodeName_ == 'cone':
obj_ = cone.factory()
obj_.build(child_)
self.shapePrimitive.append(obj_)
obj_.original_tagname_ = 'cone'
elif nodeName_ == 'subtomogramAverage':
obj_ = threeDVolumeType.factory()
obj_.build(child_)
self.shapePrimitive.append(obj_)
obj_.original_tagname_ = 'subtomogramAverage'
# end class shapePrimitiveListType
class contourListType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, contour=None, transformId=None):
self.original_tagname_ = None
if contour is None:
self.contour = []
else:
self.contour = contour
self.transformId = transformId
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, contourListType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if contourListType.subclass:
return contourListType.subclass(*args_, **kwargs_)
else:
return contourListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_contour(self): return self.contour
def set_contour(self, contour): self.contour = contour
def add_contour(self, value): self.contour.append(value)
def insert_contour_at(self, index, value): self.contour.insert(index, value)
def replace_contour_at(self, index, value): self.contour[index] = value
def get_transformId(self): return self.transformId
def set_transformId(self, transformId): self.transformId = transformId
def hasContent_(self):
if (
self.contour or
self.transformId is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='contourListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='contourListType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='contourListType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='contourListType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='contourListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for contour_ in self.contour:
contour_.export(outfile, level, namespace_, name_='contour', pretty_print=pretty_print)
if self.transformId is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stransformId>%s</%stransformId>%s' % (namespace_, self.gds_format_integer(self.transformId, input_name='transformId'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'contour':
obj_ = contourType.factory()
obj_.build(child_)
self.contour.append(obj_)
obj_.original_tagname_ = 'contour'
elif nodeName_ == 'transformId':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'transformId')
self.transformId = ival_
# end class contourListType
class meshListType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, mesh=None):
self.original_tagname_ = None
if mesh is None:
self.mesh = []
else:
self.mesh = mesh
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, meshListType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if meshListType.subclass:
return meshListType.subclass(*args_, **kwargs_)
else:
return meshListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_mesh(self): return self.mesh
def set_mesh(self, mesh): self.mesh = mesh
def add_mesh(self, value): self.mesh.append(value)
def insert_mesh_at(self, index, value): self.mesh.insert(index, value)
def replace_mesh_at(self, index, value): self.mesh[index] = value
def hasContent_(self):
if (
self.mesh
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='meshListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='meshListType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='meshListType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='meshListType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='meshListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for mesh_ in self.mesh:
mesh_.export(outfile, level, namespace_, name_='mesh', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'mesh':
obj_ = meshType.factory()
obj_.build(child_)
self.mesh.append(obj_)
obj_.original_tagname_ = 'mesh'
# end class meshListType
class macromoleculeType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None):
self.original_tagname_ = None
if id is None:
self.id = []
else:
self.id = id
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, macromoleculeType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if macromoleculeType.subclass:
return macromoleculeType.subclass(*args_, **kwargs_)
else:
return macromoleculeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def add_id(self, value): self.id.append(value)
def insert_id_at(self, index, value): self.id.insert(index, value)
def replace_id_at(self, index, value): self.id[index] = value
def hasContent_(self):
if (
self.id
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='macromoleculeType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='macromoleculeType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='macromoleculeType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='macromoleculeType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='macromoleculeType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for id_ in self.id:
showIndent(outfile, level, pretty_print)
outfile.write('<%sid>%s</%sid>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(id_), input_name='id')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'id':
id_ = child_.text
id_ = self.gds_validate_string(id_, node, 'id')
self.id.append(id_)
# end class macromoleculeType
class complexType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None):
self.original_tagname_ = None
if id is None:
self.id = []
else:
self.id = id
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, complexType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if complexType.subclass:
return complexType.subclass(*args_, **kwargs_)
else:
return complexType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def add_id(self, value): self.id.append(value)
def insert_id_at(self, index, value): self.id.insert(index, value)
def replace_id_at(self, index, value): self.id[index] = value
def hasContent_(self):
if (
self.id
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='complexType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='complexType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='complexType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='complexType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='complexType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for id_ in self.id:
showIndent(outfile, level, pretty_print)
outfile.write('<%sid>%s</%sid>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(id_), input_name='id')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'id':
id_ = child_.text
id_ = self.gds_validate_string(id_, node, 'id')
self.id.append(id_)
# end class complexType
GDSClassesMapping = {
'P': polygonType,
'biologicalAnnotation': biologicalAnnotationType,
'boxSize': positiveIntegerVectorType,
'canonicalEulerAngles': canonicalEulerAnglesType,
'colour': colourType,
'complex': complexType,
'complexesAndMacromolecules': macromoleculesAndComplexesType,
'contour': contourType,
'contourList': contourListType,
'externalReferences': externalReferencesType,
'macromolecule': macromoleculeType,
'mesh': meshType,
'meshList': meshListType,
'offset': integerVectorType,
'p': floatVectorType,
'point': floatVectorType,
'polygonList': polygonListType,
'rgba': rgbaType,
'segment': segmentType,
'segmentList': segmentListType,
'shapePrimitiveList': shapePrimitiveListType,
'software': softwareType,
'subtomogramAverage': threeDVolumeType,
'threeDVolume': threeDVolumeType,
'transformList': transformListType,
'transformationMatrix': transformationMatrixType,
'v': vertexType,
'vertexList': vertexListType,
'viewVectorRotation': viewVectorRotationType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'segmentation'
rootClass = segmentation
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
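# Usage sketch (not part of the generated module; file name and accessor
# names below are assumptions based on the getter naming convention of the
# generated classes, e.g. segmentListType.get_segment()):
#
#     root = parse("segmentation.sff", silence=True)  # hypothetical input path
#
# parse() re-exports the parsed tree as XML to stdout unless silence=True.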
def parseEtree(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'segmentation'
rootClass = segmentation
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
        sys.stdout.write(content.decode('utf-8'))
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
    try:
        from StringIO import StringIO
    except ImportError:  # Python 3
        from io import StringIO
parser = None
doc = parsexml_(StringIO(inString), parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'segmentation'
rootClass = segmentation
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseLiteral(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'segmentation'
rootClass = segmentation
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from emdb_sff import *\n\n')
sys.stdout.write('import emdb_sff as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"biologicalAnnotationType",
"canonicalEulerAnglesType",
"colourType",
"complexType",
"cone",
"contourListType",
"contourType",
"cuboid",
"cylinder",
"description",
"ellipsoid",
"externalReferencesType",
"floatVectorType",
"hsvType",
"integerVectorType",
"macromoleculeType",
"macromoleculesAndComplexesType",
"meshListType",
"meshPointType",
"meshType",
"polygonListType",
"polygonType",
"positiveIntegerVectorType",
"rgbaType",
"runLengthEncodingType",
"segmentListType",
"segmentType",
"segmentation",
"shapePrimitive",
"shapePrimitiveListType",
"softwareType",
"threeDVolumeType",
"transform",
"transformListType",
"transformationMatrixType",
"vertexListType",
"vertexType",
"viewVectorRotationType"
]
| [
"[email protected]"
] | |
b56ed8e751404aa8ba75b4dc5a993d49f79f45e3 | a7ab35ff204d7c4679ce8b1bf7da8fa363a7a708 | /algo_problems/q361-380/q375_topdown.py | 083a0579f37b3972b5767ffb94c88e4a5110a449 | [] | no_license | lin13k/practice | c68e7270be2694cb9737c35af8beb19db7e30d65 | c3efe1a5839c3ff1c320c0fcfc7b65a9462f7b52 | refs/heads/master | 2021-01-19T11:05:43.521468 | 2018-11-12T09:42:29 | 2018-11-12T09:42:29 | 87,928,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py |
# 1 -> 0
# 2 -> 1
# 3 -> 2
# 4 -> 2 + 3 -> 1 + 3
# 5 -> 3 + 4
# 6 -> 3 + 5
# 7 -> 4 + 6
# 8 -> 4 + 6 + 7 -> 4 + 5 + 7
# 9 -> 5 + 7 + 8 -> 5 + 6 + 8
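# LeetCode 375 (Guess Number Higher or Lower II): dp(s, e) is the amount of
# money that guarantees a win when the hidden number lies in [s, e].
# Recurrence:
#     dp(s, e) = min over pivot i in [s, e] of  i + max(dp(s, i - 1), dp(i + 1, e))
# i.e. pay the pivot, then assume the adversary pushes us to the costlier side.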
import sys
class Solution(object):
def getMoneyAmount(self, n):
"""
:type n: int
:rtype: int
"""
table = [[0 for i in range(n + 1)] for j in range(n + 1)]
return self.dp(table, 1, n)
def dp(self, table, s, e):
if s >= e:
return 0
if table[s][e] != 0:
return table[s][e]
result = sys.maxsize
for i in range(s, e + 1):
tmp = i + max(self.dp(table, s, i - 1), self.dp(table, i + 1, e))
result = min(result, tmp)
table[s][e] = result
return result
if __name__ == '__main__':
s = Solution()
print(s.getMoneyAmount(1))
print(s.getMoneyAmount(2))
print(s.getMoneyAmount(3))
print(s.getMoneyAmount(4))
print(s.getMoneyAmount(5))
print(s.getMoneyAmount(6))
print(s.getMoneyAmount(7))
print(s.getMoneyAmount(8))
print(s.getMoneyAmount(9))
| [
"[email protected]"
] | |
49a29165e17c706e65b2616e526b169e06874c63 | 46de08fb9b97e0df41c31157b4f1034941ffe14e | /app/expart_process.py | 790ef9f1c1d1cc8c2ef5f2440b20ce68b49fa009 | [] | no_license | 8secz-johndpope/Reinvent-Your-Look | d3b93c83bcac01603b58c77fd506ff81836086e9 | 530db294cc61073d41c727521721e45cb5c9dbc2 | refs/heads/master | 2021-09-19T08:56:04.165091 | 2018-07-26T01:43:24 | 2018-07-26T01:43:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,721 | py | from flask import render_template, redirect, url_for, request, session
from app import webapp
from app.config import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
import boto3
from app import swap, exchange_part, index
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
s3 = boto3.resource('s3',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
s3up = boto3.client('s3',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))
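# these index ranges follow dlib's standard 68-point facial landmark layout
# (0-16 jaw, 17-26 brows, 27-35 nose, 36-47 eyes, 48-67 mouth)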
@webapp.route('/exchange_part_process')
# Return file upload form
def exchange_part_process():
#if index.get_auth() is False:
# return redirect(url_for('frontpage'))
if 'authenticated' not in session:
return redirect(url_for('frontpage'))
username = session['username']
#username = index.get_username()
# urls = exchange_part.get_urls()
# exchange_part.set_urls(urls)
# print(exchange_part.get_urls())
urls = session['urls']
url = session['url']
#print(url)
return render_template("upload/expart_process.html", username=username, url=url, urls=urls)
@webapp.route('/image_exchange_part_process', methods=['POST'])
# Upload a new image and exchange it
def image_exchange_part_process():
if 'authenticated' not in session:
return redirect(url_for('frontpage'))
username = session['username']
# username = index.get_username()
# pic = exchange_part.get_pic()
urls = session['urls']
#urls = exchange_part.get_urls()
#urls = geturls()
#print(urls)
image4 = request.form.get('image4')
    # the entries of `urls` correspond one-to-one, in order, to the bundled
    # static images; the original if/elif chain (which also carried
    # commented-out s3.Bucket(...).download_file(...) calls for the same
    # files) is collapsed into a single lookup
    local_faces = ['bigxi', 'emilia', 'emma', 'hc', 'hu',
                   'justin', 'kobe', 'na', 'pu', 'taylor']
    f4 = 'app/static/%s.jpg' % dict(zip(urls, local_faces))[image4]
    # save the picture into the directory and bucket
part = request.form.get('part')
if part == 'eyes':
OVERLAY_POINTS = [LEFT_EYE_POINTS + RIGHT_EYE_POINTS, ]
partname = "eyes"
if part == 'brow':
OVERLAY_POINTS = [LEFT_BROW_POINTS + RIGHT_BROW_POINTS, ]
partname = "brow"
if part == 'nose':
OVERLAY_POINTS = [NOSE_POINTS, ]
partname = "nose"
if part == 'mouse':
OVERLAY_POINTS = [MOUTH_POINTS, ]
partname = "mouse"
pic = session['pic']
idpic = str(username + pic[11:-4] + f4[11:-4] + partname)
swap.main(image1=pic, image2=f4, username=username, OVERLAY_POINTS=OVERLAY_POINTS, part=partname)
session['pic'] = '/tmp/' + idpic + '.jpg'
# exchange_part.set_pic('/tmp/' + idpic + '.jpg')
return redirect(url_for('exchange_part_process'))
| [
"[email protected]"
] | |
3cea6275e8b81b5e9a783e808641dd5522fd0232 | a80afb70f5038247ebc3414f8762f7b6292903f1 | /rt1/general_functions.py | 1ccd2002697a226545e3b5cf6e691f0c23f78b92 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ndraeger/rt1 | 69aac6eef9cf9c7f3d2ee58271ba756aebad31c6 | 8cf30a3b3604b78b1422388e479b28c921d01c09 | refs/heads/master | 2023-01-05T18:39:43.933873 | 2020-10-28T13:06:00 | 2020-10-28T13:06:00 | 295,400,966 | 0 | 0 | Apache-2.0 | 2020-09-14T11:55:26 | 2020-09-14T11:55:25 | null | UTF-8 | Python | false | false | 10,631 | py | # -*- coding: utf-8 -*-
"""helper functions that are used both in rtfits and rtplots"""
import sys
import numpy as np
from itertools import tee, islice
from collections import OrderedDict
def rectangularize(array, return_mask=False, dim=None,
return_masked=False, dtype=None):
"""
return a rectangularized version of the input-array by repeating the
last value to obtain the smallest possible rectangular shape.
input:
- array = [[1,2,3], [1], [1,2]]
output:
- return_masked=False: [[1,2,3], [1,1,1], [1,2,2]]
- return_masked=True: [[1,2,3], [1,--,--], [1,2,--]]
Parameters
----------
array: list of lists
the input-data that is intended to be rectangularized
return_mask: bool (default = False)
indicator if weights and mask should be evaluated or not
dim: int (default = None)
the dimension of the rectangularized array
if None, the shortest length of all sub-lists will be used
return_masked: bool (default=False)
indicator if a masked-array should be returned
dtype: type (default = None)
the dtype of the returned array. If None, the dtype of the first
element will be used
Returns
-------
new_array: array-like
a rectangularized version of the input-array
mask: array-like (only if 'weights_and_mask' is True)
a mask indicating the added values
"""
# use this method to get the dtype of the first element since it works with
# pandas-Series, lists, arrays, dict-value views, etc.
if dtype is None:
dtype = np.array(next(islice(array, 1))).dtype
if dim is None:
# get longest dimension of sub-arrays
dim = len(max(array, key=len))
if return_mask is True or return_masked is True:
newarray = np.empty((len(array), dim), dtype=dtype)
mask = np.full((len(array), dim), False, dtype=bool)
for i, s in enumerate(array):
le = len(s)
newarray[i, :le] = s
newarray[i, le:] = s[-1]
mask[i, le:] = True
if return_masked is True:
return np.ma.masked_array(newarray, mask)
else:
return [newarray, mask]
else:
newarray = np.empty((len(array), dim), dtype=dtype)
for i, s in enumerate(array):
le = len(s)
newarray[i, :le] = s
newarray[i, le:] = s[-1]
return newarray
def meandatetime(datetimes):
"""
calculate the average date from a given list of datetime-objects
(can be applied to a pandas-Series via Series.apply(meandatetime))
Parameters
----------
datetimes: list
a list of datetime-objects
Returns
-------
meandate: Timestamp
"""
if len(datetimes) == 1:
return datetimes[0]
x = datetimes
deltas = (x[0] - x[1:])/len(x)
meandelta = sum(deltas)
meandate = x[0] - meandelta
return meandate
def dBsig0convert(val, inc,
dB, sig0,
fitdB, fitsig0):
"""
A convenience-function to convert an array of measurements (and it's
associated incidence-angles).
- between linear- and dB units `( val_dB = 10 * log10(val_linear) )`
- between sigma0 and intensity `( sig0 = 4 * pi * cos(inc) * I )`
Parameters
----------
val: array-like
the backscatter-values that should be converted
inc: array-like
the associated incidence-angle values (in radians)
dB: bool
indicator if the output-dataset should be in dB or not
sig0: bool
indicator if the output-values should be intensity or sigma_0
fitdB: bool
indicator if the input-values have been provided in linear-units
or in dB
fitsig0: bool
indicator if the input-values are given as sigma0 or intensity
Returns
-------
val : array-like
the converted values
"""
if sig0 is not fitsig0:
# if results are provided in dB convert them to linear units before
# applying the sig0-intensity conversion
if fitdB is True:
val = 10**(val/10.)
# convert sig0 to intensity
if sig0 is False and fitsig0 is True:
val = val/(4.*np.pi*np.cos(inc))
# convert intensity to sig0
if sig0 is True and fitsig0 is False:
val = 4.*np.pi*np.cos(inc)*val
# convert back to dB if required
if dB is True:
val = 10.*np.log10(val)
elif dB is not fitdB:
# if dB output is required, convert to dB
if dB is True and fitdB is False:
val = 10.*np.log10(val)
# if linear output is required, convert to linear units
if dB is False and fitdB is True:
val = 10**(val/10.)
return val
def pairwise(iterable, pairs=2):
"""
a generator to return n consecutive values from an iterable, e.g.:
pairs = 2
s -> (s0,s1), (s1,s2), (s2, s3), ...
pairs = 3
s -> (s0, s1, s2), (s1, s2, s3), (s2, s3, s4), ...
adapted from https://docs.python.org/3.7/library/itertools.html
"""
x = tee(iterable, pairs)
for n, n_iter in enumerate(x[1:]):
[next(n_iter, None) for i in range(n + 1)]
return zip(*x)
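# e.g. (examples, not part of the original module):
#     list(pairwise([1, 2, 3, 4]))           -> [(1, 2), (2, 3), (3, 4)]
#     list(pairwise([1, 2, 3, 4], pairs=3))  -> [(1, 2, 3), (2, 3, 4)]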
def split_into(iterable, sizes):
"""
a generator that splits the iterable into iterables with the given sizes
see more_itertools split_into for details:
https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.split_into
"""
it = iter(iterable)
for size in sizes:
if size is None:
yield list(it)
return
else:
yield list(islice(it, size))
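# e.g. list(split_into(range(5), [2, 2, None])) -> [[0, 1], [2, 3], [4]]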
def scale(x, out_range=(0, 1),
domainfuncs=(np.nanmin, np.nanmax)):
"""
scale an array between out_range = (min, max) where the range of the
    array is evaluated via the domainfuncs (min-function, max-function)
useful domainfuncs are:
>>> np.nanmin()
>>> np.nanmax()
>>> from itertools import partial
>>> partial(np.percentile, q=95)
Notice: using functions like np.percentile might result in values that
exceed the specified `out_range`! (e.g. if the out-range is (0,1),
a min-function of np.percentile(q=5) might result in negative values!)
"""
domain = domainfuncs[0](x), domainfuncs[1](x)
y = (x - (domain[1] + domain[0]) / 2) / (domain[1] - domain[0])
return y * (out_range[1] - out_range[0]) + (out_range[1] +
out_range[0]) / 2
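# e.g. scale(np.array([2., 4., 6.]))                    -> array([0. , 0.5, 1. ])
#      scale(np.array([2., 4., 6.]), out_range=(-1, 1)) -> array([-1., 0., 1.])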
def update_progress(progress, max_prog=100,
title="", finalmsg=" DONE\r\n",
progress2=None):
"""
print a progress-bar
adapted from: https://blender.stackexchange.com/a/30739
"""
length = 25 # the length of the progress bar
block = int(round(length*progress/max_prog))
if progress2 is not None:
msg = (f'\r{title} {"#"*block + "-"*(length-block)}' +
f' {progress} [{progress2}] / {max_prog}')
else:
msg = (f'\r{title} {"#"*block + "-"*(length-block)}' +
f' {progress} / {max_prog}')
if progress >= max_prog:
msg = f'\r{finalmsg:<79}\n'
sys.stdout.write(msg)
sys.stdout.flush()
def dt_to_hms(td):
"""
convert a datetime.timedelta object into days, hours,
minutes and seconds
"""
days, hours, minutes = td.days, td.seconds // 3600, td.seconds % 3600 // 60
seconds = td.seconds - hours*3600 - minutes*60
return days, hours, minutes, seconds
def groupby_unsorted(a, key=lambda x: x, sort=False, get=lambda x: x):
"""
group the elements of the input-array and return it as a dict with a list
of the found values. optionally use a key- and a get- function.
if sort is True, a OrderedDict with sorted keys will be returned
roughly equivalent to:
>>> # if only the input-array a is provided
... {unique value of a: [found copies of the unique value]}
... # if a and a key-function is provided
... {key(a) : [...values with the same key(a)...]}
... # if both a key- and a get-function is provided
... {key(a) : [get(x) for x in ...values with the same key(a)...]}
"""
# always use an OrderedDict to ensure sort-order for python < 3.6
d = OrderedDict()
for item in a:
d.setdefault(key(item), []).append(get(item))
if sort is True:
return OrderedDict(sorted(d.items()))
else:
return d
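# e.g. groupby_unsorted('AbBa', key=str.lower)
#      -> OrderedDict([('a', ['A', 'a']), ('b', ['b', 'B'])])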
def interpolate_to_index(data, index, data_index=None, **interp1d_kwargs):
"""
A wrapper around scipy.interp1d to interpolate a dataset to a given index
Parameters
----------
data : list, array-like, pandas.Series or pandas.DataFrame
The input-data as list, array, pandas.Series or pandas.DataFrame
If the data is provided as pandas Series or DataFrame, the index
must support a method .to_julian_date() to convert the timestamps
into numerical values.
index : array-like
the index to which the dataset should be interpolated.
It must support a method .to_julian_date()
    data_index : array-like, optional
        the index of the data; required if the data is provided as a list or
        array. It must support a method .to_julian_date().
        The default is None.
**interp1d_kwargs :
additional keyword-arguments passed to scipy.interpolate.interp1d
the default is (fill_value=None, bounds_error=False)
Returns
-------
    pandas.Series or pandas.DataFrame
        the input-data interpolated to the given index
"""
from pandas import Series, DataFrame
from scipy.interpolate import interp1d
kwargs = dict(fill_value=None, bounds_error=False)
kwargs.update(interp1d_kwargs)
if isinstance(data, Series):
# perform a linear interpolation to the auxiliary data timestamps
f = interp1d(data.index.to_julian_date(), data.values, **kwargs)
x = f(index.to_julian_date())
return Series(x, index)
elif isinstance(data, DataFrame):
f = interp1d(data.index.to_julian_date(), data.values, axis=0,
**kwargs)
x = f(index.to_julian_date())
return DataFrame(x, index, columns=data.columns)
elif isinstance(data, (list, np.ndarray)):
assert data_index is not None, ('you must provide "data_index"' +
'if data is provided as list or array')
        # data is a plain list/array here, so use it directly (it has no .values)
        f = interp1d(data_index.to_julian_date(), data, **kwargs)
x = f(index.to_julian_date())
return Series(x, index)
| [
"[email protected]"
] | |
5f29651ef08073862e157211e6fe9629fb7df746 | a8750439f200e4efc11715df797489f30e9828c6 | /Miscellaneous/merge_sort_inversion.py | dce584f2558a137d4b5602831310ba331d7ad8b2 | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | def mergeSort(arr, array_size):
tmp = [0] * array_size
return _mergeSort(arr, tmp, 0, array_size - 1)
def _mergeSort(arr, tmp, left, right):
    inv_count = 0
    if right > left:
        mid = (right + left) // 2
        # inversions entirely inside each half, plus the split inversions
        inv_count = _mergeSort(arr, tmp, left, mid)
        inv_count += _mergeSort(arr, tmp, mid + 1, right)
        inv_count += merge(arr, tmp, left, mid + 1, right)
def merge(arr,tmp, left, mid, right):
inv_count = 0
i = left
j = mid
k = left
while i <= mid - 1 and j <= right:
k += 1
if arr[i] <= arr[j]:
i += 1
tmp[k] = arr[i]
else:
j += 1
tmp[k] = arr[j]
inv_count += (mid - 1)
while i <= mid - 1:
k += 1
i += 1
tmp[k] = arr[i]
while j <= right:
k += 1
j += 1
tmp[k] = arr[j]
return inv_count
arr = [1, 20, 6, 4, 5]
print(mergeSort(arr, 5))
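# expected output: 5 -- the inversions are (20, 6), (20, 4), (20, 5), (6, 4), (6, 5)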
| [
"[email protected]"
] | |
1e96f7e1f089f5f0ebbc8e035216173c31de6a1a | 28250e6b6a325d3c09d3828074bd9411887c892f | /object_oriented_prgms/first prmg.py | a7661b44f3ad5796c9141cd8e66e14889d2bd20d | [] | no_license | vidya3979/luminardjango | ed6f7353df139867093d33b57a4fcc6bb174bb54 | 083f83d82fae3737cb92709ec985a9b89a8598b8 | refs/heads/master | 2023-02-10T16:25:45.361026 | 2021-01-08T06:17:21 | 2021-01-08T06:17:21 | 315,545,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | class person:
def set_person(self,name,age,gender):
self.name=name
self.age=age
self.gender=gender
def print_person(self):
print("name=",self.name)
print("age=",self.age)
print("gender=",self.gender)
obj=person()
obj.set_person("ajay",25,"male")
obj1=person()
obj1.set_person("ravi",30,"male")
obj.print_person()
obj1.print_person()
| [
"[email protected]"
] | |
727ed139b54f7f87311aee11789b802bf5f9049c | efb1d5a4decb944ab377f186389f2b7b67e5943c | /Input Output/ac2015_serializacion/ac21.py | 4c6e22ab5927778d6f42f5c6b3e71936a66074fc | [] | no_license | lechodiman/IIC2233-Learning-Stuff | 978f86a8e771481c170d0a7c41b9ec118014b051 | f2b328d3a4015f7e2c7b132d3b21297ae4e7dbca | refs/heads/master | 2021-03-27T19:39:31.763665 | 2018-02-11T00:34:46 | 2018-02-11T00:34:46 | 120,977,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,885 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from PyQt5 import QtCore
from PyQt5.QtWidgets import (QDialog, QDialogButtonBox, QLabel, QLineEdit,
QVBoxLayout, QPushButton, QWidget, QHBoxLayout, QApplication)
import datetime
import pickle
import os
class Cliente:
def __init__(self, nombre, identificador, Gastado):
self.Nombre = nombre
self.ID = identificador
self.GastoAcumulado = Gastado
def __getstate__(self):
        # Serialization
nueva = self.__dict__.copy()
nueva.update({"UltimaCompra": str(datetime.datetime)})
return nueva
def __setstate__(self, state):
        # Deserialization
self.__dict__ = state
def actualizarGasto(self, gastado):
self.GastoAcumulado += gastado
class VentanaCajero(QDialog):
def __init__(self, parent=None, username=""):
super(VentanaCajero, self).__init__(parent)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(
QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
self.clienteLabel = QLabel("Nombre cliente", self)
self.clienteText = QLineEdit(self)
self.idLabel = QLabel("RUT", self)
self.idText = QLineEdit(self)
self.gastadoLabel = QLabel("Gastado", self)
self.gastadoText = QLineEdit(self)
self.verticalLayout = QVBoxLayout(self)
self.verticalLayout.addWidget(self.clienteLabel)
self.verticalLayout.addWidget(self.clienteText)
self.verticalLayout.addWidget(self.idLabel)
self.verticalLayout.addWidget(self.idText)
self.verticalLayout.addWidget(self.gastadoLabel)
self.verticalLayout.addWidget(self.gastadoText)
self.verticalLayout.addWidget(self.buttonBox)
self.buttonBox.accepted.connect(self.serializarCliente)
self.buttonBox.rejected.connect(self.close)
def serializarCliente(self):
#####
ID_cliente = self.idText.text()
Archivo_cliente = str(ID_cliente) + ".walkcart"
        # check whether the client already exists in the DB
        if Archivo_cliente in os.listdir("ClientesDB"):
            # it exists -> open, update, and close the record
with open("ClientesDB/" + Archivo_cliente, "rb") as file:
cliente_existente = pickle.load(file)
cliente_existente.actualizarGasto(int(self.gastadoText.text()))
with open("ClientesDB/" + Archivo_cliente, "wb") as file:
pickle.dump(cliente_existente, file)
        else:
            # it does not exist -> create the record and close
nuevo_cliente = Cliente(
self.clienteText.text(),
int(self.idText.text()),
int(self.gastadoText.text())
)
nuevo_archivo = "ClientesDB/" + str(nuevo_cliente.ID) + ".walkcart"
with open(nuevo_archivo, "wb") as file:
                pickle.dump(nuevo_cliente, file)
#####
self.clienteText.setText("")
self.idText.setText("")
self.gastadoText.setText("")
class VentanaAdmin(QDialog):
def __init__(self, parent=None):
super(VentanaAdmin, self).__init__(parent)
self.archivoButton = QPushButton("TOP")
self.archivoButton.clicked.connect(self.generarArchivo)
self.cancelButton = QPushButton("Cancel")
self.cancelButton.clicked.connect(self.close)
self.horizontalLayout = QVBoxLayout(self)
self.horizontalLayout.addWidget(self.archivoButton)
self.horizontalLayout.addWidget(self.cancelButton)
def generarArchivo(self):
#####
        # scan every stored client to find the top spender
        cliente_TOP = False
for file_ in os.listdir("ClientesDB"):
if file_.endswith(".walkcart"):
with open("ClientesDB/" + file_, "rb") as file:
nuevo_cliente = pickle.load(file)
if not cliente_TOP:
cliente_TOP = nuevo_cliente
else:
                    if cliente_TOP.GastoAcumulado <= nuevo_cliente.GastoAcumulado:
cliente_TOP = nuevo_cliente
Top = cliente_TOP
        # serialize the top spender to JSON
import json
with open("TOP.walkcart", "w") as file_:
json.dump(Top.__dict__, file_)
#####
class Input(QWidget):
def __init__(self, parent=None):
super(Input, self).__init__(parent)
self.userNameText = QLineEdit(self)
self.pushButtonWindow = QPushButton(self)
self.pushButtonWindow.setText("Iniciar Sesión")
self.pushButtonWindow.clicked.connect(self.on_pushButton_clicked)
self.layout = QHBoxLayout(self)
self.layout.addWidget(self.userNameText)
self.layout.addWidget(self.pushButtonWindow)
@QtCore.pyqtSlot()
def on_pushButton_clicked(self):
#####
        # read the given input
        Usuario = self.userNameText.text()
        # load the list of authorized cashiers
with open("cajeros.walkcart", "rb") as file:
CajerosAutorizados = pickle.load(file)
print(CajerosAutorizados)
        # user identification
        if Usuario == "WalkcartUnlimited":
            # admin interface
self.ventanaUsuario = VentanaAdmin(self)
self.hide()
self.ventanaUsuario.show()
elif Usuario in CajerosAutorizados:
            # cashier interface
self.ventanaUsuario = VentanaCajero(
self, username=self.userNameText.text())
self.hide()
self.ventanaUsuario.show()
#####
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
app.setApplicationName('Log-in WM')
main = Input()
main.show()
sys.exit(app.exec_())
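# Run sketch (environment assumptions, not from the original source): the app expects
# a "ClientesDB/" folder and a pickled "cajeros.walkcart" cashier list next to the
# script; log in as "WalkcartUnlimited" for the admin window, or as a listed cashier.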
| [
"[email protected]"
] | |
d61ef4246552698da222d71d303dbb481973e586 | 4bfa419ad5beba3b66bd9bebdfa56cd2934f5330 | /basicforms/settings.py | 508b0b5b2ba54fc1db17f884b62627295b9f28cd | [] | no_license | pranay-prajapati/journal.github.io | 226e1f02b191e9179abb7cc01c4b39b4b7b5ec9c | 4f0c936e6d4395e484b962800631f824aa0ebd3c | refs/heads/master | 2022-12-23T10:07:57.388471 | 2020-09-29T10:04:13 | 2020-09-29T10:04:13 | 290,825,282 | 0 | 0 | null | 2020-09-29T10:04:14 | 2020-08-27T16:23:42 | Python | UTF-8 | Python | false | false | 3,337 | py | """
Django settings for basicforms project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
STATIC_DIR = os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'rfgj!j=uq5b(zk#%ik8hz=873#d9_ia&cc3uk=$hq551rl3n+h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'basicapp',
'crispy_forms',
'django_filters',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'basicforms.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'basicforms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
| [
"[email protected]"
] | |
a411450a3bbeced98c395baa76aab1feb00394c1 | 6af96cf3c590a5418e87873e892fe704698c8ef8 | /25_string.py | 047ae2c540aaf7feffad090455079dada0b3c7d9 | [] | no_license | vikasjoshis001/Python-Course | f228ed362160831ee00c8498e679186463887982 | 40efa480b3b39b3abd1b2a0c6bad0af3db2ce205 | refs/heads/master | 2023-05-03T15:37:20.841229 | 2021-05-21T14:41:18 | 2021-05-21T14:41:18 | 283,190,082 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | s=input("Enter an string: ")
i=0
s1=""
for x in s:
if (s.index(x)==i):
s1+=x
i+=1
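# at this point s1 holds each character of s once, in first-occurrence order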
print(s1) | [
"[email protected]"
] | |
546d609ebd7eb846aed88664b0fc6b131d9e842b | c683adc4b5c2afd03cf8b1f8e00137fa5141dad2 | /vjezbanje_nizovi/zad_10.py | cd27378ca3c8fcbe98e0624233bc355525947d0b | [] | no_license | azunic/pyhton-tasks | a3656fe442eddf225756bf760e2be659986df66d | 13f5d9dedfbfb244b8c86db3990f24d155e575f5 | refs/heads/master | 2022-12-07T06:56:01.235940 | 2020-08-20T12:14:13 | 2020-08-20T12:14:13 | 286,251,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 17:14:46 2020
@author: Ana
"""
# Read an array A of n elements. Print the element that is smallest
# in absolute value. Start the comparison from the first element of
# the array.
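# Hand-traced example (illustrative input, not from the original source): for the
# entries 3, -1, 4 the element smallest in absolute value is -1, so the program
# prints -1 and then 1.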
niz = []
n = int(input("Unesite brok koliko zelite da vam bude niz:"))
for j in range(0,n):
broj = int(input("Unesite broj "))
niz.append(broj)
prviclan = niz[0]
for j in range(0, n):
    if abs(niz[j]) < abs(prviclan):  # compare by absolute value, as the task asks
        prviclan = niz[j]
print(prviclan)
apsolutna = abs(prviclan)
print(apsolutna) | [
"[email protected]"
] | |
3d6fab8a882c37b7d3952e5946b3ba0c18c9699a | fb497e71d257a333a39776d092c2ad9c53d21cf5 | /door_and_motion/catarazzi.py | ce7bf57887777475562e3e764f37e28d5b45d5dc | [] | no_license | jovankaas/catarazzi | eabcecbc1e3b751560bb46f2596335378a497f70 | 4f26345c436635d45e686bdf39992273e3012fb9 | refs/heads/master | 2023-01-01T20:19:07.256182 | 2020-10-25T14:03:14 | 2020-10-25T14:03:14 | 258,998,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | ../doorsensor/catarazzi.py | [
"[email protected]"
] | |
6e7bfbc7632870831fc0c7a921c390f007845f75 | 6ba78cb76623157c19571e6b3d7875f6474f1719 | /flh-split-into-series-and-create-network.py | a200f11283f97b8bd9ee42f5dbb539e1c97195d0 | [] | no_license | FannieLouHamerPapers/code | 4be5f38147cd5bab7f059537e5d288be72121441 | 8d7dd816fc57eaa60e499d73f84e14c3dd9e339c | refs/heads/master | 2021-07-12T00:20:33.074532 | 2020-07-20T18:34:35 | 2020-07-20T18:34:35 | 183,095,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,920 | py | import pandas as pd
import networkx as nx
from networkx.algorithms import bipartite
fullset=pd.read_csv('https://raw.githubusercontent.com/FannieLouHamerPapers/NamedEntities/master/flh_ner_all.csv', low_memory=False)
Delta_Opportunities_Corporation_DOC_Series=fullset.loc[fullset.doc_title_full.str.contains('Delta Opportunities Corporation (D', regex=False)]
Freedom_Farms_Corporation_FFC_Series=fullset.loc[fullset.doc_title_full.str.contains('Freedom Farms Corporation (FFC) Series', regex=False)]
Other_Organization_Series_I=fullset.loc[fullset.doc_title_full.str.contains('Other Organization Series I:|Other Organization Series I,|Other Organization Series I ', regex=True)]
Other_Organization_Series_II=fullset.loc[fullset.doc_title_full.str.contains('Other Organization Series II:', regex=False)]
Other_Organizational_Series_I=fullset.loc[fullset.doc_title_full.str.contains('Other Organizational Series I:', regex=False)]
Other_Organizational_Series_II=fullset.loc[fullset.doc_title_full.str.contains('Other Organizational Series II:', regex=False)]
Other_Material=fullset.loc[fullset.doc_title_full.str.contains('Other Material:', regex=False)]
Mississippians_United_To_Elect_Negro_Candidates_Series=fullset.loc[fullset.doc_title_full.str.contains('ippians United To Elect Negro Candidates Series', regex=False)]
Delta_Ministry_Series=fullset.loc[fullset.doc_title_full.str.contains('Delta Ministry Series:', regex=False)]
Personal_Series=fullset.loc[fullset.doc_title_full.str.contains('Personal:|Personal Series Correspondence|Personal Series:', regex=True)]
Mississippi_Freedom_Democratic_Party_MFDP_Series=fullset.loc[fullset.doc_title_full.str.contains('Mississippi Freedom Democratic Party (M', regex=False)]
Delta_Opportunities_Corporation_DOC_Series.name='DeltaOpportunitiesCorporationDOCSeries'
Freedom_Farms_Corporation_FFC_Series.name='FreedomFarmsCorporationFFCSeries'
Mississippians_United_To_Elect_Negro_Candidates_Series.name='MississippiansUnitedToElectNegroCandidatesSeries'
Delta_Ministry_Series.name='DeltaMinistrySeries'
Personal_Series.name='PersonalSeries'
Mississippi_Freedom_Democratic_Party_MFDP_Series.name='MississippiFreedomDemocraticPartyMFDPSeries'
series_name = [Delta_Opportunities_Corporation_DOC_Series, Freedom_Farms_Corporation_FFC_Series, Mississippians_United_To_Elect_Negro_Candidates_Series, Delta_Ministry_Series, Personal_Series, Mississippi_Freedom_Democratic_Party_MFDP_Series]
all_freq=[5,10]
for series in series_name:
nodes_full=series['entity'].drop_duplicates()
#create empty multigraph - multigraph is an undirected graph with parallel edges
G = nx.MultiGraph()
#import edge dataframe and create network
G = nx.from_pandas_edgelist(series, source='doc', target='entity', edge_attr=True)
#project the graph onto entities, removing documents from the graph
full_graph = bipartite.weighted_projected_graph(G, nodes_full)
#convert the projected edge list
full_proj = nx.to_pandas_edgelist(full_graph)
#pull nodes list from the projected edge lists
nodes=full_proj['source'].append(full_proj['target']).drop_duplicates()
nodes_proj = pd.DataFrame({'id': nodes, 'label': nodes})
#print node & edge list for the full projected graph
full_proj.to_csv('flh_' + series.name +'_ner_all_proj_edges.csv', index=False)
nodes_proj.to_csv('flh_' + series.name +'_ner_all_proj_nodes.csv', index=False)
for x in all_freq:
#filter data
filtered_edges=full_proj.loc[full_proj.weight >= x]
nodes=filtered_edges['source'].append(filtered_edges['target']).drop_duplicates()
filtered_nodes = pd.DataFrame({'id': nodes, 'label': nodes})
#print csvs
filtered_edges.to_csv('flh_' + series.name +'_ner_all_proj_edges_freq'+ str(x) + '.csv', index=False)
filtered_nodes.to_csv('flh_' + series.name +'_ner_all_proj_nodes_freq'+ str(x) + '.csv', index=False)
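# Pipeline summary (hand-written recap of the loop above): for each archival series,
# build a document-entity graph, project it onto entities with co-occurrence weights
# (bipartite.weighted_projected_graph), then export full and weight>=5 / weight>=10
# node and edge CSVs for downstream network tools.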
| [
"[email protected]"
] | |
3a15b1c775acf52ab76e4a0976ea0acdd4d1b27b | 83d806103224ca9aac9b6f353b58490035e8344c | /Analysis/Graph.py | d144b917957114a0e12f569b314389cf2012f2f3 | [] | no_license | bumsubp/BPSpatial | 2693de0303ebee02ea38bd55e37daf5f282754ff | 157b0248dbd7234d6ae77e70c82ede2e12556ed0 | refs/heads/master | 2021-06-29T04:16:58.337779 | 2021-02-12T07:12:04 | 2021-02-12T07:12:04 | 216,970,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,516 | py | """
@author: bspark
"""
import networkx as nx
import math
import sympy as sp
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import BPSpatial.FileIO.RTree as rtree
class geometry():
"""
    Class providing basic geometric computations on nodes and line segments
"""
@staticmethod
def lineSpec(node1, node2):
"""
        Method for getting the spec (slope, intercept, and distance) of a line between two input nodes
Parameters
----------
node1: tuple
(x, y) of node1
node2: tuple
(x, y) of node2
Returns
-------
m: float or True/False
slope of a line of node1 and node2 \n
True - parallel to y axis \n
            False - parallel to x axis \n
b: float
intercept
d: float or -1
distance between node1 and node2 \n
-1 - when node1 and node2 are the same \n
"""
if node1 == node2:
m = 0
b = 0
d = -1
return m, b, d
elif node1[0] == node2[0]: # parallel to y axis
m = True
b = node1[0]
d = abs(node2[1] - node1[1])
return m, b, d
elif node1[1] == node2[1]: # parallel to x axis
m = False
b = node1[1]
d = abs(node2[0] - node1[0])
return m, b, d
else:
m = (node2[1] - node1[1]) / (node2[0] - node1[0])
b = (node1[1]) - m*node1[0]
d = geometry.lineLength(node1, node2)
return m, b, d
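    # Worked examples (hand-checked, not from the original source):
    #   geometry.lineSpec((0, 0), (3, 4)) -> (4/3, 0.0, 5.0)   general line
    #   geometry.lineSpec((2, 0), (2, 5)) -> (True, 2, 5)      parallel to y axis
    #   geometry.lineSpec((1, 1), (1, 1)) -> (0, 0, -1)        identical nodes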
@staticmethod
def lineLength(node1, node2):
"""
Method for getting distance between two input nodes
Parameters
----------
node1: tuple
(x, y) of node1
node2: tuple
(x, y) of node2
        Return
        ------
        distance: float
"""
return ((node2[1] - node1[1])**2 + (node2[0] - node1[0])**2)**(1/2)
@staticmethod
def isPointOnLine(node1, node2, point):
"""
        Method to check whether a point lies on the line segment between two input nodes
Parameters
----------
node1: tuple
(x, y) of node1
node2: tuple
(x, y) of node2
point: tuple
(x, y) of a point to check
Return
------
output: boolean
True, if a point is on the line of two nodes \n
            False, otherwise
"""
m, b, d = geometry.lineSpec(node1, node2)
if d == -1: # if two nodes are the same
if node1 == point:
return True
else:
return False
else:
if m == True: # parallel to y axis
if point[0] == b and \
(((node1[1] <= point[1]) and (point[1] <= node2[1])) or\
((node2[1] <= point[1]) and (point[1] <= node1[1]))):
return True
else:
return False
elif m == False:
if point[1] == b and \
(((node1[0] <= point[0]) and (point[0] <= node2[0])) or\
((node2[0] <= point[0]) and (point[0] <= node1[0]))):
return True
else:
return False
else:
if(abs(point[1] - (m*point[0] + b)) < 0.05) and \
(((node1[0] <= point[0]) and (point[0] <= node2[0])) or\
((node2[0] <= point[0]) and (point[0] <= node1[0]))) and\
(((node1[1] <= point[1]) and (point[1] <= node2[1])) or\
((node2[1] <= point[1]) and (point[1] <= node1[1]))):
return True
else:
return False
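    # Worked examples (hand-checked): the 0.05 tolerance accepts near-hits on the
    # supporting line, but the bounding-box checks still apply.
    #   geometry.isPointOnLine((0, 0), (4, 4), (2, 2)) -> True
    #   geometry.isPointOnLine((0, 0), (4, 4), (5, 5)) -> False   outside the segment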
@staticmethod
def findNearPointOnLine(node1, node2, point):
"""
        Method for finding the point nearest to a given point on the straight
        infinite line formed by two nodes. The result is a projection onto the
        infinite line, so it is not guaranteed to fall between the two nodes.
Parameters
----------
node1: tuple
(x, y) of node1
node2: tuple
(x, y) of node2
point: tuple
(x, y) of a point to check
Return
------
x: float
x coordinate of the nearest point
y: float
            y coordinate of the nearest point
"""
p=point[0]
q=point[1]
a=node1[0]
b=node1[1]
c=node2[0]
d=node2[1]
x = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (d-b) + p
y = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (a-c) + q
return x, y
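    # Worked example (hand-checked): projecting (2, 3) onto the line through (0, 0)
    # and (4, 0) gives (2.0, 0.0); pair with isPointOnLine to confirm the projection
    # actually falls on the segment.
    #   geometry.findNearPointOnLine((0, 0), (4, 0), (2, 3)) -> (2.0, 0.0)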
@staticmethod
def findPointOnLine(node1, node2, distance):
"""
        Method for finding the point at the given distance from node1
        along the line from node1 to node2
Parameters
----------
node1: tuple
(x, y) of node1
node2: tuple
(x, y) of node2
distance: float
Return
------
xy: list
            coordinate set of the output point on the line of the input nodes
"""
m, b, _ = geometry.lineSpec(node1, node2)
xy = []
if m == True: # parallel to y axis
xy.append(node1[0])
if node1[1] <= node2[1]:
xy.append(node1[1] + distance)
else:
xy.append(node1[1] - distance)
elif m == False: # parallel to x axis
if node1[0] <= node2[0]:
xy.append(node1[0] + distance)
else:
xy.append(node1[0] - distance)
xy.append(node1[1])
else:
x = sp.Symbol('x')
z = (x-node1[0])**2 + (m*x+b-node1[1])**2 - distance**2
xSolution = sp.solve(z, x)
for xSol in xSolution:
if (xSol >= node1[0] and xSol <= node2[0]) or (xSol <= node1[0] and xSol >= node2[0]):
xy.append(xSol)
xy.append(xSol*m + b)
return xy
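    # Worked example (hand-checked): this input hits the parallel-to-x-axis branch,
    # so no sympy solve is needed.
    #   geometry.findPointOnLine((0, 0), (10, 0), 3) -> [3, 0]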
class graphCalculate():
"""
    Class implementing a variety of calculations on networkx graph objects
"""
@staticmethod
def addDistance(graph):
"""
Method for adding distance of edges in the input polyline graph
Parameters
----------
graph: networkx graph
            polyline graph to add the distance of each edge
"""
distanceList = graphCalculate._calculateDistance(graph)
for dist, edge in zip(distanceList, graph.edges(data=True)):
edge[2]['distance'] = dist
@staticmethod
def addCenter(graph, decimals=6):
"""
Method for adding center point coordinate of edges in the input polyline graph
Parameters
----------
graph: networkx graph
            polyline graph to add the center coordinates of each edge
"""
for edge in graph.edges(data=True):
prevVertex = None
distnaceToGo = (edge[2]['distance']) / 2
for ind, vertex in enumerate(edge[2]['coordinates']):
if ind == 0:
prevVertex = vertex
else:
currVertex = vertex
distance = geometry.lineLength(prevVertex, currVertex)
if distnaceToGo > distance:
distnaceToGo -= distance
else:
centerPoint = geometry.findPointOnLine(prevVertex, currVertex, distnaceToGo)
edge[2]['center'] = [round(centerPoint[0], decimals), round(centerPoint[1], decimals)]
break
prevVertex = currVertex
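    # Usage sketch (workflow and the variable name g are illustrative assumptions):
    # for a polyline DiGraph g whose edges carry a 'coordinates' vertex list,
    #   graphCalculate.addDistance(g)   # writes edge[2]['distance']
    #   graphCalculate.addCenter(g)     # writes edge[2]['center']; needs 'distance' first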
@staticmethod
def _calculateDistance(graph):
distanceList = []
for edge in graph.edges(data=True):
dist = 0
prevVertex = None
for ind, vertex in enumerate(edge[2]['coordinates']):
if ind == 0:
prevVertex = vertex
else:
currVertex = vertex
segLen = geometry.lineLength(prevVertex, currVertex)
dist+= segLen
prevVertex = currVertex
distanceList.append(dist)
return distanceList
class near():
"""
    Class for finding the nearest location on a polyline for each input point
"""
@staticmethod
def pointOnPolyline(polylineGraph, pointGraph, criterion='', threshold=0):
"""
Method for finding the nearest point of individual point feature of
a point graph among a set of line features of polyline graph
Parameters
----------
polylineGraph: networkx graph
polyline graph in which a set of output points will be located
pointGraph: networkx graph
point graph including points to check
criterion: string
a common column name of point and line features
when finding the nearest point
threshold: float
searching distance threshold when finding the nearest point
Return
------
pntGraph: networkx graph
point graph of the nearest points
"""
pntGraph = pointGraph.copy()
idxLine, edgeIdCoordDict = rtree.rtree_polyline(polylineGraph)
for pnt in pntGraph.nodes(data=True):
nearPolylineId = list(idxLine.nearest(rtree.pointRect(pnt[0]), 3))
pointCandidateDict = {}
for edgeId in nearPolylineId:
edge = edgeIdCoordDict[edgeId] # key of polyline Graph
if criterion == '':
distToStartNode = geometry.lineLength(pnt[0], list(edge[0]))
distToEndNode = geometry.lineLength(pnt[0], list(edge[1]))
if threshold == 0:
pointCandidateDict[distToStartNode] = edge[0]
pointCandidateDict[distToEndNode] = edge[1]
else:
if distToStartNode <= threshold:
pointCandidateDict[distToStartNode] = edge[0]
if distToEndNode <= threshold:
pointCandidateDict[distToEndNode] = edge[1]
prevVertex = None
for ind, vertex in enumerate(polylineGraph.edge[edge[0]][edge[1]]['coordinates']):
if ind == 0:
prevVertex = vertex
else:
currVertex = vertex
nearX, nearY = geometry.findNearPointOnLine(prevVertex, currVertex, pnt[0])
nearXY = (nearX, nearY)
onLine = geometry.isPointOnLine(prevVertex, currVertex, nearXY)
if onLine == True:
distPointLine = geometry.lineLength(nearXY, pnt[0])
if threshold == 0:
pointCandidateDict[distPointLine] = nearXY
else:
if distPointLine <= threshold:
pointCandidateDict[distPointLine] = nearXY
prevVertex = currVertex
else: # if there is a criterion
if polylineGraph.edge[edge[0]][edge[1]][criterion] == pnt[1][criterion]:
distToStartNode = geometry.lineLength(pnt[0], list(edge[0]))
distToEndNode = geometry.lineLength(pnt[0], list(edge[1]))
if threshold == 0:
pointCandidateDict[distToStartNode] = edge[0]
pointCandidateDict[distToEndNode] = edge[1]
else:
if distToStartNode <= threshold:
pointCandidateDict[distToStartNode] = edge[0]
if distToEndNode <= threshold:
pointCandidateDict[distToEndNode] = edge[1]
prevVertex = None
for ind, vertex in enumerate(polylineGraph.edge[edge[0]][edge[1]]['coordinates']):
if ind == 0:
prevVertex = vertex
else:
currVertex = vertex
nearX, nearY = geometry.findNearPointOnLine(prevVertex, currVertex, pnt[0])
nearXY = (nearX, nearY)
onLine = geometry.isPointOnLine(prevVertex, currVertex, nearXY)
if onLine == True:
distPointLine = geometry.lineLength(nearXY, pnt[0])
if threshold == 0:
pointCandidateDict[distPointLine] = nearXY
else:
if distPointLine <= threshold:
pointCandidateDict[distPointLine] = nearXY
prevVertex = currVertex
# after looking through the entire edges, select the edge id with the minimum distance
if len(pointCandidateDict) != 0:
finalPnt = pointCandidateDict[min(sorted(pointCandidateDict))]
pntGraph.node[finalPnt] = pntGraph.node.pop(pnt[0])
else:
pntGraph.node.pop(pnt[0])
return pntGraph
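    # Usage sketch (the attribute name 'roadName' and the 50-unit radius are
    # illustrative assumptions):
    #   snapped = near.pointOnPolyline(roads, accidents, criterion='roadName', threshold=50)
    # moves each accident node onto the nearest matching road location.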
class split():
"""
    Class implementing split operations for polyline graph objects
"""
@staticmethod
def _findPointIter(startNode, endNode, distance):
d = geometry.lineLength(startNode, endNode)
if d < distance:
tempNode = endNode
remainD = d
iteration = False
else:
solution = geometry.findPointOnLine(startNode, endNode, distance)
tempNode = tuple(solution)
remainD = 0
iteration = True
return tempNode, remainD, iteration
@staticmethod
def _splitNodes(graph, distance, decimals=6):
splitNodesList = []
for edge in graph.edges(data=True):
# for one edge, find the new edges (sub-edges) and nodes based on the distance
nodeList = [] # node list on a edge list[tuple()]
prevVertex = None
currVertex = None
remainD = 0 # remaining distance to go
distance = distance
distanceToGo = distance
nodeList.append(edge[0]) # starts with the first node
# if the length of the edge is smaller than the distance, add the end node
# without any sub-node
edgeLen = geometry.lineLength(edge[0], edge[1])
# if the length of the edge is greater than the distance
if edgeLen >= distance:
# find the nodes every after the distance
for ind, vertex in enumerate(edge[2]['coordinates']):
iteration = True
if ind == 0:
prevVertex = vertex
else:
currVertex = vertex
iterationNum = 0
while iteration:
if iterationNum != 0:
distanceToGo = distance
distanceToGo -= remainD
tempPoint, remainD, iteration = split._findPointIter(prevVertex, currVertex, distanceToGo)
if tempPoint != currVertex:
nodeList.append((round(tempPoint[0], decimals), round(tempPoint[1], decimals)))
iterationNum+=1
prevVertex = tempPoint
nodeList.append(edge[1]) #endNode
splitNodesList.extend(nodeList)
return list(set(splitNodesList))
@staticmethod
def byDistance(graph, distance, edgeAttrs=[]):
"""
        Method for splitting each polyline edge into pieces of the given length
Parameters
----------
graph: networkx graph
polyline type graph to split
distance: float
Return
------
        splitGraph: networkx graph
split polyline type graph
"""
#==============================================================================
# fromNode, toNode, {Ind, coordinates, distance, center}
# fromNode (tuple): coordinate of start node
# toNode (tuple): coordinate of end node
# Ind (int): identifier
#         coordinates (nested list): a list of coordinates of vertices of the segment
#         distance (float): distance of the segment
# center (list): coordinate of center of the segment
#==============================================================================
splitGraph = nx.DiGraph()
nodeList = split._splitNodes(graph, distance)
splitGraph.add_nodes_from(nodeList)
attrsDict = {}
splitEdgeInd = 0
for edgeInd, edge in enumerate(graph.edges(data=True)):
vertexDict = {}
prevVertex = None
cumulDistance = 0
            # for each edge, locate the nodes between the corresponding vertices
for ind, vertex in enumerate(edge[2]['coordinates']):
if ind == 0:
prevVertex = vertex
# add the first vertex in the vertex list
vertexDict[cumulDistance] = prevVertex
else:
currVertex = vertex
# find the nodes which are between the prevVertex and currVertex
m, b, d = geometry.lineSpec(prevVertex, currVertex)
for node in nodeList:
if m == True: # parallel to y axis
if node[0] == b and \
(((prevVertex[1] <= node[1]) and (node[1] <= currVertex[1])) or\
((currVertex[1] <= node[1]) and (node[1] <= prevVertex[1]))):
# if the node is between the vertices,
# calculate the length between the prevVertex and the node
seglen = geometry.lineLength(prevVertex, node)
# add the node into the dictionary
vertexDict[seglen+cumulDistance] = node
elif m == False:
if node[1] == b and \
(((prevVertex[0] <= node[0]) and (node[0] <= currVertex[0])) or\
((currVertex[0] <= node[0]) and (node[0] <= prevVertex[0]))):
# if the node is between the vertices,
# calculate the length between the prevVertex and the node
seglen = geometry.lineLength(prevVertex, node)
# add the node into the dictionary
vertexDict[seglen+cumulDistance] = node
else:
if(abs(node[1] - (m*node[0] + b)) < 0.05) and \
(((prevVertex[0] <= node[0]) and (node[0] <= currVertex[0])) or\
((currVertex[0] <= node[0]) and (node[0] <= prevVertex[0]))) and\
(((prevVertex[1] <= node[1]) and (node[1] <= currVertex[1])) or\
((currVertex[1] <= node[1]) and (node[1] <= prevVertex[1]))):
# if the node is between the vertices,
# calculate the length between the prevVertex and the node
seglen = geometry.lineLength(prevVertex, node)
# add the node into the dictionary
vertexDict[seglen+cumulDistance] = node
cumulDistance += geometry.lineLength(prevVertex, currVertex)
vertexDict[cumulDistance] = currVertex
prevVertex = currVertex
orderedVertexList = []
for key in sorted(vertexDict.keys()):
orderedVertexList.append(list(vertexDict[key]))
startInd1 = 0
for ind1, vertex1 in enumerate(orderedVertexList):
# check whether vertex1 is in nodelist
endInd1 = 0
for splitNode in nodeList:
if tuple(vertex1) == splitNode:
endInd1 = ind1
coordList = orderedVertexList[startInd1:endInd1 + 1]
startVertex = tuple(orderedVertexList[startInd1])
endVertex = tuple(orderedVertexList[endInd1])
if startVertex != endVertex:
splitGraph.add_edge(startVertex, endVertex, Ind=splitEdgeInd, coordinates = coordList)
attrsDict[(startVertex, endVertex)] = {}
for edgeAttr in edgeAttrs:
attrsDict[(startVertex, endVertex)][edgeAttr] = edge[2][edgeAttr]
startInd1 = endInd1
splitEdgeInd+=1
# print(edgeAttrs)
# if edgeAttrs != []:
# for attr in edgeAttrs:
# print(attr)
# attrDict = {key1:val1[attr] for key1, val1 in attrsDict.items()}
# print(attrDict)
# nx.set_edge_attributes(splitGraph, attr, attrDict)
for edge in splitGraph.edges(data=True):
for attr in edgeAttrs:
edge[2][attr] = attrsDict[(edge[0], edge[1])][attr]
graphCalculate.addDistance(splitGraph)
graphCalculate.addCenter(splitGraph)
return splitGraph
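    # Usage sketch (the 100-unit lixel length and 'roadName' attribute are
    # illustrative assumptions):
    #   lixels = split.byDistance(roads, 100, edgeAttrs=['roadName'])
    # Each output edge gets recomputed 'distance' and 'center' attributes.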
@staticmethod
def splitLineAtPoint(lineGraph, pointGraph, nodeAttrs=[], decimals=6):
"""
        Method for splitting a polyline type graph at the individual points of an input point graph
Parameters
----------
lineGraph: networkx graph
polyline type graph to split
pointGraph: networkx graph
point type graph by which a line graph is split
nodeAttrs: nested list
list of attribute tuple of attribute name and its type in the pointGraph \n
[('ID', 'int'), ('Latitude', 'float')]
Return
------
lineGraphCopy: networkx graph
split polyline type graph based on a set of points
"""
lineGraphCopy = lineGraph.copy()
initConvert = {'str': '', 'int': -1, 'float': -1, 'bool':False}
for node in nodeAttrs:
nx.set_edge_attributes(lineGraphCopy, 'node1_{}'.format(node[0]), initConvert[node[1]])
nx.set_edge_attributes(lineGraphCopy, 'node2_{}'.format(node[0]), initConvert[node[1]])
attrs = set(list(lineGraphCopy.edges(data=True)[0][2].keys()))
attrs = attrs.difference(set(['Ind', 'coordinates']))
# remains = set(['Ind', 'coordinates'] + list(attrs))
# delAttrs = attrs.difference(remains)
#
# for edge in lineGraphCopy.edges(data=True):
# for delAttr in delAttrs:
# del edge[2][delAttr]
pointList = []
for point in pointGraph.nodes(data=True):
pointList.append([point[0][0], point[0][1]])
tempId = len(pointGraph.nodes(data=True))
# for every center point (splitFeatures)
for pointInd, point in enumerate(pointList):
# print('{}/{}'.format(pointInd, len(pointList)))
            # find the edge on which the center point is located
done = False
for edge in lineGraphCopy.edges(data=True):
# among the segments in the edge
prevVertex = None
for ind, vertex in enumerate(edge[2]['coordinates']):
if ind == 0:
prevVertex = vertex
else:
currVertex = vertex
onLine = geometry.isPointOnLine(prevVertex, currVertex, point)
near_x,near_y = geometry.findNearPointOnLine(prevVertex, currVertex, point)
near_dist = geometry.lineLength((near_x, near_y), point)
# if the center point is on the segment
if onLine == True and near_dist < 0.0001:
coord1 = edge[2]['coordinates'][:ind]
coord1.append((near_x, near_y)) # add 'point' in the end of the list
coord2 = edge[2]['coordinates'][ind:]
coord2.insert(0, (near_x, near_y)) # add 'point' at the front of the list
# startNode1 = tuple(coord1[0])
startNode1 = tuple([round(coord, decimals) for coord in coord1[0]])
# endNode1 = tuple(coord1[-1])
endNode1 = tuple([round(coord, decimals) for coord in coord1[-1]])
# startNode2 = tuple(coord2[0])
startNode2 = tuple([round(coord, decimals) for coord in coord2[0]])
# endNode2 = tuple(coord2[-1])
endNode2 = tuple([round(coord, decimals) for coord in coord2[-1]])
attrsDict = {}
attrsDict['Ind'] = tempId
attrsDict['coordinates'] = coord1
for attr in attrs:
attrsDict[attr] = edge[2][attr]
for nodeAttr in nodeAttrs:
key1 = tuple(coord1[0])
# key1 = (round(coord1[0][0], decimals), round(coord1[0][1], decimals))
if key1 in pointGraph.nodes():
attrsDict['node1_{}'.format(nodeAttr[0])] = pointGraph.node[key1][nodeAttr[0]]
key2 = tuple(coord1[-1])
# key2 = (round(coord1[-1][0], decimals), round(coord1[-1][1], decimals))
if key2 in pointGraph.nodes():
attrsDict['node2_{}'.format(nodeAttr[0])] = pointGraph.node[key2][nodeAttr[0]]
if startNode1 != endNode1:
lineGraphCopy.add_edge(startNode1, endNode1, attrsDict)
tempId+=1
attrsDict['Ind'] = tempId
attrsDict['coordinates'] = coord2
for nodeAttr in nodeAttrs:
key1 = tuple(coord2[0])
# key1 = (round(coord2[0][0], decimals), round(coord2[0][1], decimals))
if key1 in pointGraph.nodes():
attrsDict['node1_{}'.format(nodeAttr[0])] = pointGraph.node[key1][nodeAttr[0]]
key2 = tuple(coord2[-1])
# key2 = (round(coord2[-1][0], decimals), round(coord2[-1][1], decimals))
if key2 in pointGraph.nodes():
attrsDict['node2_{}'.format(nodeAttr[0])] = pointGraph.node[key2][nodeAttr[0]]
if startNode2 != endNode2:
lineGraphCopy.add_edge(startNode2, endNode2, attrsDict)
tempId+=1
# remove the original edge
e = (edge[0], edge[1], {'Ind':edge[2]['Ind']})
lineGraphCopy.remove_edge(*e[:2])
prevVertex = currVertex
done = True
break
prevVertex = currVertex
if done == True:
break
graphCalculate.addDistance(lineGraphCopy)
return lineGraphCopy
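    # Usage sketch (variable names are illustrative; nodeAttrs follows the documented
    # [('name', 'type')] format):
    #   lx = split.splitLineAtPoint(lixels, centers, nodeAttrs=[('Ind', 'int')])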
@staticmethod
def splitLineAtPoint_backup(lineGraph, pointGraph, decimals=6):
"""
        Method for splitting a polyline type graph at the individual points of an input point graph
Parameters
----------
lineGraph: networkx graph
polyline type graph to split
pointGraph: networkx graph
point type graph by which a line graph is split
Return
------
lineGraphCopy: networkx graph
split polyline type graph based on a set of points
"""
lineGraphCopy = lineGraph.copy()
attrs = set(list(lineGraphCopy.edges(data=True)[0][2].keys()))
remains = set(['Ind', 'coordinates'] + list(attrs))
delAttrs = attrs.difference(remains)
for edge in lineGraphCopy.edges(data=True):
for delAttr in delAttrs:
del edge[2][delAttr]
pointList = []
for point in pointGraph.nodes(data=True):
pointList.append([point[0][0], point[0][1]])
tempId = len(pointGraph.nodes(data=True))
# for every center point (splitFeatures)
for pointInd, point in enumerate(pointList):
print('{}/{}'.format(pointInd, len(pointList)))
# find the edge that the center point located
done = False
for edge in lineGraphCopy.edges(data=True):
# among the segments in the edge
prevVertex = None
for ind, vertex in enumerate(edge[2]['coordinates']):
if ind == 0:
prevVertex = vertex
else:
currVertex = vertex
onLine = geometry.isPointOnLine(prevVertex, currVertex, point)
# if the center point is on the segment
if onLine == True:
coord1 = edge[2]['coordinates'][:ind]
coord1.append(point) # add 'point' in the end of the list
coord2 = edge[2]['coordinates'][ind:]
coord2.insert(0, point) # add 'point' at the front of the list
# startNode1 = tuple(coord1[0])
startNode1 = tuple([round(coord, decimals) for coord in coord1[0]])
# endNode1 = tuple(coord1[-1])
endNode1 = tuple([round(coord, decimals) for coord in coord1[-1]])
# startNode2 = tuple(coord2[0])
startNode2 = tuple([round(coord, decimals) for coord in coord2[0]])
# endNode2 = tuple(coord2[-1])
endNode2 = tuple([round(coord, decimals) for coord in coord2[-1]])
attrsDict = {}
attrsDict['Ind'] = tempId
attrsDict['coordinates'] = coord1
attrs = attrs.difference(set(['Ind', 'coordinates']))
for attr in attrs:
attrsDict[attr] = edge[2][attr]
lineGraphCopy.add_edge(startNode1, endNode1, attrsDict)
tempId+=1
attrsDict['Ind'] = tempId
attrsDict['coordinates'] = coord2
lineGraphCopy.add_edge(startNode2, endNode2, attrsDict)
tempId+=1
# remove the original edge
e = (edge[0], edge[1], {'Ind':edge[2]['Ind']})
lineGraphCopy.remove_edge(*e[:2])
prevVertex = currVertex
done = True
break
prevVertex = currVertex
if done == True:
break
graphCalculate.addDistance(lineGraphCopy)
return lineGraphCopy
class intersect():
"""
Class to implement intersect analysis
"""
@staticmethod
def _intersections(graph):
intersectSet = set()
uniqueSet = set()
edges = graph.edges(data=True)
for edge in edges:
for vertex in edge[2]['coordinates']:
vertex = tuple(vertex)
if vertex not in uniqueSet:
uniqueSet.add(vertex)
else:
intersectSet.add(vertex)
return intersectSet
@staticmethod
def byGeometry(graph, attrs = [], field=('','')):
"""
Method for implementing intersect with polyline type graph.
The features are recreated based on the geometry (vertices)
Parameters
----------
graph: networkx graph
polyline type input graph
attrs: list of string
attributes to add to a graph after intersection
field: tuple
            when implementing the intersection for a specific attribute,
set this variable, e.g. (field name, value)
Return
------
outputGraph: networkx graph
            polyline type graph split at intersected vertices
"""
edgeId = 0 # for edge's id
outputGraph = nx.DiGraph() # output : networkx graph
intersectedNodes = intersect._intersections(graph)
# preparation for setting attributes
attrDict = {}
for attr in attrs:
attrDict[attr] = {}
## for each edge, check if any of intersected points are on the edge
# if there is a rule to implement the intersect function using a data field
if field == ('', ''):
edges = [edge for edge in graph.edges(data=True)]
else:
edges = []
not_consdiered_edges = []
for edge in graph.edges(data=True):
if edge[2][field[0]] == field[1]:
edges.append(edge)
else:
not_consdiered_edges.append(edge)
            # add the edges that were not considered
for n_edge in not_consdiered_edges:
outputGraph.add_edge(n_edge[0], n_edge[1], Ind= edgeId, \
coordinates = n_edge[2]['coordinates'])
for attr in attrs:
attrDict[attr][n_edge[0], n_edge[1]] = n_edge[2][attr]
edgeId+=1
        # use the edges that were considered
for edge in edges:
intersectedNodeDict = {}
for iNode in intersectedNodes:
# excludes the two end points
for ind, vertex in enumerate(edge[2]['coordinates'][:-1]):
if ind == 0:
pass
else:
if tuple(vertex) == iNode:
intersectedNodeDict[ind] = vertex
coordinatesList = []
coordinatesList.append(list(edge[0]))
for key in sorted(intersectedNodeDict.keys()):
coordinatesList.append(intersectedNodeDict[key])
coordinatesList.append(list(edge[1]))
startVertex = None
for ind, coord in enumerate(coordinatesList):
outputGraph.add_node(tuple(coord))
if ind == 0:
startVertex = coord
else:
endVertex = coord
startInd = edge[2]['coordinates'].index(startVertex)
endInd = edge[2]['coordinates'].index(endVertex)
vertexList = None
if edge[2]['coordinates'][startInd:endInd+1] == []:
vertexList = edge[2]['coordinates'][endInd:startInd+1]
else:
vertexList = edge[2]['coordinates'][startInd:endInd+1]
# add edge
outputGraph.add_edge(tuple(startVertex), tuple(endVertex),\
Ind= edgeId, coordinates= vertexList)
# add attributes
for attr in attrs:
attrDict[attr][tuple(startVertex), tuple(endVertex)] \
=edge[2][attr]
startVertex = endVertex
edgeId +=1
for attr in attrs:
nx.set_edge_attributes(outputGraph, attr, attrDict[attr])
graphCalculate.addDistance(outputGraph)
return outputGraph
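    # Usage sketch (the ('bridge', 0) filter is an illustrative assumption): split only
    # edges whose 'bridge' attribute equals 0 at shared vertices, leaving the rest intact.
    #   planar = intersect.byGeometry(roads, attrs=['roadName'], field=('bridge', 0))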
class spatialjoin():
"""
Class to implement spatialjoin analysis between point and line features
"""
@staticmethod
def _spatialjoin(pntGraph, lineGraph, criterion, threshold):
# init spatialJoinDict showing the edge id matched to each point
# ind - point id, value - edge id
spatialJoinDict = {}
pointGraph = pntGraph.copy()
# pointGraph = nx.DiGraph()
# # create a point graph
# for ind, node in enumerate(pntGraph.nodes(data=True)):
# pointGraph.add_node(node[0], Ind=node[1]['Ind'])
# init 'matNumDict' showing the number of points matched to each edge
# ind - edge id, value - 0 (initial)
matNumDict = {}
for edge in lineGraph.edges(data=True):
matNumDict[edge[2]['Ind']] = 0
idxLine, edgeIdCoordDict = rtree.rtree_polyline(lineGraph)
# match each point to an edge
# first iteration - points
for point in pointGraph.nodes(data=True):
p = point[0][0]
q = point[0][1]
nearPolylineId = list(idxLine.nearest(rtree.pointRect(point[0]), 3))
# second iteration - edge
edgeCandidateDict={}
for edgeId in nearPolylineId:
edge = edgeIdCoordDict[edgeId]
if criterion == '':
# for one point, add the distance to start node and end node to the candidateDict
distToStartNode = geometry.lineLength(point[0], list(edge[0]))
distToEndNode = geometry.lineLength(point[0], list(edge[1]))
if threshold == 0:
edgeCandidateDict[distToStartNode] = edgeId
edgeCandidateDict[distToEndNode] = edgeId
else:
if distToStartNode <= threshold:
edgeCandidateDict[distToStartNode] = edgeId
if distToEndNode <= threshold:
edgeCandidateDict[distToEndNode] = edgeId
prevVertex = None
for ind, vertex in enumerate(lineGraph.edge[edge[0]][edge[1]]['coordinates']):
if ind == 0:
prevVertex = vertex
else:
currVertex = vertex
a = prevVertex[0]
b = prevVertex[1]
c = currVertex[0]
d = currVertex[1]
                            # find the point on the edge that is closest to the point (p, q)
x, y = geometry.findNearPointOnLine((a,b), (c,d), (p,q))
                            # check if the x and y are on the edge
# if true, add it into the dictionary key = distance, value = edge id
onLine = geometry.isPointOnLine(prevVertex, currVertex, [x,y])
if onLine == True:
distPointLine = geometry.lineLength(point[0], [x,y])
if threshold == 0:
edgeCandidateDict[distPointLine] = edgeId
else:
if distPointLine <= threshold:
edgeCandidateDict[distPointLine] = edgeId
prevVertex = currVertex
else: # if there is a criterion
if lineGraph.edge[edge[0]][edge[1]][criterion] == point[1][criterion]:
# for one point, add the distance to start node and end node to the candidateDict
distToStartNode = geometry.lineLength(point[0], list(edge[0]))
distToEndNode = geometry.lineLength(point[0], list(edge[1]))
if threshold == 0:
edgeCandidateDict[distToStartNode] = edgeId
edgeCandidateDict[distToEndNode] = edgeId
else:
if distToStartNode <= threshold:
edgeCandidateDict[distToStartNode] = edgeId
if distToEndNode <= threshold:
edgeCandidateDict[distToEndNode] = edgeId
prevVertex = None
for ind, vertex in enumerate(lineGraph.edge[edge[0]][edge[1]]['coordinates']):
if ind == 0:
prevVertex = vertex
else:
currVertex = vertex
a = prevVertex[0]
b = prevVertex[1]
c = currVertex[0]
d = currVertex[1]
                                # find the point on the edge that is closest to the point (p, q)
x, y = geometry.findNearPointOnLine((a,b), (c,d), (p,q))
                                # check if the x and y are on the edge
# if true, add it into the dictionary key = distance, value = edge id
onLine = geometry.isPointOnLine(prevVertex, currVertex, [x,y])
if onLine == True:
distPointLine = geometry.lineLength(point[0], [x,y])
if threshold == 0:
edgeCandidateDict[distPointLine] = edgeId
else:
if distPointLine <= threshold:
edgeCandidateDict[distPointLine] = edgeId
prevVertex = currVertex
# after looking through the entire edges, select the edge id with the minimum distance
if len(edgeCandidateDict) != 0:
finalEdgeId = edgeCandidateDict[min(sorted(edgeCandidateDict))]
spatialJoinDict[point[1]['Ind']] = finalEdgeId
matNumDict[finalEdgeId] += 1
else:
finalEdgeId = -1
spatialJoinDict[point[1]['Ind']] = finalEdgeId
# assign the point to the corresponding edge (edge count+=1)
return matNumDict, spatialJoinDict
@staticmethod
def nearest(pntGraph, lineGraph, criterion='', threshold=0):
"""
        Method for finding the nearest polyline feature and assigning its id to each point feature
Parameters
----------
pntGraph: networkx graph
point type graph where the id of the nearest edge to be added
        lineGraph: networkx graph
polyline type graph whose id's are searched
criterion: string
a common column name of point and line features
when finding the nearest point
threshold: float
searching radius
"""
_, spatialJoinDict = spatialjoin._spatialjoin(pntGraph, lineGraph, criterion, threshold)
for point in pntGraph.nodes(data=True):
point[1]['nearEdge'] = spatialJoinDict[point[1]['Ind']]
print('The Ind of the nearest polyline is added to the POINT type graph.')
@staticmethod
def joincount(pntGraph, lineGraph, criterion='', threshold=0):
"""
        Method for calculating the number of points assigned to each polyline object in the spatial join process
Parameters
----------
pntGraph: networkx graph
point type graph where the id of the nearest edge to be added
        lineGraph: networkx graph
polyline type graph whose id's are searched
criterion: string
a common column name of point and line features
when finding the nearest point
threshold: float
searching radius
"""
matNumDict, _ = spatialjoin._spatialjoin(pntGraph, lineGraph, criterion, threshold)
for edge in lineGraph.edges(data=True):
edge[2]['joinCount'] = matNumDict[edge[2]['Ind']]
print('The join count is added to the POLYLINE type graph.')
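    # Usage sketch (the 30-unit search radius is an illustrative assumption):
    #   spatialjoin.nearest(crashes, roads, threshold=30)    # adds node[1]['nearEdge']
    #   spatialjoin.joincount(crashes, roads, threshold=30)  # adds edge[2]['joinCount']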
class interpolate():
"""
Class to implement interpolate analysis.
Calculate a density value based on \n
1) a kernel function, \n
http://desktop.arcgis.com/en/arcmap/10.3/tools/spatial-analyst-toolbox/an-overview-of-the-interpolation-tools.htm \n
http://connor-johnson.com/2014/03/20/simple-kriging-in-python/ \n
2) network distance - centerGraph > distance, \n
3) the number of events on the source lixel - splitGraph > pointCount
"""
@staticmethod
def _gaussian(bandwidth, d):
return 1/math.sqrt(2*math.pi)*math.exp((-1/2)*(d/bandwidth)**2)
@staticmethod
def _initiaize(lixelGraph, lxcenterGraph):
# create lookup dictionary key:value = center :(startNode, endNode, joincount)
lookupDict={}
for edge in lixelGraph.edges(data=True):
lookupDict[tuple(edge[2]['center'])] = (edge[0], edge[1], edge[2]['joinCount'])
# neighborsDict key:center point, value: number of neighbors
neighborsDict = {}
for node in lxcenterGraph.nodes():
neighbor = len(lxcenterGraph.neighbors(node))
if neighbor == 1:
neighborsDict[node] = neighbor + 1
else:
neighborsDict[node] = neighbor
return lookupDict, neighborsDict
@staticmethod
def graphKDE(lixelGraph, lxcenterGraph, bandwidth, kernel = 'gaussian', diverge=True):
"""
Method for calculating Kernel Density Estimate using lixel graph and lixel center graph
Parameters
----------
lixelGraph: networkx DiGraph
Polyline graph of split edges with join count of events
lxcenterGraph: networkx DiGraph(Polyline)
Polyline graph split by center points
Return
------
kernelDensity: list(tuple(), double)
list of kernel density estimate [tuple(center), kde]
"""
lookupDict, neighborsDict = interpolate._initiaize(lixelGraph, lxcenterGraph)
kernelDensity = []
for edge in lixelGraph.edges(data=True):
# extract center
center = edge[2]['center']
# find the shortest path for all nodes
pred, dist = nx.shortest_paths.weighted.dijkstra_predecessor_and_distance(lxcenterGraph, tuple(center), weight = 'distance')
# find neighbors within the bandwidth
neighbor = None
sortedDist = sorted(dist.items(), key=lambda x: x[1])
for ind, (key, val) in enumerate(sortedDist):
if val > bandwidth:
neighbor = sortedDist[:ind+1]
break
if ind == len(sortedDist) - 1 and val <= bandwidth:
neighbor = sortedDist
kd = 0
for nb in neighbor:
divergeWeight = 1
if diverge == True:
endNode = nb[0]
currNode = endNode
predNode = None
while True:
predNode = pred[tuple(currNode)]
if predNode != []:
divergeWeight *= neighborsDict[predNode[0]] - 1
currNode = predNode[0]
else:
break
# calculate kernel density for the sample
# neighbor - tuple(center, distance)
try:
# joincount (number of events) based on the center
jc = lookupDict[nb[0]][2]
except KeyError:
pass
else:
d = nb[1]
if kernel == 'gaussian':
kd += jc*interpolate._gaussian(bandwidth,d)/divergeWeight
kernelDensity.append((tuple(center), kd))
return kernelDensity
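    # Pipeline sketch for a network KDE (step order inferred from the docstrings in
    # this module; the 100/300 parameters and variable names are illustrative
    # assumptions, not from the original source):
    #   lixels = split.byDistance(roads, 100)                  # cut edges into lixels
    #   spatialjoin.joincount(events, lixels)                  # edge['joinCount']
    #   lx = split.splitLineAtPoint(lixels, centers)           # centers: lixel midpoints
    #   kde = interpolate.graphKDE(lixels, lx, bandwidth=300)  # [(center, density), ...]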
| [
"[email protected]"
] | |
b3bb6678f2f4c067fa070a410a4fc01f5e5e0f48 | 44c47006a6e4375caa57f4c374982c9903194193 | /week13/hh-back/api/views.py | c370c2076135c25da58df5f9a89336e25295c165 | [] | no_license | akosya-oraz/webka | ffa6fb7f2f788472410ea026340d8c1178fa2fef | 442476ab46f24984c70f7287094d39a3cc4e893d | refs/heads/master | 2023-01-24T09:36:38.301321 | 2020-04-22T19:10:51 | 2020-04-22T19:10:51 | 237,117,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,336 | py | from django.http.response import JsonResponse
from api.models import Vacancy, Company
from api.serializers import CompanySerializer, VacancySerializer
from rest_framework.response import Response
from rest_framework.decorators import api_view
import json
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.decorators import permission_classes
from rest_framework.permissions import IsAuthenticated
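# Endpoint sketch (URL paths are assumptions; the project's urls.py is not part of
# this module):
#   companies()             GET/POST        e.g. /api/companies
#   company(id)             GET/PUT/DELETE  e.g. /api/companies/<id>
#   vacancies_by_company()  GET             e.g. /api/companies/<id>/vacancies
#   VacancyViews            GET/POST        e.g. /api/vacancies
#   VacancyDView            GET/PUT/DELETE  e.g. /api/vacancies/<id>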
@api_view(['GET', 'POST'])
@permission_classes([IsAuthenticated])
def companies(request):
if request.method == 'GET':
companies = Company.objects.all()
serializer = CompanySerializer(companies, many=True)
return JsonResponse(serializer.data, safe=False)
elif request.method == 'POST':
serializer = CompanySerializer(data=json.loads(request.body))
if serializer.is_valid():
serializer.save()
return Response({"message": "New company created!"}, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['GET', 'PUT', 'DELETE'])
def company(request, id):
if request.method == 'GET':
try:
company = Company.objects.get(id=id)
serializer = CompanySerializer(company)
return JsonResponse(serializer.data, safe=False)
except:
return JsonResponse({"error": "No company"})
elif request.method == 'PUT':
try:
company = Company.objects.get(id=id)
except:
return JsonResponse({'error': 'no company with that id'})
serializer = CompanySerializer(instance=company, data=json.loads(request.body))
if serializer.is_valid():
serializer.save()
return Response({'message': 'updated successfully'}, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
elif request.method == 'DELETE':
try:
company = Company.objects.get(id=id)
except:
return JsonResponse({'error': 'no company with that id'})
company.delete()
        return Response({'msg': 'deleted successfully'}, status=status.HTTP_201_CREATED)
@api_view(['GET'])
def vacancies_by_company(request, id):
if request.method == 'GET':
company = Company.objects.get(id=id)
vacancies = company.vacancy_set.all()
serializer = VacancySerializer(vacancies, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class VacancyViews(APIView):
def get(self, request):
try:
vacancies = Vacancy.objects.all()
serializer = VacancySerializer(vacancies, many=True)
return JsonResponse(serializer.data, safe=False)
except:
return JsonResponse({"message": "no data"}, safe=False)
def post(self, request):
serializer = VacancySerializer(data=json.loads(request.body))
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response({'Error': serializer.errors})
class VacancyDView(APIView):
def get(self, request, id):
try:
vacancy = Vacancy.objects.get(id=id)
except:
            return Response({'Error': 'no vacancy'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
serializer = VacancySerializer(vacancy)
return JsonResponse(serializer.data, safe=False)
def put(self, request, id):
try:
vacancy = Vacancy.objects.get(id=id)
except:
            return Response({'Error': 'no vacancy'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
serializer = VacancySerializer(instance=vacancy, data=json.loads(request.body))
if serializer.is_valid():
serializer.save()
return Response({"message": "updated succesfully"})
return Response({"error": serializer.errors})
def delete(self, request, id):
try:
vacancy = Vacancy.objects.get(id=id)
except:
            return Response({'Error': 'no vacancy'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
vacancy.delete()
return Response({"message": "deleted succesfully"}) | [
"[email protected]"
] | |
1ab48f13edcbee28e40014c1f71cf0a7dcce4b2f | 1f75435873986f9496dd96baa3824c99e1c2e707 | /MicroBlog/spiders/weiboCrawl.py | fabb3010c54cf2806e512e409da2a7161d64a7ca | [] | no_license | Ynglylife/Microblog | e8e8893d6294069280569ad950f1ece56ba58184 | 13b02e251043dad5397db815e25d96a2b4dc7662 | refs/heads/master | 2020-04-18T06:39:22.346940 | 2019-01-24T09:01:38 | 2019-01-24T09:01:38 | 167,330,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,623 | py | from scrapy.spider import Spider,Request
from MicroBlog.items import MicroblogItem
import json
import re
# note: the `scrapy.spider` import above is the legacy path; on current Scrapy the
# equivalent is `from scrapy import Spider, Request`. The unused imports (settings,
# BeautifulSoup, os.path, time/datetime, csv) have been dropped.
base_usrinfo_url = 'https://m.weibo.cn/api/container/getIndex?uid=%s&containerid=100505%s'
base_usr_mblogs_url = 'https://m.weibo.cn/api/container/getIndex?uid=%s&containerid=%s&page=%s'
base_mblog_url = 'https://m.weibo.cn/status/%s'
base_comments_url = "https://m.weibo.cn/api/comments/show?id=%s&page=%s"
base_reposts_url = 'https://m.weibo.cn/api/statuses/repostTimeline?id=%s&page=%s'
class WeiboCrawl(Spider):
name = 'weiboCrawl'
def __init__(self,mblog_id=None,*args,**kwargs):
super(WeiboCrawl, self).__init__(*args,**kwargs)
self.mblog_url = base_mblog_url % (mblog_id)
def start_requests(self):
yield Request(self.mblog_url,callback=self.parse)
def parse(self, response):
pattern = r'var \$render_data = \[((.|\s)*?})\]'
raw_data = re.search(pattern,response.text)
raw_data = raw_data.group(1)
json_data = json.loads(raw_data)
status = json_data['status']
        try:
            is_retweeted = status['retweeted_status']
        except KeyError:
            pass
        else:
            # follow the original (retweeted) status instead of the repost;
            # `yield` (not `return value`) is required inside this generator,
            # otherwise Scrapy never schedules the request
            mblog_id = is_retweeted['id']
            mblog_url = base_mblog_url % (mblog_id)
            yield Request(mblog_url, callback=self.parse)
            return
items = self.putItem(status)
yield items
if status['reposts_count']:
reposts_url = base_reposts_url % (status['id'],str(1))
yield Request(reposts_url,callback=self.getReposters)
def putItem(self,status):
user_info = status['user']
items = MicroblogItem()
items['mblog_id'] = status['id']
items['mblog_created_at'] = status['created_at']
items['mblog_reposts_count'] = status['reposts_count']
items['mblog_comments_count'] = status['comments_count']
items['mblog_attitudes_count'] = status['attitudes_count']
try:
items['mblog_raw_text'] = status['raw_text']
except:
items['mblog_raw_text'] = status['text']
try:
items['mblog_pid'] = status['pid']
except:
items['mblog_pid'] = None
try:
items['mblog_oid'] = status['retweeted_status']['id']
except:
items['mblog_oid'] = status['id']
items['usr_id'] = user_info['id']
items['usr_gender'] = user_info['gender']
items['usr_name'] = user_info['screen_name']
items['usr_blog_count'] = user_info['statuses_count']
items['usr_followers_count'] = user_info['followers_count']
items['usr_follow_count'] = user_info['follow_count']
print(items)
return items
def getReposters(self,response):
json_data = json.loads(response.text)
data = json_data['data']
max = data['max']
reposts_data = data['data']
pattern = 'page=(\d+)'
result = re.search(pattern,response.url)
page_id = result.group(1)
for item in reposts_data:
items = self.putItem(item)
yield items
if int(page_id) < int(max):
reposts_url = re.sub(pattern,'page='+str(int(page_id)+1),response.url)
yield Request(reposts_url,callback=self.getReposters)
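# Run sketch (the status id is a placeholder, not a real example from the source):
#   scrapy crawl weiboCrawl -a mblog_id=4300000000000000
# The spider seeds itself from base_mblog_url % mblog_id and then walks the repost
# tree page by page via getReposters.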
| [
"[email protected]"
] | |
fc1b30113008ca57da9b812c530213bac3d8dbb2 | 83af0fab306c8e5729a9f855f96664a751ccb033 | /BrainFuck_Interpreter.py | ead29b9cae8f5ae746776926a08b4c435fc90247 | [] | no_license | aizjForever/brainfuck_interpreter | 89c11c9f2de7b51f21fc1a39e050bed907bc1d51 | 1b953f35b57df956719391ee60ee235b4bc2fa08 | refs/heads/master | 2020-04-01T12:21:04.532915 | 2018-10-16T01:15:06 | 2018-10-16T01:15:06 | 153,202,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,895 | py | import sys
def readFile(path):
with open(path, "rt") as f:
return f.read()
class Brainfuck(object):
def __init__(self,code,bits = 8,enable_precheck = False):
def pre_processing(code):
res = ""
for c in code:
if c in "+-><[].,":
res += c
return res
self.code = pre_processing(code)
self.pointer = 0
self.data_array = dict()
self.output = []
self.num_brackets = 0
self.enable_precheck = enable_precheck
self.bits = bits
def run(self):
def plus(self):
if self.pointer not in self.data_array:
self.data_array[self.pointer] = 0
            # integer division keeps cell values ints (chr() in dot() needs an int)
            if (self.data_array[self.pointer] == 2 ** self.bits // 2 - 1):
                self.data_array[self.pointer] = -(2 ** self.bits // 2)
            else:
                self.data_array[self.pointer] += 1
def minus(self):
if self.pointer not in self.data_array:
self.data_array[self.pointer] = 0
            if (self.data_array[self.pointer] == -(2 ** self.bits // 2)):
                self.data_array[self.pointer] = 2 ** self.bits // 2 - 1
else:
self.data_array[self.pointer] -= 1
def greater(self):
self.pointer += 1
def smaller(self):
self.pointer -= 1
def find_next_r_bracket(index,code):
count = 1
i = index + 1
while count != 0 and i < len(code) :
if code[i] == "]":
count -= 1
elif code[i] == "[":
count += 1
i += 1
            return i - 1 if count == 0 else None  # None when there is no matching ']'
def is_valid(index,code):
if index < 0: return False
if index >= len(code): return False
return True
def find_prev_l_bracket(index,code):
count = 1
i = index - 1
while count != 0 and i >= 0:
if code[i] == "[":
count -= 1
elif code[i] == "]":
count += 1
i -= 1
            return i + 1 if count == 0 else None  # None when there is no matching '['
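        # e.g. for the code "+[>[-]<]", find_next_r_bracket(1, code) returns 7 -- the ']'
        # matching the '[' at index 1 -- correctly skipping the inner pair at indices 3..5.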
def dot(self):
val = self.data_array[self.pointer] if (self.pointer in self.data_array) else 0
#print(chr(val))
self.output.append(chr(val))
def comma(self):
char = input("Please enter a character:")
self.data_array[self.pointer] = ord(char)
def pre_check(code):
i = 0
for c in code:
if c == "[":
i += 1
elif c == "]":
i -= 1
return i == 0
if self.enable_precheck and (not pre_check(self.code)):
print("Invalid Code : Brackets not in pair!")
return
index = 0
while index >= 0 and index < len(self.code):
command = self.code[index]
if command == '+':
plus(self)
index += 1
elif command == "-":
minus(self)
index += 1
elif command == ">":
greater(self)
index += 1
elif command == "<":
smaller(self)
index += 1
elif command == "[":
if (self.pointer not in self.data_array) or self.data_array[self.pointer] == 0:
a = find_next_r_bracket(index,self.code)
if a != None:
index = a + 1
else:
index = 0
break
else:
index += 1
elif command == "]":
if (self.pointer in self.data_array) and self.data_array[self.pointer] != 0:
a = find_prev_l_bracket(index,self.code)
if a != None:
index = a + 1
else:
index = 0
break
else:
index += 1
elif command == ".":
dot(self)
index += 1
elif command == ",":
comma(self)
index += 1
else:
break
if index == len(self.code):
print("Output: " + "".join(self.output))
return
else:
print("Code Invalid!")
return
if len(sys.argv) != 4 or sys.argv[3] not in ["True","False"] or int(sys.argv[2])<=0:
print("Invalid Input!")
else:
code = sys.argv[1]
code = readFile(code)
bits_num = int(sys.argv[2])
ep = sys.argv[3]
programme1 = Brainfuck(code,bits = bits_num,enable_precheck = True if ep == "True" else False)
print(programme1.code)
programme1.run()
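# Example invocation (assuming a Brainfuck source file named hello.bf exists):
#   python BrainFuck_Interpreter.py hello.bf 8 True
# runs hello.bf with 8-bit wrapping cells and bracket pre-checking enabled.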
| [
"[email protected]"
] | |
e969d49d903b9647ad4af2a70a644dac4050d0cd | f8832de3fa7f9fbb3da1db28bb7063088749a85a | /wipa2_show_project/wipa2_show/admin.py | d99d8d5f705911b2efd23021df889bed341a1fa8 | [
"MIT"
] | permissive | wipa2/wipa2.github.io | 10112e16100c26d019b4f3a44f618f4013971eed | 5f5ab3a45f2c7a67adbd180c037016c693149d65 | refs/heads/master | 2022-12-10T09:11:35.576436 | 2018-12-20T04:08:42 | 2018-12-20T04:08:42 | 152,339,119 | 0 | 0 | MIT | 2022-12-08T01:29:26 | 2018-10-10T00:27:39 | Python | UTF-8 | Python | false | false | 1,007 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.contrib.admin import site
from django.urls import path
from django.views import generic
from wipa2_show.models import *
class WorksListView(generic.ListView):
template_name = 'works_list.html'
context_object_name = 'works_list'
def get_queryset(self):
return Work.objects.all()
class WIPAdminSite(AdminSite):
def get_urls(self):
urls = super(WIPAdminSite, self).get_urls()
urls += [
path('works_list', self.admin_view(WorksListView.as_view()), name='works_list'),
]
return urls
admin.site = WIPAdminSite()
class WorkPhotoInline(admin.TabularInline):
model = WorkPhoto
extra = 3
class DesignerInline(admin.TabularInline):
model = Designer
class WorkAdmin(admin.ModelAdmin):
inlines = [ WorkPhotoInline, DesignerInline ]
admin.site.register(Work, WorkAdmin)
| [
"[email protected]"
] | |
1872d3b9826fea180fdfe3e97a5a1a5fb6af6eac | 752a46ea23b77b459172a015210d61d7b3c84c6a | /Translating_RNA_into_Protein.py | d86b3049283de37e1aa8f3e2d52f8642b9300dae | [] | no_license | Ashley-Pritchard/Rosalind | f6c896d4e8988cdaca5e209f1b2bf356fd3ffa75 | d77e75b622de1caee9c5f957aa6b08e2cdfa5447 | refs/heads/master | 2020-06-28T02:35:09.857154 | 2019-08-13T21:58:02 | 2019-08-13T21:58:02 | 200,121,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | f = open('rosalind.txt', 'r')
sequence = f.read().strip()  # drop the trailing newline from the file
amino_acids = {'AUA':'I', 'AUC':'I', 'AUU':'I', 'AUG':'M', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACU':'T', 'AAC':'N', 'AAU':'N', 'AAA':'K', 'AAG':'K', 'AGC':'S', 'AGU':'S', 'AGA':'R', 'AGG':'R', 'CUA':'L', 'CUC':'L', 'CUG':'L', 'CUU':'L', 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCU':'P', 'CAC':'H', 'CAU':'H', 'CAA':'Q', 'CAG':'Q', 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGU':'R', 'GUA':'V', 'GUC':'V', 'GUG':'V', 'GUU':'V', 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCU':'A', 'GAC':'D', 'GAU':'D', 'GAA':'E', 'GAG':'E', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGU':'G', 'UCA':'S', 'UCC':'S', 'UCG':'S', 'UCU':'S', 'UUC':'F', 'UUU':'F', 'UUA':'L', 'UUG':'L', 'UAC':'Y', 'UAU':'Y', 'UAA':'_', 'UAG':'_', 'UGC':'C', 'UGU':'C', 'UGA':'_', 'UGG':'W'}
protein = ''
for i in range(0, len(sequence) - 2, 3):
    codon = sequence[i:i+3]
    amino = amino_acids[codon]
    if amino == '_':  # stop codon: end of translation
        break
    protein += amino
print(protein)
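# Worked example: the RNA string 'AUGGCCUAA' translates to 'MA' -- AUG -> M,
# GCC -> A, and UAA is a stop codon (mapped to '_' above), so translation ends there.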
| [
"[email protected]"
] | |
a90336f41588ab43df39daf15ea0567c1cdeddd5 | 172d5ac6e918b32231714a853b0fdb0ea6a22710 | /Amulya/pattern1.py | d11bf0a0f83c26fe0d64cb9c793b63eb3825d81a | [] | no_license | nikithasake/Python | ea2b4a8e01a72c7f59ff440b237d00c8ec306c14 | 0472bed9c0b5b671e4df67d0cc06b834d6fbf4bb | refs/heads/main | 2023-08-14T13:58:52.811882 | 2021-09-21T16:59:35 | 2021-09-21T16:59:35 | 391,322,571 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #Pattern Program
'''
1
1 2
1 2 3
1 2 3 4
1 2 3 4 5'''
n=int(input("Enter row:"))
for i in range(1,n+1):
for j in range(1,i+1):
print(j,end=" ")
print()
| [
"[email protected]"
] | |
4c05d6c39aaaddaaabc1e0f9c40b29d94136f0db | e0ed932fc2e4edb953cc4e423362dabc19083008 | /python/002_note/learn_匿名函数.py | f7241f78ff170bbb6a1fae78ba677adbd3bd687f | [] | no_license | glfAdd/note | 90baee45003ac3998d898dcfbc618caa28f33b74 | 19a9aff61450be25904bff0fe672f660d49d90ff | refs/heads/main | 2023-05-27T13:28:36.092352 | 2023-05-24T03:35:58 | 2023-05-24T03:35:58 | 240,066,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | """
https://zhuanlan.zhihu.com/p/78698111
"""
def test_1(a, b):
return a + b
a_1 = test_1(1, 5)
print(a_1)
a_2 = lambda a, b: a + b
print(a_2(11, 22))
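# Lambdas are most useful as short inline key functions, e.g. with some sample data:
pairs = [(2, 'b'), (1, 'c'), (3, 'a')]
print(sorted(pairs, key=lambda p: p[1]))  # sort by second element: [(3, 'a'), (2, 'b'), (1, 'c')]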
| [
"[email protected]"
] | |
9048b0e04febbf2e2680231dda7673346eaf03c9 | 816bfcd598e0f2ae46eaad2eaec43c9398ae1121 | /napalmv1.py | e75561d1912ebfd33f2e9764fac0637840a5e12a | [] | no_license | Terenzano/PythonNetworkEngineers | 72a79cdfd20da06dd7b1cd1a4684b5a7e4626ab5 | 7adee328458fddf270a9ce40c985f64e0309a433 | refs/heads/master | 2023-01-23T10:52:04.697074 | 2020-11-15T15:07:02 | 2020-11-15T15:07:02 | 284,260,973 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | import json
from napalm import get_network_driver
driver = get_network_driver('ios')
iosvl2 = driver('192.168.122.72', 'cisco', 'cisco')
iosvl2.open()
ios_output = iosvl2.get_facts()
print (json.dumps(ios_output, indent=4))
ios_output = iosvl2.get_interfaces()
print (json.dumps(ios_output, indent=4))
ios_output = iosvl2.get_interfaces_counters()
print (json.dumps(ios_output, indent=4))
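# Close the NAPALM session when finished; the driver keeps an SSH connection open.
iosvl2.close()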
| [
"[email protected]"
] | |
6586d88345b36755cb433ae6a996c875e3bdb836 | b22c848d8d3257b58e25248721fa419cb2abaeb9 | /skateteam/admin.py | 7a2b47aa715cdf38d01b75a2e8414c17f471e36f | [] | no_license | AlexNexton/soleki | 69983de38a1ca709b4c1bc093f75debf105c106e | 9c37cc5b6924a332ef1f0d534ed8087ec07d1034 | refs/heads/master | 2023-04-21T07:47:57.966541 | 2021-05-14T04:34:21 | 2021-05-14T04:34:21 | 358,224,825 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from django.contrib import admin
from .models import Skateteam, Teamcat
# Register your models here.
class SkateteamAdmin(admin.ModelAdmin):
list_display = (
'sku',
'name',
'teamcat',
'rating',
'image',
)
ordering = ('sku',)
class TeamcatAdmin(admin.ModelAdmin):
list_display = (
'friendly_name',
'name',
)
admin.site.register(Skateteam, SkateteamAdmin)
admin.site.register(Teamcat, TeamcatAdmin)
| [
"[email protected]"
] | |
0708a0a845af193cd7bec3d84b88899f4a7b7c19 | 16caad6ae23609e5f9bdf9cc7a568db6bfd4c57d | /src/coreclr/scripts/superpmi_collect_setup.py | a6a9b84e7e8ba8cb4a9f6efa09e5864d18723a0b | [
"MIT"
] | permissive | Emmy96/runtime | eae07c6d6e23a54802f5df2e0d278136e1d043da | 6527f540e4b50bc84eb72705f80d3f2bdd57473b | refs/heads/main | 2023-09-05T00:31:29.490643 | 2021-11-03T04:27:34 | 2021-11-03T04:27:34 | 424,103,302 | 1 | 0 | NOASSERTION | 2021-11-03T05:33:36 | 2021-11-03T05:33:35 | null | UTF-8 | Python | false | false | 23,416 | py | #!/usr/bin/env python3
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
#
# Title : superpmi_collect_setup.py
#
# Notes:
#
# Script to setup directory structure required to perform SuperPMI collection in CI.
# It does the following steps:
# 1. It creates `correlation_payload_directory` that contains files from CORE_ROOT, src\coreclr\scripts.
# This directory is the one that is sent to all the helix machines that performs SPMI collection.
# 2. It clones dotnet/jitutils, builds it and then copies the `pmi.dll` to `correlation_payload_directory` folder.
# This file is needed to do pmi SPMI runs.
# 3. The script takes `input_artifacts` parameter which contains managed .dlls and .exes on
# which SPMI needs to be run. This script will partition these folders into equal buckets of approximately `max_size`
# bytes and stores them under `payload` directory. Each sub-folder inside `payload` directory is sent to individual
# helix machine to do SPMI collection on. E.g. for `input_artifacts` to be run on libraries, the parameter would be path to
# `CORE_ROOT` folder and this script will copy `max_size` bytes of those files under `payload/libraries/0/binaries`,
# `payload/libraries/1/binaries` and so forth.
# 4. Lastly, it sets the pipeline variables.
#
# Below are the helix queues it sets depending on the OS/architecture:
# | Arch | windows | Linux |
# |-------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------|
# | x86 | Windows.10.Amd64.X86.Rt | |
# | x64 | Windows.10.Amd64.X86.Rt | Ubuntu.1804.Amd64 |
# | arm | - | (Ubuntu.1804.Arm32)[email protected]/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7-bfcd90a-20200121150440 |
# | arm64 | Windows.10.Arm64 | (Ubuntu.1804.Arm64)[email protected]/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-20210531091519-97d8652 |
#
################################################################################
################################################################################
import argparse
import os
import stat
from coreclr_arguments import *
from azdo_pipelines_util import run_command, copy_directory, copy_files, set_pipeline_variable, ChangeDir, TempDir
# Start of parser object creation.
parser = argparse.ArgumentParser(description="description")
parser.add_argument("-source_directory", help="path to source directory")
parser.add_argument("-core_root_directory", help="path to core_root directory")
parser.add_argument("-arch", help="Architecture")
parser.add_argument("-mch_file_tag", help="Tag to be used to mch files")
parser.add_argument("-collection_name", help="Name of the SPMI collection to be done (e.g., libraries, tests)")
parser.add_argument("-collection_type", help="Type of the SPMI collection to be done (crossgen, crossgen2, pmi)")
parser.add_argument("-input_directory", help="directory containing assemblies for which superpmi collection to be done")
parser.add_argument("-max_size", help="Max size of each partition in MB")
is_windows = platform.system() == "Windows"
native_binaries_to_ignore = [
"api-ms-win-core-console-l1-1-0.dll",
"api-ms-win-core-datetime-l1-1-0.dll",
"api-ms-win-core-debug-l1-1-0.dll",
"api-ms-win-core-errorhandling-l1-1-0.dll",
"api-ms-win-core-file-l1-1-0.dll",
"api-ms-win-core-file-l1-2-0.dll",
"api-ms-win-core-file-l2-1-0.dll",
"api-ms-win-core-handle-l1-1-0.dll",
"api-ms-win-core-heap-l1-1-0.dll",
"api-ms-win-core-interlocked-l1-1-0.dll",
"api-ms-win-core-libraryloader-l1-1-0.dll",
"api-ms-win-core-localization-l1-2-0.dll",
"api-ms-win-core-memory-l1-1-0.dll",
"api-ms-win-core-namedpipe-l1-1-0.dll",
"api-ms-win-core-processenvironment-l1-1-0.dll",
"api-ms-win-core-processthreads-l1-1-0.dll",
"api-ms-win-core-processthreads-l1-1-1.dll",
"api-ms-win-core-profile-l1-1-0.dll",
"api-ms-win-core-rtlsupport-l1-1-0.dll",
"api-ms-win-core-string-l1-1-0.dll",
"api-ms-win-core-synch-l1-1-0.dll",
"api-ms-win-core-synch-l1-2-0.dll",
"api-ms-win-core-sysinfo-l1-1-0.dll",
"api-ms-win-core-timezone-l1-1-0.dll",
"api-ms-win-core-util-l1-1-0.dll",
"api-ms-win-crt-conio-l1-1-0.dll",
"api-ms-win-crt-convert-l1-1-0.dll",
"api-ms-win-crt-environment-l1-1-0.dll",
"api-ms-win-crt-filesystem-l1-1-0.dll",
"api-ms-win-crt-heap-l1-1-0.dll",
"api-ms-win-crt-locale-l1-1-0.dll",
"api-ms-win-crt-math-l1-1-0.dll",
"api-ms-win-crt-multibyte-l1-1-0.dll",
"api-ms-win-crt-private-l1-1-0.dll",
"api-ms-win-crt-process-l1-1-0.dll",
"api-ms-win-crt-runtime-l1-1-0.dll",
"api-ms-win-crt-stdio-l1-1-0.dll",
"api-ms-win-crt-string-l1-1-0.dll",
"api-ms-win-crt-time-l1-1-0.dll",
"api-ms-win-crt-utility-l1-1-0.dll",
"clretwrc.dll",
"clrgc.dll",
"clrjit.dll",
"clrjit_unix_arm_arm.dll",
"clrjit_unix_arm_arm64.dll",
"clrjit_unix_arm_x64.dll",
"clrjit_unix_arm_x86.dll",
"clrjit_unix_arm64_arm64.dll",
"clrjit_unix_arm64_x64.dll",
"clrjit_unix_armel_arm.dll",
"clrjit_unix_armel_arm64.dll",
"clrjit_unix_armel_x64.dll",
"clrjit_unix_armel_x86.dll",
"clrjit_unix_osx_arm64_arm64.dll",
"clrjit_unix_osx_arm64_x64.dll",
"clrjit_unix_x64_arm64.dll",
"clrjit_unix_x64_x64.dll",
"clrjit_win_arm_arm.dll",
"clrjit_win_arm_arm64.dll",
"clrjit_win_arm_x64.dll",
"clrjit_win_arm_x86.dll",
"clrjit_win_arm64_arm64.dll",
"clrjit_win_arm64_x64.dll",
"clrjit_win_x64_arm64.dll",
"clrjit_win_x64_x64.dll",
"clrjit_win_x86_arm.dll",
"clrjit_win_x86_arm64.dll",
"clrjit_win_x86_x64.dll",
"clrjit_win_x86_x86.dll",
"clrjit_universal_arm_arm.dll",
"clrjit_universal_arm_arm64.dll",
"clrjit_universal_arm_x64.dll",
"clrjit_universal_arm_x86.dll",
"clrjit_universal_arm64_arm64.dll",
"clrjit_universal_arm64_x64.dll",
"coreclr.dll",
"CoreConsole.exe",
"coredistools.dll",
"CoreRun.exe",
"CoreShim.dll",
"createdump.exe",
"crossgen.exe",
"crossgen2.exe",
"dbgshim.dll",
"ilasm.exe",
"ildasm.exe",
"jitinterface_arm.dll",
"jitinterface_arm64.dll",
"jitinterface_x64.dll",
"jitinterface_x86.dll",
"KernelTraceControl.dll",
"KernelTraceControl.Win61.dll",
"mcs.exe",
"Microsoft.DiaSymReader.Native.amd64.dll",
"Microsoft.DiaSymReader.Native.x86.dll",
"mscordaccore.dll",
"mscordbi.dll",
"mscorrc.dll",
"msdia140.dll",
"R2RDump.exe",
"R2RTest.exe",
"superpmi.exe",
"superpmi-shim-collector.dll",
"superpmi-shim-counter.dll",
"superpmi-shim-simple.dll",
"System.IO.Compression.Native.dll",
"ucrtbase.dll",
"xunit.console.exe",
]
MAX_FILES_COUNT = 1500
def setup_args(args):
""" Setup the args for SuperPMI to use.
Args:
args (ArgParse): args parsed by arg parser
Returns:
args (CoreclrArguments)
"""
coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False,
require_built_test_dir=False, default_build_type="Checked")
coreclr_args.verify(args,
"source_directory",
lambda source_directory: os.path.isdir(source_directory),
"source_directory doesn't exist")
coreclr_args.verify(args,
"core_root_directory",
lambda core_root_directory: os.path.isdir(core_root_directory),
"core_root_directory doesn't exist")
coreclr_args.verify(args,
"arch",
lambda unused: True,
"Unable to set arch")
coreclr_args.verify(args,
"mch_file_tag",
lambda unused: True,
"Unable to set mch_file_tag")
coreclr_args.verify(args,
"collection_name",
lambda unused: True,
"Unable to set collection_name")
coreclr_args.verify(args,
"collection_type",
lambda unused: True,
"Unable to set collection_type")
coreclr_args.verify(args,
"input_directory",
lambda input_directory: os.path.isdir(input_directory),
"input_directory doesn't exist")
coreclr_args.verify(args,
"max_size",
lambda max_size: max_size > 0,
"Please enter valid positive numeric max_size",
modify_arg=lambda max_size: int(
max_size) * 1000 * 1000 if max_size is not None and max_size.isnumeric() else 0
# Convert to MB
)
return coreclr_args
def get_files_sorted_by_size(src_directory, exclude_directories, exclude_files):
""" For a given src_directory, returns all the .dll files sorted by size.
Args:
src_directory (string): Path of directory to enumerate.
exclude_directories ([string]): Directory names to exclude.
exclude_files ([string]): File names to exclude.
"""
def sorter_by_size(pair):
""" Sorts the pair (file_name, file_size) tuple in descending order of file_size
Args:
pair ([(string, int)]): List of tuple of file_name, file_size
"""
pair.sort(key=lambda x: x[1], reverse=True)
return pair
filename_with_size = []
for file_path, dirs, files in os.walk(src_directory, topdown=True):
# Credit: https://stackoverflow.com/a/19859907
dirs[:] = [d for d in dirs if d not in exclude_directories]
for name in files:
# Make the exclude check case-insensitive
exclude_files_lower = [filename.lower() for filename in exclude_files]
if name.lower() in exclude_files_lower:
continue
curr_file_path = os.path.join(file_path, name)
if not os.path.isfile(curr_file_path):
continue
if not name.endswith(".dll") and not name.endswith(".exe"):
continue
size = os.path.getsize(curr_file_path)
filename_with_size.append((curr_file_path, size))
return sorter_by_size(filename_with_size)
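# The result is a list of (path, size) tuples, largest first, e.g. (hypothetical names):
# [('.../System.Private.CoreLib.dll', 9500000), ('.../xunit.console.exe', 120000), ...]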
def first_fit(sorted_by_size, max_size):
""" Given a list of file names along with size in descending order, divides the files
in number of buckets such that each bucket doesn't exceed max_size. Since this is a first-fit
approach, it doesn't guarantee to find the bucket with tighest spot available.
Args:
sorted_by_size ((string, int)): (file_name, file_size) tuple
max_size (int): Maximum size (in bytes) of each bucket.
Returns:
        [{int, [string]}]: Returns a dictionary mapping partition index to the list of files falling in that bucket.
"""
partitions = {}
for curr_file in sorted_by_size:
_, file_size = curr_file
# Find the right bucket
found_bucket = False
if file_size < max_size:
for p_index in partitions:
total_in_curr_par = sum(n for _, n in partitions[p_index])
if ((total_in_curr_par + file_size) < max_size) and (len(partitions[p_index]) < MAX_FILES_COUNT):
partitions[p_index].append(curr_file)
found_bucket = True
break
if not found_bucket:
partitions[len(partitions)] = [curr_file]
total_size = 0
for p_index in partitions:
partition_size = sum(n for _, n in partitions[p_index])
print("Partition {0}: {1} files with {2} bytes.".format(p_index, len(partitions[p_index]), partition_size))
total_size += partition_size
print("Total {0} partitions with {1} bytes.".format(str(len(partitions)), total_size))
return partitions
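# Worked example (hypothetical sizes): with max_size=10 and input sizes [6, 5, 3, 2],
# first_fit yields {0: [6, 3], 1: [5, 2]} -- the 3-byte file is placed back into the
# first bucket even though the 5-byte file had already opened a second one.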
def partition_files(src_directory, dst_directory, max_size, exclude_directories=[],
exclude_files=native_binaries_to_ignore):
""" Copy bucketized files based on size to destination folder.
Args:
src_directory (string): Source folder containing files to be copied.
dst_directory (string): Destination folder where files should be copied.
max_size (int): Maximum partition size in bytes
exclude_directories ([string]): List of folder names to be excluded.
exclude_files ([string]): List of files names to be excluded.
"""
print('Partitioning files from {0} to {1}'.format(src_directory, dst_directory))
sorted_by_size = get_files_sorted_by_size(src_directory, exclude_directories, exclude_files)
partitions = first_fit(sorted_by_size, max_size)
index = 0
for p_index in partitions:
file_names = [curr_file[0] for curr_file in partitions[p_index]]
curr_dst_path = os.path.join(dst_directory, str(index), "binaries")
copy_files(src_directory, curr_dst_path, file_names)
index += 1
def setup_microbenchmark(workitem_directory, arch):
""" Perform setup of microbenchmarks
Args:
workitem_directory (string): Path to work
arch (string): Architecture for which dotnet will be installed
"""
performance_directory = os.path.join(workitem_directory, "performance")
run_command(
["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/performance", performance_directory])
with ChangeDir(performance_directory):
dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch)
dotnet_install_script = os.path.join(performance_directory, "scripts", "dotnet.py")
if not os.path.isfile(dotnet_install_script):
print("Missing " + dotnet_install_script)
return
run_command(
get_python_name() + [dotnet_install_script, "install", "--architecture", arch, "--install-dir",
dotnet_directory, "--verbose"])
def get_python_name():
"""Gets the python name
Returns:
[string]: Returns the appropriate python name depending on the OS.
"""
if is_windows:
return ["py", "-3"]
else:
return ["python3"]
def main(main_args):
""" Main entrypoint
Args:
main_args ([type]): Arguments to the script
"""
coreclr_args = setup_args(main_args)
source_directory = coreclr_args.source_directory
# CorrelationPayload directories
correlation_payload_directory = os.path.join(coreclr_args.source_directory, "payload")
superpmi_src_directory = os.path.join(source_directory, 'src', 'coreclr', 'scripts')
superpmi_dst_directory = os.path.join(correlation_payload_directory, "superpmi")
arch = coreclr_args.arch
helix_source_prefix = "official"
creator = ""
ci = True
if is_windows:
helix_queue = "Windows.10.Arm64" if arch == "arm64" else "Windows.10.Amd64.X86.Rt"
else:
if arch == "arm":
helix_queue = "(Ubuntu.1804.Arm32)[email protected]/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7-bfcd90a-20200121150440"
elif arch == "arm64":
helix_queue = "(Ubuntu.1804.Arm64)[email protected]/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-20210531091519-97d8652"
else:
helix_queue = "Ubuntu.1804.Amd64"
# create superpmi directory
print('Copying {} -> {}'.format(superpmi_src_directory, superpmi_dst_directory))
copy_directory(superpmi_src_directory, superpmi_dst_directory, match_func=lambda path: any(path.endswith(extension) for extension in [".py"]))
if is_windows:
acceptable_copy = lambda path: any(path.endswith(extension) for extension in [".py", ".dll", ".exe", ".json"])
else:
# Need to accept files without any extension, which is how executable file's names look.
acceptable_copy = lambda path: (os.path.basename(path).find(".") == -1) or any(path.endswith(extension) for extension in [".py", ".dll", ".so", ".json"])
print('Copying {} -> {}'.format(coreclr_args.core_root_directory, superpmi_dst_directory))
copy_directory(coreclr_args.core_root_directory, superpmi_dst_directory, match_func=acceptable_copy)
# Copy all the test files to CORE_ROOT
# The reason is there are lot of dependencies with *.Tests.dll and to ensure we do not get
# Reflection errors, just copy everything to CORE_ROOT so for all individual partitions, the
# references will be present in CORE_ROOT.
if coreclr_args.collection_name == "libraries_tests":
print('Copying {} -> {}'.format(coreclr_args.input_directory, superpmi_dst_directory))
def make_readable(folder_name):
"""Make file executable by changing the permission
Args:
folder_name (string): folder to mark with 744
"""
if is_windows:
return
print("Inside make_readable")
run_command(["ls", "-l", folder_name])
for file_path, dirs, files in os.walk(folder_name, topdown=True):
for d in dirs:
os.chmod(os.path.join(file_path, d),
# read+write+execute for owner
(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
# read for group
(stat.S_IRGRP) |
# read for other
(stat.S_IROTH))
for f in files:
os.chmod(os.path.join(file_path, f),
# read+write+execute for owner
(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
# read for group
(stat.S_IRGRP) |
# read for other
(stat.S_IROTH))
run_command(["ls", "-l", folder_name])
make_readable(coreclr_args.input_directory)
copy_directory(coreclr_args.input_directory, superpmi_dst_directory, match_func=acceptable_copy)
# Workitem directories
workitem_directory = os.path.join(source_directory, "workitem")
input_artifacts = ""
if coreclr_args.collection_name == "benchmarks":
# Setup microbenchmarks
setup_microbenchmark(workitem_directory, arch)
else:
# Setup for pmi/crossgen runs
# Clone and build jitutils
try:
with TempDir() as jitutils_directory:
run_command(
["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/jitutils", jitutils_directory])
# Make sure ".dotnet" directory exists, by running the script at least once
dotnet_script_name = "dotnet.cmd" if is_windows else "dotnet.sh"
dotnet_script_path = os.path.join(source_directory, dotnet_script_name)
run_command([dotnet_script_path, "--info"], jitutils_directory)
# Set dotnet path to run build
os.environ["PATH"] = os.path.join(source_directory, ".dotnet") + os.pathsep + os.environ["PATH"]
build_file = "build.cmd" if is_windows else "build.sh"
run_command([os.path.join(jitutils_directory, build_file), "-p"], jitutils_directory)
copy_files(os.path.join(jitutils_directory, "bin"), superpmi_dst_directory, [os.path.join(jitutils_directory, "bin", "pmi.dll")])
except PermissionError as pe_error:
# Details: https://bugs.python.org/issue26660
print('Ignoring PermissionError: {0}'.format(pe_error))
# NOTE: we can't use the build machine ".dotnet" to run on all platforms. E.g., the Windows x86 build uses a
# Windows x64 .dotnet\dotnet.exe that can't load a 32-bit shim. Thus, we always use corerun from Core_Root to invoke crossgen2.
# The following will copy .dotnet to the correlation payload in case we change our mind, and need or want to use it for some scenarios.
# # Copy ".dotnet" to correlation_payload_directory for crossgen2 job; it is needed to invoke crossgen2.dll
# if coreclr_args.collection_type == "crossgen2":
# dotnet_src_directory = os.path.join(source_directory, ".dotnet")
# dotnet_dst_directory = os.path.join(correlation_payload_directory, ".dotnet")
# print('Copying {} -> {}'.format(dotnet_src_directory, dotnet_dst_directory))
# copy_directory(dotnet_src_directory, dotnet_dst_directory, verbose_output=False)
# payload
pmiassemblies_directory = os.path.join(workitem_directory, "pmiAssembliesDirectory")
input_artifacts = os.path.join(pmiassemblies_directory, coreclr_args.collection_name)
exclude_directory = ['Core_Root'] if coreclr_args.collection_name == "coreclr_tests" else []
exclude_files = native_binaries_to_ignore
if coreclr_args.collection_type == "crossgen2":
print('Adding exclusions for crossgen2')
# Currently, trying to crossgen2 R2RTest\Microsoft.Build.dll causes a pop-up failure, so exclude it.
exclude_files += ["Microsoft.Build.dll"]
if coreclr_args.collection_name == "libraries_tests":
# libraries_tests artifacts contains files from core_root folder. Exclude them.
core_root_dir = coreclr_args.core_root_directory
exclude_files += [item for item in os.listdir(core_root_dir)
if os.path.isfile(os.path.join(core_root_dir, item)) and (item.endswith(".dll") or item.endswith(".exe"))]
partition_files(coreclr_args.input_directory, input_artifacts, coreclr_args.max_size, exclude_directory,
exclude_files)
# Set variables
print('Setting pipeline variables:')
set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory)
set_pipeline_variable("WorkItemDirectory", workitem_directory)
set_pipeline_variable("InputArtifacts", input_artifacts)
set_pipeline_variable("Python", ' '.join(get_python_name()))
set_pipeline_variable("Architecture", arch)
set_pipeline_variable("Creator", creator)
set_pipeline_variable("Queue", helix_queue)
set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)
set_pipeline_variable("MchFileTag", coreclr_args.mch_file_tag)
################################################################################
# __main__
################################################################################
if __name__ == "__main__":
args = parser.parse_args()
sys.exit(main(args))
| [
"[email protected]"
] | |
f4afe2ce92f22ba1ed821039ba79f0211280a3fa | f4fce41f2b3cba606d2a36075de356434602d1c0 | /xwing_rulebook/faqs/migrations/0005_auto_20170518_1809.py | f6c13b351f83d19ca9db9915b5099b9ce0faead5 | [] | no_license | lvisintini/xwing-rulebook | 24a392c12a2b13027e7cf65b9cc41e8a21585e3c | 6dc7ac58f962a4928843364dcfc077638384dc16 | refs/heads/master | 2021-01-22T19:41:32.960048 | 2018-07-25T10:53:35 | 2018-07-25T10:53:35 | 85,224,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-18 18:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('faqs', '0004_auto_20170518_1110'),
]
operations = [
migrations.AlterField(
model_name='faq',
name='topic',
field=models.CharField(choices=[('general', 'General'), ('actions-and-game-effects', 'Actions and game effects'), ('combat', 'Combat'), ('attack-timing-chart', 'Timing chart for performing an attack'), ('missions', 'Missions'), ('movement', 'Movement'), ('range-measurement', 'Range measurement')], default='general', max_length=24),
),
]
| [
"[email protected]"
] | |
184dc6a22c1c622a78ebabdaefcfcd9e62b19680 | 65306b41168a5afa6fc80904cc0fbf737939a01a | /scale/scheduler/migrations/0004_auto_20170209_1616.py | dd8b7af0a3fbf8bc3008c90332e4c38a95df28af | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | kfconsultant/scale | 9e5df45cd36211d1bc5e946cf499a4584a2d71de | 28618aee07ceed9e4a6eb7b8d0e6f05b31d8fd6b | refs/heads/master | 2020-12-07T00:04:37.737556 | 2020-01-06T12:57:03 | 2020-01-06T12:57:03 | 232,587,229 | 0 | 0 | Apache-2.0 | 2020-01-08T14:53:36 | 2020-01-08T14:53:35 | null | UTF-8 | Python | false | false | 483 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0003_auto_20160201_0846'),
]
operations = [
migrations.RemoveField(
model_name='scheduler',
name='max_node_errors',
),
migrations.RemoveField(
model_name='scheduler',
name='node_error_period',
),
]
| [
"[email protected]"
] | |
c732150cd76a35276b2bb893b8d0d273ed06f108 | c31cb569af4ffc14406ee8697c397489edb90b44 | /test/contrib/test_control_flow.py | 1a4392273bc9a10877f6d4cee5e53d447e993131 | [
"Apache-2.0"
] | permissive | fehiepsi/numpyro | ff39d4ebf09cb3cfa4afb6a3c5befdd1c9262213 | 9ca63dad442d6b84f846b24b60fdd00552f7456d | refs/heads/master | 2023-08-24T14:30:38.270239 | 2021-05-27T20:28:51 | 2021-05-27T20:28:51 | 176,783,350 | 0 | 1 | Apache-2.0 | 2020-09-10T23:17:08 | 2019-03-20T17:23:15 | Python | UTF-8 | Python | false | false | 6,259 | py | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from numpy.testing import assert_allclose
import pytest
from jax import random
import jax.numpy as jnp
import numpyro
from numpyro.contrib.control_flow import cond, scan
import numpyro.distributions as dist
from numpyro.handlers import seed, substitute, trace
from numpyro.infer import MCMC, NUTS, SVI, Predictive, Trace_ELBO
from numpyro.infer.util import potential_energy
def test_scan():
def model(T=10, q=1, r=1, phi=0.0, beta=0.0):
def transition(state, i):
x0, mu0 = state
x1 = numpyro.sample("x", dist.Normal(phi * x0, q))
mu1 = beta * mu0 + x1
y1 = numpyro.sample("y", dist.Normal(mu1, r))
numpyro.deterministic("y2", y1 * 2)
return (x1, mu1), (x1, y1)
mu0 = x0 = numpyro.sample("x_0", dist.Normal(0, q))
y0 = numpyro.sample("y_0", dist.Normal(mu0, r))
_, xy = scan(transition, (x0, mu0), jnp.arange(T))
x, y = xy
return jnp.append(x0, x), jnp.append(y0, y)
T = 10
num_samples = 100
kernel = NUTS(model)
mcmc = MCMC(kernel, num_warmup=100, num_samples=num_samples)
mcmc.run(random.PRNGKey(0), T=T)
assert set(mcmc.get_samples()) == {"x", "y", "y2", "x_0", "y_0"}
mcmc.print_summary()
samples = mcmc.get_samples()
x = samples.pop("x")[0] # take 1 sample of x
# this tests for the composition of condition and substitute
# this also tests if we can use `vmap` for predictive.
future = 5
predictive = Predictive(
numpyro.handlers.condition(model, {"x": x}),
samples,
return_sites=["x", "y", "y2"],
parallel=True,
)
result = predictive(random.PRNGKey(1), T=T + future)
expected_shape = (num_samples, T + future)
assert result["x"].shape == expected_shape
assert result["y"].shape == expected_shape
assert result["y2"].shape == expected_shape
assert_allclose(result["x"][:, :T], jnp.broadcast_to(x, (num_samples, T)))
assert_allclose(result["y"][:, :T], samples["y"])
@pytest.mark.xfail(raises=RuntimeError)
def test_nested_scan_smoke():
def model():
def outer_fn(y, val):
def body_fn(z, val):
z = numpyro.sample("z", dist.Normal(z, 1))
return z, z
y = numpyro.sample("y", dist.Normal(y, 1))
_, zs = scan(body_fn, y, None, 4)
return y, zs
x = numpyro.sample("x", dist.Normal(0, 1))
_, zs = scan(outer_fn, x, None, 3)
return zs
data = jnp.arange(12).reshape((3, 4))
# we can scan but can't substitute values through multiple levels of scan
with trace(), seed(rng_seed=0), substitute(data={"z": data}):
zs = model()
assert_allclose(zs, data)
def test_scan_constrain_reparam_compatible():
def model(T, q=1, r=1, phi=0.0, beta=0.0):
x = 0.0
mu = 0.0
for i in range(T):
x = numpyro.sample(f"x_{i}", dist.LogNormal(phi * x, q))
mu = beta * mu + x
numpyro.sample(f"y_{i}", dist.Normal(mu, r))
def fun_model(T, q=1, r=1, phi=0.0, beta=0.0):
def transition(state, i):
x, mu = state
x = numpyro.sample("x", dist.LogNormal(phi * x, q))
mu = beta * mu + x
numpyro.sample("y", dist.Normal(mu, r))
return (x, mu), None
scan(transition, (0.0, 0.0), jnp.arange(T))
T = 10
params = {}
for i in range(T):
params[f"x_{i}"] = (i + 1.0) / 10
params[f"y_{i}"] = -i / 5
fun_params = {"x": jnp.arange(1, T + 1) / 10, "y": -jnp.arange(T) / 5}
actual_log_joint = potential_energy(fun_model, (T,), {}, fun_params)
expected_log_joint = potential_energy(model, (T,), {}, params)
assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_without_stack():
def multiply_and_add_repeatedly(K, c_in):
def iteration(c_prev, c_in):
c_next = jnp.dot(c_prev, K) + c_in
return c_next, (c_next,)
_, (ys,) = scan(iteration, init=jnp.asarray([1.0, 0.0]), xs=c_in)
return ys
result = multiply_and_add_repeatedly(
K=jnp.asarray([[0.7, 0.3], [0.3, 0.7]]), c_in=jnp.asarray([[1.0, 0.0]])
)
assert_allclose(
result,
[[1.7, 0.3]],
)
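# Sanity check for the expected value: dot([1, 0], K) = [0.7, 0.3]; adding the
# input c_in[0] = [1, 0] gives [1.7, 0.3], matching the assertion above.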
def test_cond():
def model():
def true_fun(_):
x = numpyro.sample("x", dist.Normal(4.0))
numpyro.deterministic("z", x - 4.0)
def false_fun(_):
x = numpyro.sample("x", dist.Normal(0.0))
numpyro.deterministic("z", x)
cluster = numpyro.sample("cluster", dist.Normal())
cond(cluster > 0, true_fun, false_fun, None)
def guide():
m1 = numpyro.param("m1", 2.0)
s1 = numpyro.param("s1", 0.1, constraint=dist.constraints.positive)
m2 = numpyro.param("m2", 2.0)
s2 = numpyro.param("s2", 0.1, constraint=dist.constraints.positive)
def true_fun(_):
numpyro.sample("x", dist.Normal(m1, s1))
def false_fun(_):
numpyro.sample("x", dist.Normal(m2, s2))
cluster = numpyro.sample("cluster", dist.Normal())
cond(cluster > 0, true_fun, false_fun, None)
svi = SVI(model, guide, numpyro.optim.Adam(1e-2), Trace_ELBO(num_particles=100))
params, losses = svi.run(random.PRNGKey(0), num_steps=2500)
predictive = Predictive(
model,
guide=guide,
params=params,
num_samples=1000,
return_sites=["cluster", "x", "z"],
)
result = predictive(random.PRNGKey(0))
assert result["cluster"].shape == (1000,)
assert result["x"].shape == (1000,)
assert result["z"].shape == (1000,)
mcmc = MCMC(
NUTS(model),
num_warmup=500,
num_samples=2500,
num_chains=4,
chain_method="sequential",
)
mcmc.run(random.PRNGKey(0))
x = mcmc.get_samples()["x"]
assert x.shape == (10_000,)
assert_allclose(
[x[x > 2.0].mean(), x[x > 2.0].std(), x[x < 2.0].mean(), x[x < 2.0].std()],
[4.01, 0.965, -0.01, 0.965],
atol=0.1,
)
assert_allclose([x.mean(), x.std()], [2.0, jnp.sqrt(5.0)], atol=0.5)
| [
"[email protected]"
] | |
e960e713ed05947ea465e38afc27667ff5ebde33 | d76f6ad574aae67ecc0f9b964a8416413ce59833 | /leaparticulator/test/data/profile_p2p_logs.py | 3b953d56756dbca90f8b0a48ec091a4f91b6631b | [] | no_license | keryil/leaparticulatorqt | 7990c38e107a5b4dd53c488e386432efdf448a24 | cd3fe7bf61ccac41fbbe2248f2b9d81b49fbcb40 | refs/heads/master | 2021-08-07T11:45:42.983068 | 2020-04-29T17:03:51 | 2020-04-29T17:03:51 | 64,754,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | from line_profiler import LineProfiler
from leaparticulator.data.functions import *
def do_profile(follow=[]):
def inner(func):
def profiled_func(*args, **kwargs):
try:
profiler = LineProfiler()
profiler.add_function(func)
for f in follow:
profiler.add_function(f)
profiler.enable_by_count()
return func(*args, **kwargs)
finally:
profiler.print_stats()
return profiled_func
return inner
fname = './leaparticulator/test/test_data/P2P-160209.144607.realdata.1.exp.log'
@do_profile(follow=[process_p2p_log])
def profile_fromFile():
fromFile(fname)
@do_profile(follow=[toPandas_p2p, process_p2p_log])
def profile_toPandas_p2p():
toPandas_p2p(fname, nphases=10)
# self.test_file = './leaparticulator/test/test_data/P2P-160203.170804.REALDATA.1.exp.log'
# else:
# self.test_file = './leaparticulator/test/test_data/P2P-160204.121143.1.exp.log'
# print "Data file to use in the following test: {}".format(self.test_file)
if __name__ == "__main__":
profile_toPandas_p2p()
| [
"[email protected]"
] | |
be5ff5c51a901f01d761497daf0e79330141733b | 6403ade027404096a9ca3ad5ed92e92eeecf344c | /Game/Actions/battle.py | 5100f628426e1d3f0919e33868d118efadc3baf8 | [] | no_license | Larionov0/Stedunto | 8ff8e614f3799616551a2469a5a3d5b75ce6bf10 | 544343de95eae99823039fd33f871d01b3266f84 | refs/heads/master | 2023-02-16T18:47:21.965534 | 2021-01-15T16:03:53 | 2021-01-15T16:03:53 | 322,740,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | from ..Heroes.hero import Hero
from ..Interface.interface import InterfaceManager
interface = InterfaceManager.instance()
class HeroBattle:
@staticmethod
def heroes_starts_battle(hero1: Hero, hero2: Hero):
for hero in hero1.alive_team:
hero.before_battle(hero2)
for hero in hero2.alive_team:
hero.before_battle(hero1)
while True:
for hero in hero1.alive_team:
if len(hero2.alive_team) != 0:
hero.make_move()
for hero in hero2.alive_team:
if len(hero1.alive_team) != 0:
hero.make_move()
if len(hero1.alive_team + hero2.alive_team) == 0:
return 0
elif not hero1.is_team_alive:
interface.print_msg(f"{hero2.colored_name} победил!")
return hero2
elif not hero2.is_team_alive:
interface.print_msg(f"{hero1.colored_name} победил!")
return hero1
| [
"[email protected]"
] | |
a900dd069e481a00a126498627e640c565cbc7d5 | 5c892bd3a0244b0698f4432eb314aac14b4bb6be | /Ch. 7 - Signal Synthesis/sineAngle.py | 7b187147676c566d0f4a6a8d83844b88fe0195ba | [] | no_license | jlqian98/HackAudioPythonCode | bcf2406e8fd4e6ecd34eaf3464dae0c364ccfdd0 | 6ab887f5f4cec211b8da5dca1b1036b8bb25a753 | refs/heads/main | 2023-08-28T01:40:22.602263 | 2021-11-13T18:52:50 | 2021-11-13T18:52:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | # SINEANGLE
# This script demonstrates a method to synthesize sine waves
# using an angle of rotation
#
# See also SINESYNTHESIS
import matplotlib.pyplot as plt
import numpy as np
# Declare initial parameters
f = 2 # frequency in Hz
phi = 0 # phase offset
Fs = 1000 # sampling rate
Ts = 1/Fs # sample period
lenSec = 1 # seconds
N = Fs * lenSec # number of samples
t = np.arange(0, N) * Ts # array of sample times
# Calculate angle of rotation
angleChange = f*Ts*2*np.pi
currentAngle = phi
out = np.zeros([N, 1])
# Update the value of the currentAngle each iteration through loop
for n in range(N):
out[n][0] = np.sin(currentAngle)
# Update phase angle for next loop
currentAngle += angleChange
if currentAngle > 2 * np.pi: # Ensure angle is not > 2*pi
currentAngle -= 2 * np.pi
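# Equivalent vectorized form (no per-sample loop): out = np.sin(2 * np.pi * f * t + phi)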
# Plot synthesized signal
plt.plot(t, out)
plt.xlabel('Time (sec)')
plt.ylabel('Amplitude')
plt.legend(['out'])
plt.show()
| [
"[email protected]"
] | |
a426da23e0cb9acd952b5ddf686bb4d74eb522ac | e1af4069695e6bb93e4dc8d9eae2af5d130eadbf | /metafilter/webserve.py | 0eb31965b25cdb69288071fdff7588d7d73a0cf9 | [] | no_license | pombredanne/metafilter | 5b153f3be74c8cad457ab6e4ee748dd900973fed | dfbc01877a3020f7fe58b9fda3e14ed073684f25 | refs/heads/master | 2021-01-16T19:53:06.614469 | 2013-06-23T10:58:26 | 2013-06-23T10:58:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,141 | py | from flask import (
Flask,
g,
render_template,
request,
redirect,
make_response,
url_for,
jsonify)
from metafilter.model import Node, Query, Session, Tag, set_dsn
from metafilter.model import queries, nodes, tags as tag_model
import logging
LOG = logging.getLogger(__name__)
app = Flask(__name__)
class FlaskConfig(object):
DEBUG = True
SECRET_KEY = 'YSY*H3ZGFC-;@8F.QG*V@M9<MTXF=?N(OR<6O.%CKZD=\CM(O'
app.config.from_object(FlaskConfig())
@app.before_request
def before_request():
g.sess = Session()
@app.after_request
def after_request(response):
g.sess.commit()
g.sess.close()
return response
@app.route('/query')
@app.route('/query/<path:query>')
def query(query="root"):
result = nodes.subdirs(g.sess, query)
if not result:
result = []
result += nodes.from_incremental_query(g.sess, query)
try:
result = result.order_by([
Node.mimetype != 'other/directory',
Node.uri])
    except Exception as exc:
LOG.info(exc)
if request.values.get('format', '') == 'json':
return jsonify(dict(
result=[{'download_url': url_for('download', path=_.path)} for _ in result]
))
return render_template("entries.html", entries=result, query=query)
@app.route('/tags')
def tags():
tags = tag_model.tag_counts(g.sess)
return render_template("tags.html", tags=tags.execute())
@app.route('/delete_from_disk/<path>')
def delete_from_disk(path):
nodes.delete_from_disk(g.sess, path)
return redirect(request.referrer)
@app.route('/thumbnail/<path>')
def thumbnail(path):
import Image
from cStringIO import StringIO
node = nodes.by_path(g.sess, path)
try:
im = Image.open(node.uri)
im.thumbnail((128, 128), Image.ANTIALIAS)
tmp = StringIO()
im.save(tmp, "JPEG")
response = make_response(tmp.getvalue())
response.headers['Content-Type'] = 'image/jpeg'
return response
except Exception, exc:
return str(exc)
@app.route('/download/<path>')
def download(path):
node = nodes.by_path(g.sess, path)
data = open(node.uri, 'rb').read()
if request.values.get('format', '') == 'json':
return jsonify(
data=data.encode('base64')
)
response = make_response(data)
response.headers['Content-Type'] = node.mimetype
return response
@app.route('/set_rating', methods=["POST"])
def set_rating():
from metafilter.model.nodes import set_rating
set_rating(request.form["path"], int(request.form['value']))
return "OK"
@app.route('/tag_all', methods=["POST"])
def tag_all():
node_qry = nodes.from_incremental_query(g.sess, request.form["query"])
tags = []
for tagname in request.form['tags'].split(','):
tagname = tagname.strip()
tag = Tag.find(g.sess, tagname)
if not tag:
tag = Tag(tagname)
tags.append(tag)
for node in node_qry:
if node.is_dir():
continue
node.tags.extend(tags)
return redirect(url_for('query', query=request.form['query']))
@app.route('/new_query', methods=["POST"])
def new_query():
qry = Query(request.form['query'])
g.sess.add(qry)
return redirect(request.referrer)
@app.route("/")
def list_queries():
qry = g.sess.query(Query)
qry = qry.order_by(Query.query)
return render_template("queries.html", saved_queries=qry)
@app.route("/save_query", methods=["POST"])
def save_query():
old_query = request.form['id']
new_query = request.form['value']
    queries.update(g.sess, old_query, new_query)
return new_query
@app.route("/save_tags", methods=["POST"])
def save_tags():
uri = request.form['id']
tags_value = request.form['value']
tags = [x.strip() for x in tags_value.split(',')]
    nodes.set_tags(g.sess, uri, tags)
return ', '.join(tags)
@app.route("/delete_query/<query>")
def delete_query(query):
    queries.delete(g.sess, query)
return "OK"
@app.route("/duplicates")
def duplicates():
return render_template("duplicates.html",
duplicates=nodes.duplicates(g.sess))
@app.route("/acknowledge_duplicate/<md5>")
def acknowledge_duplicate(md5):
nodes.acknowledge_duplicate(g.sess, md5)
return redirect(url_for('duplicates'))
@app.route("/view/<path:query>/<int:index>")
def view(query, index=0):
result = nodes.subdirs(g.sess, query)
if not result:
result = []
result += nodes.from_incremental_query(g.sess, query)
result = filter(lambda x: x.mimetype != 'other/directory', result)
try:
result = result.order_by([
Node.mimetype != 'other/directory',
Node.uri])
    except Exception as exc:
LOG.info(exc)
return render_template("view.html",
node=result[index],
query=query,
index=index,
)
@app.route("/file_uri/<path:query>/<int:index>")
def file_uri(query, index):
"""
Retrieve the file URI for the given query on the given index
"""
result = nodes.subdirs(g.sess, query)
if not result:
result = []
result += nodes.from_incremental_query(g.sess, query)
result = filter(lambda x: x.mimetype in (
'image/jpeg',
'image/png',
'image/jpg'), result)
try:
        result = result.order_by([Node.mimetype != 'other/directory', Node.uri])
    except Exception as exc:
LOG.info(exc)
if index > len(result)-1:
return jsonify(dict(
url = None
))
return jsonify(dict(
url = url_for('download', path=result[index].path)
))
@app.route("/fullscreen/<path:query>")
def fullscreen(query):
"""
Displays an entry in a fullscreen view
"""
return render_template("fullscreen.html",
query = query,
)
if __name__ == "__main__":
app.debug = True
logging.basicConfig(level=logging.DEBUG)
set_dsn("postgresql://filemeta:filemeta@localhost:5432/filemeta")
app.run(host="0.0.0.0", port=8181, threaded=True)
| [
"[email protected]"
] | |
f81cfa9aea7193947f5a4846d43936a049cf9ac5 | 9cdf9b022527efb72d2e9b9210cbdb4971e00223 | /getting_ec2_info_by_client_method.py | c18f029118cf160cdb4580be18cd5ac21a9bf946 | [] | no_license | saurjain108/awsboto3 | a52b21e57619ca28fcb1fc12264f545c5faba5a6 | 0bbfcd1ae2775e208004d3616655bfc6ff867826 | refs/heads/master | 2022-07-01T17:59:28.217737 | 2020-05-11T16:06:42 | 2020-05-11T16:06:42 | 260,275,279 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | import boto3
from pprint import pprint #this is used for formatting the output text
aws_man_con = boto3.session.Session(profile_name="root")
ec2_con_cli = aws_man_con.client('ec2')
resource = ec2_con_cli.describe_instances()['Reservations']
#pprint(resource)
for i in resource:
for j in (i['Instances']):
# pprint(j)
print("The instance id is: {}\nThe image id is: {}\nThe launch time is: {}".format(j['InstanceId'],j['ImageId'],j['LaunchTime']))
print("---------------------------------------")
#listing volumes
volume_response = ec2_con_cli.describe_volumes()['Volumes']
#pprint(volume_response)
#print('================')
for x in (volume_response):
print("The volume id is: {}\n The availability zone is {}\n The volume type is {}".format(x['VolumeId'],x['AvailabilityZone'],x['VolumeType']))
print("============================")
#this is also one way of looping for just volume ids
for y in (volume_response):
print(y['VolumeId'])
| [
"[email protected]"
] | |
2f477b2c9634b3953e550ab62fe6996a47276a77 | c9f2a64cb2c30bf47f94d6940521f00cc72474f3 | /ejercicios_videos/ChainofResponsability.py | 135c66cd5a36120efd79eea92731cc32856c5d36 | [] | no_license | sandramoctezuma/Unidad3_SBMV | b16470f37b76fdbc3e7259e3e0c11caf7e037525 | a5b7f18ac6143d1536933487a82b9b39d5b3c41e | refs/heads/master | 2021-05-06T17:34:49.163282 | 2017-11-24T05:55:07 | 2017-11-24T05:55:07 | 111,878,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | # Sandra Bibiana Moctezuma Vargas
# Grupo: GITI9072-e
class Handler: #abstrac handler
"""Abstracc handler"""
def __init__(self, successor):
self._successor = successor
#Define who is the next headler
def handle(self, request):
handled = self._handle(request) #if hadled, stop here
#otherwise, keep going
if not handled:
self._successor.handle(request)
def _handle(self, request):
raise NotImplementedError('Must provide implementation in subclass!')
class ConcreteHandler1(Handler): #Inherist from the abstract handler
"""Concrete Handler 1"""
def _handle(self, request):
if 0 < request <= 10: #provide a condition for handling
print("Request{} handled in handler 1 ".format(request))
return True #indicates that the has been handler
class DefaultHandler(Handler): #inherist from the abstrac handler
"""Default handler"""
def _handle(self, request):
"""If there is no handler avilable"""
#no conditional cheking since this is a default handler
print("End of chain, no handler for {}".format(request))
return True #indicates that the request has been handler
class Client: #using handler
def __init__(self):
self.handler = ConcreteHandler1(DefaultHandler(None))
#create handlers and use them in a sequence you want
#Note that the default handler has no successor
def delegate(self, requests): #Send your requests one at a time for handler has no successor
for request in requests:
self.handler.handle(request)
#create Client
c = Client()
#Create request
requests = [2, 5, 30]
#Send the request
c.delegate(requests) | [
"[email protected]"
] | |
61d12a44c6177b03fb5edc606a8a2a864591c389 | 2ca672c02614fabab6206e7babbe0e1cdc69bf1e | /.vscode/test.py | a26d70723e06fbfc13320c372e17d0d262dba6ea | [] | no_license | AllenGFLiu/leetcode | 8ee20c804aa00aac8fca97ca9dc10ea5347469f3 | d5d6d5a984049e1f3de240f25067d8e868ddcac8 | refs/heads/master | 2020-09-04T20:07:01.047642 | 2020-01-18T06:50:10 | 2020-01-18T06:50:10 | 219,878,362 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py | from typing import Optional
class Node:
def __init__(self, value: int):
self.val = value
self.next = None
class MyCircularDeque:
def __init__(self, k: int):
"""
Initialize your data structure here. Set the size of the deque to be k.
"""
self.head = Node(None)
self.tail = self.head
self.count = 0
self.capacity = k
def insertFront(self, value: int) -> bool:
"""
Adds an item at the front of Deque. Return true if the operation is successful.
"""
if self.count == self.capacity:
return False
new_node = Node(value)
new_node.next = self.head
self.head = new_node
self.count += 1
return True
def insertLast(self, value: int) -> bool:
"""
Adds an item at the rear of Deque. Return true if the operation is successful.
"""
if self.count == self.capacity:
return False
new_node = Node(value)
self.tail.next = new_node
self.tail = new_node
self.count += 1
return True
def deleteFront(self) -> bool:
"""
Deletes an item from the front of Deque. Return true if the operation is successful.
"""
if self.count == 0:
return False
self.head = self.head.next
self.count -= 1
return True
def deleteLast(self) -> bool:
"""
Deletes an item from the rear of Deque. Return true if the operation is successful.
"""
if self.count == 0:
return False
node = self.head
while node:
if node.next == self.tail:
node.next = None
self.tail = node
self.count -= 1
return True
node = node.next
def getFront(self) -> int:
"""
Get the front item from the deque.
"""
if self.count:
return self.head.val
else:
return -1
def getRear(self) -> int:
"""
Get the last item from the deque.
"""
if self.count:
return self.tail.val
else:
return -1
def isEmpty(self) -> bool:
"""
Checks whether the circular deque is empty or not.
"""
if self.count:
return False
else:
return True
def isFull(self) -> bool:
"""
Checks whether the circular deque is full or not.
"""
if self.count == self.capacity:
return True
else:
return False
# Your MyCircularDeque object will be instantiated and called as such:
if __name__ == '__main__':
obj = MyCircularDeque(3)
param_1 = obj.insertFront(value)
param_2 = obj.insertLast(value)
param_3 = obj.deleteFront()
param_4 = obj.deleteLast()
param_5 = obj.getFront()
param_6 = obj.getRear()
param_7 = obj.isEmpty()
param_8 = obj.isFull() | [
"[email protected]"
] | |
23353616677794c761d515939a0905ee2aeeda27 | 911fb895f2f0a22787b2dd88c6f2f4991feffd30 | /amp_monitor.py | f4298dedb43bcea6b773a1520e53694929588acb | [
"MIT"
] | permissive | chagar01/amp_monitor | 6d7e1d76fe09f0d9e246b52312c6ee9ffcdba92b | 576f54c5de7699818eec450ff2c93cde3a94eaa4 | refs/heads/master | 2020-04-15T01:03:32.713288 | 2019-01-06T13:23:13 | 2019-01-06T13:23:13 | 164,262,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,245 | py | #!/usr/bin/python -u
# MIT License
# Copyright (c) 2019 Charles Garcia-Tobin
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from time import sleep
import argparse
import re
import os
class AmpService:
Off = 0 # 0 sound not playing,
On = 1 # 1 sound playing,
    Hysteresis = 2 # 2 hysteresis (sound no longer playing, but waiting until the hysteresis period completes before switching GPIO off)
def __init__(self):
self.state = AmpService.Off
self.hyst_count = 0
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--monitor",
help = "specify which file to monitor to detect sound output",
default = "/proc/asound/card1/pcm0p/sub0/status")
parser.add_argument("--grepon",
help = "grep for string that should match when music is playing",
default = "RUNNING")
parser.add_argument("--grepoff",
help = "grep for string that should match when music is NOT playing",
default = "closed")
parser.add_argument("--pollinterval",
help = "milliseconds between polling intervals", type = int,
default = 2000)
parser.add_argument("--hysteresis",
help = "number of polling intervals to wait before turning off", type = int,
default = 30)
parser.add_argument("--pin",
help = "gpio PIN to which relay for amp is connected", type = int,
default = 16)
parser.add_argument("--inverted",
help = "is GPIO inverted?", dest='inverted', action='store_true')
parser.add_argument("--log",
help = "log output", dest='log', action='store_true')
self.args = parser.parse_args()
self.initGPIO()
    def dogrep(self, fname, expression):
        found = False
        with open(fname, "r") as fh:  # 'with' guarantees the handle is closed
            for line in fh:
                if re.search(expression, line):
                    found = True
                    break
        return found
def initGPIO(self):
# check if GPIO file already exists
dname = '/sys/class/gpio/gpio'+str(self.args.pin)
if not os.path.exists(dname):
os.system('echo '+str(self.args.pin)+' > /sys/class/gpio/export')
sleep(1)
# we should have GPIO now
# set as inverted if needed
if (self.args.inverted):
os.system('echo 1 > /sys/class/gpio/gpio'+str(self.args.pin)+'/active_low')
else:
os.system('echo 0 > /sys/class/gpio/gpio'+str(self.args.pin)+'/active_low')
# set as output
os.system('echo \'out\' > /sys/class/gpio/gpio'+str(self.args.pin)+'/direction')
# turn off
os.system('echo 0 > /sys/class/gpio/gpio'+str(self.args.pin)+'/value')
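        # e.g. with the default --pin 16 the commands above create and drive
        # /sys/class/gpio/gpio16/value through the kernel's sysfs GPIO interface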
def switch(self,on):
if self.args.log: print "Switching: ",on
if on:
# turn on
os.system('echo 1 > /sys/class/gpio/gpio'+str(self.args.pin)+'/value')
else:
# turn off
os.system('echo 0 > /sys/class/gpio/gpio'+str(self.args.pin)+'/value')
def run(self):
while True:
sleep(float(self.args.pollinterval)/1000.0)
if self.state == AmpService.Off:
# check if sound is now playing if so transition to state 1 (ON)
if self.dogrep(self.args.monitor,self.args.grepon):
                    # sound is playing, transition to ON
                    self.state = AmpService.On
                    self.switch(True)  # switch amp on
continue
if self.state == AmpService.On:
                # check if sound has stopped playing; if so, transition to the
                # hysteresis state with a count of 0
if self.dogrep(self.args.monitor,self.args.grepoff):
# sound has stopped playing
self.state = AmpService.Hysteresis
self.hyst_count = 0
continue
if self.state == AmpService.Hysteresis:
# check if sound is still not playing, if so increase hyst count, if that exceeds
# threshold then turn GPIO off
# if sound starts playing transition back to ON
if self.dogrep(self.args.monitor,self.args.grepoff):
# sound has stopped playing
self.hyst_count += 1
if self.hyst_count > self.args.hysteresis:
self.state = AmpService.Off
                        self.switch(False) # switch amp off
elif self.dogrep(self.args.monitor,self.args.grepon):
self.state = AmpService.On
continue
            if self.args.log: print('state:', self.state, 'hyst_count:', self.hyst_count)
if __name__ == '__main__':
amp = AmpService()
amp.run()
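# Example invocation (hypothetical file name and device path; adjust for your
# board):
#   python amp_service.py --monitor /proc/asound/card1/pcm0p/sub0/status \
#       --pin 16 --hysteresis 30 --log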
| [
"[email protected]"
] | |
90ceee4160324fdbc2cc4dcea35a10cf345c4bee | 7b4cf9df77a8f5f716dece430c9465b1de137a30 | /src/highlight/render_highlight.py | e525b49f548ec7e47164ab3b33fe3bd1a399e810 | [] | no_license | willcrichton/generative-grading | b440074e64c36a1fd982b4331e2d4ea36cbd57e4 | a36e0c91f778817f8b79d36a06e4b982b1f30245 | refs/heads/master | 2020-06-06T16:27:02.103847 | 2019-07-04T21:22:40 | 2019-07-04T21:22:40 | 192,791,584 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | import os
import re
import argparse
from tqdm import tqdm
from src.utils.io_utils import load_json, save_json
def conv_sample(curr_node):
nonterminal = curr_node['name']
rvs = curr_node['rvs']
children = curr_node['children']
template = curr_node['template']
if not isinstance(children, list):
children = [children]
# print(curr_node)
# print()
format_data = dict()
rv_data = dict()
for ch in children:
ch_name, ch_render, ch_idxs = conv_sample(ch)
format_data[ch_name] = ch_render
rv_data[ch_name] = ch_idxs
ret_idxs = []
render = template
render = render.replace('{{', '{')
render = render.replace('}}', '}')
all_nonterms = re.findall('{.*?}', render)
for to_find in all_nonterms:
key = to_find[1:-1]
if to_find not in render:
continue
offset_idx = render.index(to_find)
end_idx = offset_idx + len(to_find)
render = render[:offset_idx] + format_data[key] + render[end_idx:]
for rv_set, (idx, n) in rv_data[key]:
ret_idxs.append((rv_set, (idx + offset_idx, n)))
if len(rvs) > 0:
ret_idxs.append((rvs, (0, len(render))))
return nonterminal, render, ret_idxs
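# Illustrative node shape accepted by conv_sample (hypothetical field values):
#   {'name': 'Program', 'template': 'def main():{{Body}}', 'rvs': {...},
#    'children': [{'name': 'Body', 'template': 'pass', 'rvs': {}, 'children': []}]}
# It returns (nonterminal_name, rendered_source, [(rv_set, (offset, length)), ...]).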
def tagged_data(data):
ret = []
for d in tqdm(data):
        # each entry appears to be a (program, annotated parse tree) pair;
        # only the tree (d[1]) is converted here
        ret.append(conv_sample(d[1]))
return ret
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('data_file', type=str, help='which results to process')
args = arg_parser.parse_args()
data = load_json(args.data_file)
conv_data = tagged_data(data)
save_json(conv_data, os.path.join('output.json'))
| [
"[email protected]"
] | |
606f30c43a10b6fb70bfa1d7d069ba308d7d0c6b | db18bf62e535bfbb4cc3c28a52715ecfcf9e5a5f | /sessionapp/urls.py | 4d6d7bfa6412f1922f19d49f57a397ce7f858594 | [] | no_license | hakimks/djangofeatures | 41ff890a4b3353a9a9f7b7aaeabef0cf49dae346 | 06d2bb87616f2039f56e4b16bb7024f74f9517f1 | refs/heads/master | 2023-04-11T10:34:50.327747 | 2021-04-19T13:04:57 | 2021-04-19T13:04:57 | 356,165,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.sessfun, name='home'),
] | [
"[email protected]"
] | |
70f378d9edbde3a1d68cab919def134511998363 | fda5c716744d06d8e7d1199b89061e9f4a05a347 | /tictactoe.py | e9eef7ad2dd7f96642b585a27b51780ae40e8692 | [] | no_license | Akash-Raj-ST/TicTacToe | a0ea07a8782c223a986f8bb0d265d32c8bd67ec7 | 2a2dbc6433566c821ab5d0bac22770f9029005ff | refs/heads/main | 2023-01-02T10:03:42.442525 | 2020-10-25T08:23:42 | 2020-10-25T08:23:42 | 306,633,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,022 | py | matrix=[' ',' ',' ',' ',' ',' ',' ',' ',' ']
def check(player): #Function for checking whether the player has won
l=0
m=2
k=0
p=6
cond=True
while l<=7 and cond:
t=s=r=q=0
for i in range(l,m): #horizontal check
if matrix[i]==matrix[i+1] and matrix[i]!=' ':
t+=1
else:
break
for i in range(k,p,3): #vertical check
if matrix[i]==matrix[i+3] and matrix[i]!=' ':
s+=1
else:
break
for i in range(0,5,4): #left diagonal check
if matrix[i]==matrix[i+4] and matrix[i]!=' ':
if i==4:
r=2
else:
break
for i in range(2,5,2): #right diagonal check
if matrix[i]==matrix[i+2] and matrix[i]!=' ':
if i==4:
q=2
else:
break
if t==2 or s==2 or r==2 or q==2:
break
p+=1
k+=1
l+=3
m+=3
if t==2 or s==2 or r==2 or q==2:
        print(t, s, r, q)  # debug: per-direction win counters
print("PLAYER ***",player,"*** WON ! ! ! !")
return 1
def display(): #Function to display the moves
l=0
m=3
for i in range(3):
for i in matrix[l:m]:
print(i,end='|')
print()
l+=3
m+=3
num=[0,1,2,3,4,5,6,7,8]
def inputCheck(vari): #Function to check whether the input is valid
s=1
for i in num:
if i==vari:
s=0
if s==0:
return 0
else:
return 1
def tie(num): #Function to check draw
leng=len(num)
if leng==0:
print("***MATCH TIED!!!***")
return 1
print("1|2|3|")
print("4|5|6|")
print("7|8|9|")
while True:
x=int(input('PlayerX:'))
x=x-1
inputCheck1=inputCheck(x)
crct=True
while crct:
if x>8:
state="WARNING...The positions are from 1 to 9 ONLY..."
else:
state="Space already used"
if inputCheck1==1:
print(state)
x=int(input('playerX:'))
x=x-1
inputCheck1=inputCheck(x)
elif inputCheck1==0:
crct=False
    num.remove(x)
    matrix[x]='x'
    res=check('x')
    display()
    if res==1:
        break
    # declare a tie only after the move is applied and win-checked, so a
    # winning final move is not misreported as a draw
    tie1=tie(num)
    if(tie1==1):
        break
y=int(input('playerY:'))
y=y-1
inputCheck1=inputCheck(y)
crct=True
while crct:
if y>8:
state="WARNING...The positions are from 1 to 9 ONLY..."
else:
state="Space already used"
if inputCheck1==1:
print(state)
y=int(input('playerY:'))
y=y-1
inputCheck1=inputCheck(y)
elif inputCheck1==0:
crct=False
    num.remove(y)
    matrix[y]='y'
    res=check('y')
    display()
    if res==1:
        break
    # same ordering as above: apply and win-check the move before a tie check
    tie1=tie(num)
    if(tie1==1):
        break
| [
"[email protected]"
] | |
df2bc7eae0ec10fb183bbb65d1fa0b99044e52a4 | a2960cf4ba59a3ccfcb8deb4b46e3b55e17843a1 | /tasks/functions/brook.py | 5913b80d396d4b3ca9b131e98f4f67561496331e | [
"MIT"
] | permissive | cmz0228/backend | 4108869751d0ea03a6841c82cc123d116b79986a | 31a4fc7027a14147f971ca3d1097e957456daed3 | refs/heads/main | 2023-04-06T23:55:20.687821 | 2021-04-18T10:27:29 | 2021-04-18T10:27:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | from sqlalchemy.orm import Session
from app.db.models.port import Port
from app.db.models.port_forward import MethodEnum
from app.utils.dns import dns_query
from app.utils.ip import is_ip
from tasks.functions.base import AppConfig
class BrookConfig(AppConfig):
method = MethodEnum.BROOK
def __init__(self):
super().__init__()
self.app_name = "brook"
def apply(self, db: Session, port: Port):
self.local_port = port.num
self.app_command = self.get_app_command(db, port)
self.update_app = not port.server.config.get("brook")
self.applied = True
return self
def get_app_command(self, db: Session, port: Port):
command = port.forward_rule.config.get("command")
if port.forward_rule.config.get("remote_address"):
if not is_ip(port.forward_rule.config.get("remote_address")):
remote_ip = dns_query(port.forward_rule.config.get("remote_address"))
else:
remote_ip = port.forward_rule.config.get("remote_address")
port.forward_rule.config['remote_ip'] = remote_ip
db.add(port.forward_rule)
db.commit()
if command == "relay":
args = (
f"-f :{port.num} "
f"-t {remote_ip}:{port.forward_rule.config.get('remote_port')}"
)
elif command in ("server", "wsserver"):
args = f"-l :{port.num} -p {port.forward_rule.config.get('password')}"
elif command in ("client", "wsclient"):
args = (
f"--socks5 0.0.0.0:{port.num} "
f"-s {remote_ip}:{port.forward_rule.config.get('remote_port')} "
f"-p {port.forward_rule.config.get('password')}"
)
else:
args = port.forward_rule.config.get("args")
return f"/usr/local/bin/brook {command} {args}"
@property
def playbook(self):
return "app.yml"
| [
"[email protected]"
] | |
9f097dd603e1b81f46605e7d01b735e22b05818a | 9a6cc46801eaf24405bda6296a8a5444233a7b02 | /simu_config.py | e10ca449ab076dc44153869f56d53c1333e8e65b | [] | no_license | IIT-Lab/MP-MAB | 6610dbff2091731a37af1d00c355349a937d16e7 | 66e6f909f3e843b2e9ada4aef5e965a3f0dfb028 | refs/heads/master | 2021-05-24T18:19:48.893307 | 2020-03-31T07:27:22 | 2020-03-31T07:27:22 | 253,696,591 | 1 | 0 | null | 2020-04-07T05:34:20 | 2020-04-07T05:34:20 | null | UTF-8 | Python | false | false | 18,607 | py | # -*- coding: utf-8 -*-
"""
@author: Wenbo Wang
[Wang2020] Wenbo Wang, Amir Leshem, Dusit Niyato and Zhu Han, "Decentralized Learning for Channel
Allocation in IoT Networks over Unlicensed Bandwidth as a Contextual Multi-player Multi-armed Bandit Game"
License:
This program is licensed under the GPLv2 license. If you in any way use this code for research
that results in publications, please cite our original article listed above.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
"""
# This file provides the configurations for each simulation.
__author__ = "Wenbo Wang"
import numpy as np
from envutils import Struct as Section
if __name__ == '__main__':
print("Warning: this script 'simu_config.py' is NOT executable..") # DEBUG
exit(0)
# Keyed by (context, player): each value is a per-arm vector; the first dict
# holds the lower reward bounds and the second the upper bounds for each arm
initial_data = [{("context 1", 0): np.array([0., 0.5, 0.3]), ("context 2", 0): np.array([0.1, 0.2, 0.2]), ("context 3", 0): np.array([0., 0.2, 0.25]),
("context 1", 1): np.array([0.1, 0.6, 0.2]), ("context 2", 1): np.array([0., 0., 0.]), ("context 3", 1): np.array([0.2, 0.1, 0.45])},
{("context 1", 0): np.array([0.5, 0.8, 0.6]), ("context 2", 0): np.array([1., 1., 0.4]), ("context 3", 0): np.array([1, 0.3, 0.65]),
("context 1", 1): np.array([0.81, 0.96, 0.52]), ("context 2", 1): np.array([0.5, 0.4, 0.9]), ("context 3", 1): np.array([0.62, 0.21, 0.95])}
]
initial_data_2 = [{("context 1", 0): np.array([0.0, 0.5, 0.3, 0.1]), ("context 2", 0): np.array([0.1, 0.2, 0.2, 0.5]), ("context 3", 0): np.array([0.0, 0.2, 0.25, 0.4]),
("context 1", 1): np.array([0.1 , 0.6 , 0.2 , 0.44]), ("context 2", 1): np.array([0.0, 0.0, 0.0, 0.2]), ("context 3", 1): np.array([0.2 , 0.1 , 0.45, 0.36]),
("context 1", 2): np.array([0.24, 0.11, 0.3 , 0.14]), ("context 2", 2): np.array([0.2, 0.0 , 0.1, 0.2]), ("context 3", 2): np.array([0.32, 0.21, 0.25, 0.59])},
{("context 1", 0): np.array([0.5, 0.8, 0.6, 0.7]), ("context 2", 0): np.array([1.0, 1.0, 0.4, 1.0]), ("context 3", 0): np.array([1.0, 0.3, 0.65, 0.9]),
("context 1", 1): np.array([0.81, 0.96, 0.52, 1.0 ]), ("context 2", 1): np.array([0.5, 0.4, 0.9, 0.6]), ("context 3", 1): np.array([0.62, 0.31, 0.95, 0.79]),
("context 1", 2): np.array([0.81, 0.78, 0.67, 1.0 ]), ("context 2", 2): np.array([0.3, 0.95, 0.9, 0.6]), ("context 3", 2): np.array([0.75, 0.63, 1.0 , 0.99]),}
]
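# For instance, in initial_data_2 under ("context 1", 0) the arm-1 reward is
# bounded by [0.5, 0.8] in the uniform case (lower bounds in the first dict,
# upper bounds in the second).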
###############################################################################
# Section 1:
# Hard-coded MAB environment for uniform/Gaussian arms and uniform contexts with
# 3 contexts, 2 players, 3 arms
###############################################################################
ENV_SCENARIO_1 = Section("2-player-3-context-3-uniform-arm MAB: regret evolution")
ENV_SCENARIO_1.game_horizon = 200000
ENV_SCENARIO_1.env_config = {'horizon': ENV_SCENARIO_1.game_horizon,
'arm number': 3,
'player number': 2,
'context set': {"context 1", "context 2", "context 3"},#
'env_type': 'uniform', # change the underlying distribution here
'initial data': initial_data
}
# Enable repeated simulations of regret and reward evolution
ENV_SCENARIO_1.enable_regret_simulation = True
ENV_SCENARIO_1.enable_reward_simulation = True
ENV_SCENARIO_1.alg_types = ['Static Hungarian', 'Musical Chairs', 'Trial and Error']
ENV_SCENARIO_1.alg_configs = [None, None, {"c1": 100, "c2": 200,"c3":100, "epsilon": 0.01, "delta": 2, "xi": 0.001,
"alpha11": -0.12, "alpha12": 0.15, "alpha21": -0.35, "alpha22": 0.4,}]
# Experiment parameters
ENV_SCENARIO_1.T_start = 5000
ENV_SCENARIO_1.T_step = 20
ENV_SCENARIO_1.T_simu_rounds = 20
ENV_SCENARIO_1.flag_save_figure = True
ENV_SCENARIO_1.repeated_play_data_name = 'regret_data'
# Enable parallel processing
ENV_SCENARIO_1.flag_parallel = False
ENV_SCENARIO_1.flag_progress_bar = True
###############################################################################
# Section 1:
# Parallel version
###############################################################################
ENV_SCENARIO_1_PARALLEL = ENV_SCENARIO_1
ENV_SCENARIO_1_PARALLEL.flag_parallel = True
###############################################################################
# Section 2:
# Hard-coded MAB environment for uniform/Gaussian arms and uniform contexts with
# 3 contexts, 2 players, 3 arms
###############################################################################
ENV_SCENARIO_2 = Section("2-player-3-context-3-uniform-arm MAB: reward evolution")
ENV_SCENARIO_2.game_horizon = 80000
ENV_SCENARIO_2.env_config = {'horizon': ENV_SCENARIO_2.game_horizon,
'arm number': 3,
'player number': 2,
'context set': {"context 1", "context 2", "context 3"},#
'env_type': 'uniform', # change the underlying distribution here
'initial data': initial_data
}
# Enable single-shot simulation of allocation efficiency (average reward)
ENV_SCENARIO_2.enable_efficiency_simulation = True
ENV_SCENARIO_2.alg_types = ['Static Hungarian', 'Musical Chairs', 'Trial and Error']
ENV_SCENARIO_2.alg_configs = [None, None, {"c1": 100, "c2": 200,"c3":100, "epsilon": 0.01, "delta": 2, "xi": 0.001,
"alpha11": -0.12, "alpha12": 0.15, "alpha21": -0.35, "alpha22": 0.4,}]
# Experiment parameters
ENV_SCENARIO_2.flag_save_figure = True
ENV_SCENARIO_2.save_data = True
# Enable parallel processing
ENV_SCENARIO_2.flag_parallel = False
ENV_SCENARIO_2.flag_progress_bar = True
###############################################################################
# Section 2:
# Parallel version
###############################################################################
ENV_SCENARIO_2_PARALLEL = ENV_SCENARIO_2
ENV_SCENARIO_2_PARALLEL.flag_parallel = True
###############################################################################
# Section 3:
# Hard-coded MAB environment for uniform/Gaussian arms and uniform contexts with
# 3 contexts, 2 players, 3 arms
###############################################################################
ENV_SCENARIO_3 = Section("2-player-3-context-3-uniform-arm MAB: regret evolution")
ENV_SCENARIO_3.game_horizon = 200000
ENV_SCENARIO_3.env_config = {'horizon': ENV_SCENARIO_3.game_horizon,
'arm number': 3,
'player number': 2,
'context set': {"context 1", "context 2", "context 3"},#
'env_type': 'uniform', # change the underlying distribution here
'initial data': initial_data
}
# Enable simulation for regret evolution with repetition
ENV_SCENARIO_3.enable_regret_simulation = True
ENV_SCENARIO_3.alg_types = ['Static Hungarian', 'Musical Chairs', 'Trial and Error', 'Game of Thrones']
ENV_SCENARIO_3.alg_configs = [None, None, {"c1": 100, "c2": 200,"c3":100, "epsilon": 0.01, "delta": 2, "xi": 0.001,
"alpha11": -0.12, "alpha12": 0.15, "alpha21": -0.35, "alpha22": 0.4,},
{"c1": 100, "c2": 200,"c3":100, "epsilon": 0.01, "delta": 2}]
# Experiment parameters
ENV_SCENARIO_3.flag_save_figure = True
ENV_SCENARIO_3.save_data = True
# Experiment parameters
ENV_SCENARIO_3.T_start = 5000
ENV_SCENARIO_3.T_step = 20
ENV_SCENARIO_3.T_simu_rounds = 200
ENV_SCENARIO_3.repeated_play_data_name = 'regret_data_3_alg'
# Enable parallel processing
ENV_SCENARIO_3.flag_parallel = False
ENV_SCENARIO_3.flag_progress_bar = True
###############################################################################
# Section 3:
# Parallel version
###############################################################################
ENV_SCENARIO_3_PARALLEL = ENV_SCENARIO_3
ENV_SCENARIO_3_PARALLEL.flag_parallel = True
###############################################################################
# Section 4:
# Hard-coded MAB environment for uniform/gaussian arms and unifrom context with
# 3-contexts, 2-plaers, 3-arms, test of parallel simulation
# for a single round of this 4-algorithm example, multiprocessing accelerates by
# about 1/3
###############################################################################
ENV_SCENARIO_4 = Section("2-player-3-context-3-uniform-arm MAB: reward evolution")
ENV_SCENARIO_4.game_horizon = 200000
ENV_SCENARIO_4.env_config = {'horizon': ENV_SCENARIO_4.game_horizon,
'arm number': 3,
'player number': 2,
'context set': {"context 1", "context 2", "context 3"},#
'env_type': 'uniform', # change the underlying distribution here
'initial data': initial_data
}
# Enable single-shot simulation of allocation efficiency (average reward)
ENV_SCENARIO_4.enable_efficiency_simulation = True
ENV_SCENARIO_4.alg_types = ['Static Hungarian', 'Musical Chairs', 'Trial and Error', 'Game of Thrones']
ENV_SCENARIO_4.alg_configs = [None, None, {"c1": 100, "c2": 200,"c3":100, "epsilon": 0.01, "delta": 2, "xi": 0.001,
"alpha11": -0.12, "alpha12": 0.15, "alpha21": -0.35, "alpha22": 0.4,},
{"c1": 100, "c2": 200,"c3":100, "epsilon": 0.01, "delta": 2}]
# Experiment parameters
ENV_SCENARIO_4.flag_save_figure = True
ENV_SCENARIO_4.save_data = False
# Experiment parameters
ENV_SCENARIO_4.T_start = 5000
ENV_SCENARIO_4.T_step = 20
ENV_SCENARIO_4.T_simu_rounds = 200
ENV_SCENARIO_4.repeated_play_data_name = 'regret_data_3_alg'
# Enable parallel processing
ENV_SCENARIO_4.flag_parallel = False
ENV_SCENARIO_4.flag_progress_bar = True
###############################################################################
# Section 4:
# Parallel version
###############################################################################
ENV_SCENARIO_4_PARALLEL = ENV_SCENARIO_4
ENV_SCENARIO_4_PARALLEL.flag_parallel = True
###############################################################################
# Section 5:
# MAB environment in HetNet, with 12 random arms/channel and 10 randomly placed
# users, 3 contexts (MUE transmission in the underlying macro cells)
# for a single round of this 4-algorithm example, multiprocessing is to be implemented
###############################################################################
ENV_SCENARIO_5 = Section("10-UE-10-Channel HetNet: regret evolution")
ENV_SCENARIO_5.game_horizon = 80000
ENV_SCENARIO_5.env_config = {'horizon': ENV_SCENARIO_5.game_horizon,
'arm number': 12,
'player number': 10,
'context set': {"context 1", "context 2", "context 3"},#
'env_type': 'HetNet simulator', # change the underlying distribution here
'enabel mmWave': True,
'cell range': 200,
'context_prob': {'context 1': 1, 'context 2': 1, 'context 3': 1},
'los_prob': {'context 1': 1, 'context 2': 1, 'context 3': 1}
}
# Enable single-shot simulation of allocation efficiency (average reward)
ENV_SCENARIO_5.enable_efficiency_simulation = True
ENV_SCENARIO_5.alg_types = ['Musical Chairs', 'Trial and Error', 'Game of Thrones']
ENV_SCENARIO_5.alg_configs = [None, {"c1": 100, "c2": 200,"c3":100, "epsilon": 0.01, "delta": 2, "xi": 0.001,
"alpha11": -0.12, "alpha12": 0.15, "alpha21": -0.39, "alpha22": 0.4,},
{"c1": 100, "c2": 200,"c3":100, "epsilon": 0.01, "delta": 2}]
# Experiment parameters
ENV_SCENARIO_5.flag_save_figure = True
ENV_SCENARIO_5.save_data = False
# Experiment parameters
ENV_SCENARIO_5.T_start = 5000
ENV_SCENARIO_5.T_step = 20
ENV_SCENARIO_5.T_simu_rounds = 200
ENV_SCENARIO_5.repeated_play_data_name = 'regret_data_3_alg'
# Enable parallel processing
ENV_SCENARIO_5.flag_parallel = False
ENV_SCENARIO_5.flag_progress_bar = True
###############################################################################
# Section 5:
# Parallel version
###############################################################################
ENV_SCENARIO_5_PARALLEL = ENV_SCENARIO_5
ENV_SCENARIO_5_PARALLEL.flag_parallel = True
###############################################################################
# Section 6:
# MAB environment in HetNet, with 12 random arms/channel and 10 randomly placed
# users, 3 contexts (MUE transmission in the underlying macro cells)
###############################################################################
ENV_SCENARIO_6 = Section("10-UE-12-Channel HetNet: reward evolution")
ENV_SCENARIO_6.game_horizon = 200000
ENV_SCENARIO_6.env_config = {'horizon': ENV_SCENARIO_6.game_horizon,
'arm number': 12,
'player number': 10,
'context set': {"context 1", "context 2", "context 3"},#
'env_type': 'HetNet simulator', # change the underlying distribution here
'enabel mmWave': True,
'cell range': 250,
'context_prob': {'context 1': 2, 'context 2': 1, 'context 3': 1},
'los_prob': {'context 1': 1.5, 'context 2': 2, 'context 3': 1}
}
# Enable repeated reward and switching-count simulations (single-shot efficiency and regret disabled)
ENV_SCENARIO_6.enable_efficiency_simulation = False
ENV_SCENARIO_6.enable_regret_simulation = False
ENV_SCENARIO_6.enable_reward_simulation = True
ENV_SCENARIO_6.enable_switching_simulation = True
ENV_SCENARIO_6.alg_types = ['Musical Chairs', 'SOC', 'Trial and Error', 'Game of Thrones'] #,
ENV_SCENARIO_6.alg_configs = [None,
{"delta": 0.02, "exploration_time": 10000},
{"c1": 1000, "c2": 3000,"c3":3000, "epsilon": 0.01, "delta": 1.5, "xi": 0.001,
"alpha11": -0.04, "alpha12": 0.05, "alpha21": -0.035, "alpha22": 0.04, "observable": 1},
{"c1": 1000, "c2": 3000,"c3":3000, "epsilon": 0.01, "delta": 1.5},
]
# Experiment parameters
ENV_SCENARIO_6.flag_save_figure = True
ENV_SCENARIO_6.save_data = False
# Experiment parameters
ENV_SCENARIO_6.T_start = 40000
ENV_SCENARIO_6.T_step = 12
ENV_SCENARIO_6.T_simu_rounds = 200
ENV_SCENARIO_6.repeated_play_data_name = 'reward_data_4_alg_HetNet'
# Enable parallel processing
ENV_SCENARIO_6.flag_parallel = False
ENV_SCENARIO_6.flag_progress_bar = True
###############################################################################
# Section 6:
# Parallel version
###############################################################################
ENV_SCENARIO_6_PARALLEL = ENV_SCENARIO_6
ENV_SCENARIO_6_PARALLEL.flag_parallel = True
###############################################################################
# Section 7:
# Hard-coded MAB environment for uniform/gaussian arms and unifrom context with
# 3-contexts, 2-plaers, 3-arms
###############################################################################
ENV_SCENARIO_7 = Section("3-context-3-player-4-uniform-arm MAB: reward evolution")
ENV_SCENARIO_7.game_horizon = 100000
ENV_SCENARIO_7.env_config = {'horizon': ENV_SCENARIO_7.game_horizon,
'arm number': 4,
'player number': 3,
'context set': {"context 1", "context 2", "context 3"},#
'env_type': 'uniform', # change the underlying distribution here
'initial data': initial_data_2
}
# add algorithms
ENV_SCENARIO_7.alg_types = ['Musical Chairs', 'SOC', 'Game of Thrones', 'Trial and Error'] #, , 'TnE Nonobservable'
ENV_SCENARIO_7.alg_configs = [None,
{"delta": 0.02, "exploration_time": 10000},
{"c1": 500, "c2": 1000,"c3":1000, "epsilon": 0.01, "delta": 1.5},
{"c1": 500, "c2": 1000,"c3":1000, "epsilon": 0.01, "delta": 1.5, "xi": 0.001,
"alpha11": -0.12, "alpha12": 0.15, "alpha21": -0.35, "alpha22": 0.4},
# {"c1": 300, "c2": 1000,"c3":1000, "epsilon": 0.01, "delta": 1.5, "xi": 0.001,
# "alpha11": -0.12, "alpha12": 0.15, "alpha21": -0.35, "alpha22": 0.4, "observable": 0}
]
# Enable repeated reward and switching-count simulations (single-shot efficiency and regret disabled)
ENV_SCENARIO_7.enable_efficiency_simulation = False
ENV_SCENARIO_7.enable_regret_simulation = False
ENV_SCENARIO_7.enable_reward_simulation = True
ENV_SCENARIO_7.enable_switching_simulation = True
# Experiment parameters
ENV_SCENARIO_7.T_start = 20000
ENV_SCENARIO_7.T_step = 10
ENV_SCENARIO_7.T_simu_rounds = 20
ENV_SCENARIO_7.repeated_play_data_name = 'congfig_7_5_algs_uniform'
# Experiment parameters
ENV_SCENARIO_7.flag_save_figure = True
ENV_SCENARIO_7.save_data = False
# Enable parallel processing
ENV_SCENARIO_7.flag_parallel = True
ENV_SCENARIO_7.flag_progress_bar = True
###############################################################################
# All configurations are stored in the following dictionary:
###############################################################################
CONFIGURATION_DICT = {1: ENV_SCENARIO_1,
2: ENV_SCENARIO_2,
3: ENV_SCENARIO_3,
4: ENV_SCENARIO_4,
5: ENV_SCENARIO_5,
6: ENV_SCENARIO_6,
7: ENV_SCENARIO_1_PARALLEL,
8: ENV_SCENARIO_2_PARALLEL,
9: ENV_SCENARIO_3_PARALLEL,
10: ENV_SCENARIO_4_PARALLEL,
11: ENV_SCENARIO_5_PARALLEL,
12: ENV_SCENARIO_6_PARALLEL,
13: ENV_SCENARIO_7
}
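# Minimal usage sketch (assuming a separate runner module consumes these
# sections; names below are illustrative):
#   from simu_config import CONFIGURATION_DICT
#   cfg = CONFIGURATION_DICT[1]
#   print(cfg.game_horizon, cfg.alg_types)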
| [
"[email protected]"
] | |
ec1adb887dcd401acb84fc8e2a4216578d019f4a | 6e170c8ae2eeb206c6e69a025206c29b2c5fa1db | /tools/cpplint.py | 8d30ed603bd84596b47807d9e63ce70a7bc6f7f7 | [
"MIT",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | fossabot/arctic | d7276ab41fbb67559ad7adf62639005ffec5501e | f3f6e1051b7209020cdaec69ad1f1edbd1acb522 | refs/heads/master | 2021-04-03T05:11:46.728128 | 2018-03-08T20:22:58 | 2018-03-08T20:22:58 | 124,443,896 | 0 | 0 | MIT | 2018-03-08T20:22:57 | 2018-03-08T20:22:57 | null | UTF-8 | Python | true | false | 237,326 | py | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits] [--headers=x,y,...]
<file> [file] ...
The style guidelines this tries to follow are those in
https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
headers=x,y,...
The header extensions that cpplint will treat as .h in checks. Values are
automatically added to --extensions list.
Examples:
--headers=hpp,hxx
--headers=hpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
root=subdir
headers=x,y,...
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
"linelength" allows to specify the allowed line length for the project.
The "root" option is similar in function to the --root flag (see example
above).
The "headers" option is similar in function to the --headers flag
(see example above).
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/c++14',
'build/c++tr1',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/strings',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_if_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
]
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
'readability/streams',
'readability/function',
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# The default list of categories suppressed for C (not C++) files.
_DEFAULT_C_SUPPRESSED_CATEGORIES = [
'readability/casting',
]
# The default list of categories suppressed for Linux Kernel files.
_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
'whitespace/tab',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'scoped_allocator',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# Type names
_TYPES = re.compile(
r'^(?:'
# [dcl.type.simple]
r'(char(16_t|32_t)?)|wchar_t|'
r'bool|short|int|long|signed|unsigned|float|double|'
# [support.types]
r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
# [cstdint.syn]
r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
r'(u?int(max|ptr)_t)|'
r')$')
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Pattern for matching FileInfo.BaseName() against test file name
_TEST_FILE_SUFFIX = r'(_test|_unittest|_regtest)$'
# Pattern that matches only complete whitespace, possibly across multiple lines.
_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
# Assertion macros. These are defined in base/logging.h and
# testing/base/public/gunit.h.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE', 'ASSERT_TRUE',
'EXPECT_FALSE', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
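# For example, after the loops above:
#   _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] == 'EXPECT_NE'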
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
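# For example, the "or" in "if (a or b)" matches (suggesting "||"), while the
# "or" inside "color" does not, thanks to the surrounding-character guards.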
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
# Match strings that indicate we're working on a C (not C++) file.
_SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
# Match string that indicates we're working on a Linux Kernel file.
_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
# Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.
# This is set by --headers flag.
_hpp_headers = set(['h'])
# {str, bool}: a map from error categories to booleans which indicate if the
# category should be suppressed for every line.
_global_error_suppressions = {}
def ProcessHppHeadersOption(val):
global _hpp_headers
try:
_hpp_headers = set(val.split(','))
    # Automatically append to the extensions list so headers do not have to be listed twice
_valid_extensions.update(_hpp_headers)
except ValueError:
    PrintUsage('Header extensions must be a comma-separated list.')
def IsHeaderExtension(file_extension):
return file_extension in _hpp_headers
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of line error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
elif category not in _LEGACY_ERROR_CATEGORIES:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
def ProcessGlobalSuppresions(lines):
"""Updates the list of global error suppressions.
Parses any lint directives in the file that have global effect.
Args:
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
"""
for line in lines:
if _SEARCH_C_FILE.search(line):
for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
_global_error_suppressions[category] = True
if _SEARCH_KERNEL_FILE.search(line):
for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
_global_error_suppressions[category] = True
def ResetNolintSuppressions():
"""Resets the set of NOLINT suppressions to empty."""
_error_suppressions.clear()
_global_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment or
global suppression.
"""
return (_global_error_suppressions.get(category, False) or
linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
def _IsSourceExtension(s):
"""File extension (excluding dot) matches a source file extension."""
return s in ('c', 'cc', 'cpp', 'cxx')
class _IncludeState(object):
"""Tracks line numbers for includes, and the order in which includes appear.
include_list contains list of lists of (header, line number) pairs.
It's a lists of lists rather than just one flat list to make it
easier to update across preprocessor boundaries.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
self.include_list = [[]]
self.ResetSection('')
def FindHeader(self, header):
"""Check if a header has already been included.
Args:
header: header to check.
Returns:
Line number of previous occurrence, or -1 if the header has not
been seen before.
"""
for section_list in self.include_list:
for f in section_list:
if f[0] == header:
return f[1]
return -1
def ResetSection(self, directive):
"""Reset section checking for preprocessor directive.
Args:
directive: preprocessor directive (e.g. "if", "else").
"""
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
# Update list of includes. Note that we never pop from the
# include list.
if directive in ('if', 'ifdef', 'ifndef'):
self.include_list.append([])
elif directive in ('else', 'elif'):
self.include_list[-1] = []
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
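  # For example, CanonicalizeAlphabeticalOrder('Foo-Bar-inl.h') returns
  # 'foo_bar.h', so 'Foo-Bar-inl.h' and 'foo_bar.h' compare as equal.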
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
""" Adds more filters to the existing list of error-message filters. """
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
self._filters_backup = self.filters[:]
def RestoreFilters(self):
""" Restores filters previously backed up."""
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
    for category, count in self.errors_by_category.items():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stdout.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
def _AddFilters(filters):
"""Adds more filter overrides.
Unlike _SetFilters, this function does not reset the current list of filters
available.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.AddFilters(filters)
def _BackupFilters():
""" Saves the current filter list to backup storage."""
_cpplint_state.BackupFilters()
def _RestoreFilters():
""" Restores filters previously backed up."""
_cpplint_state.RestoreFilters()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if not self.in_a_function:
return
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo(object):
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = current_dir = os.path.dirname(fullname)
while current_dir != os.path.dirname(current_dir):
if (os.path.exists(os.path.join(current_dir, ".git")) or
os.path.exists(os.path.join(current_dir, ".hg")) or
os.path.exists(os.path.join(current_dir, ".svn"))):
root_dir = current_dir
current_dir = os.path.dirname(current_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return _IsSourceExtension(self.Extension()[1:])
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
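# Illustrative sketch (not part of the original cpplint): filters are applied
# in order, so the last matching +/- prefix wins. For example, with
# --filter=-whitespace,+whitespace/braces:
#   'whitespace/indent' -> matched by '-whitespace' only          -> filtered out
#   'whitespace/braces' -> '-whitespace', then '+whitespace/braces' -> printed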
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care with space removal so we can handle comments inside
# statements better.
# The current rule is: we only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side;
# if this doesn't work, we try the left side, but only if there's a non-word
# character on the right (the '(?=\W)' lookahead below).
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
_RE_PATTERN_C_COMMENTS + r'\s+|' +
r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
_RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: A partial line of code, from column 0 up to some position.
Returns:
True if the next character appended to 'line' would be inside a
string constant.
"""
line = line.replace(r'\\', 'XX')  # after this, \\" no longer matches \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
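# Illustrative examples (not part of the original cpplint):
#   IsCppString('x = "abc')       -> True  (one unterminated double quote)
#   IsCppString('x = "abc"')      -> False (quotes are balanced)
#   IsCppString(r's = "a\"b')     -> True  (escaped quote does not close it)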
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for beginning of a raw string, and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
#
# Once we have matched a raw string, we check the prefix of the
# line to make sure that the line is not part of a single line
# comment. It's done this way because we remove raw strings
# before removing comments as opposed to removing comments
# before removing raw strings. This is because there are some
# cpplint checks that require the comments to be preserved, but
# we don't want to check comments that are inside raw strings.
matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if (matched and
not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
matched.group(1))):
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
else:
break
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
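# Illustrative example (not part of the original cpplint): a raw string that
# starts and ends on the same line is collapsed in place:
#   CleanseRawStrings(['s = R"(hi)";'])  ->  ['s = "";']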
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Filling the range with '/**/' placeholder comments keeps the lines
# non-empty, so we will not get unnecessary blank line warnings later on.
for i in range(begin, end):
lines[i] = '/**/'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
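# Illustrative examples (not part of the original cpplint):
#   CleanseComments('int a;  // trailing note')  ->  'int a;'
#   CleanseComments('f(/* arg */ 1);')           ->  'f(1);'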
class CleansedLines(object):
"""Holds 4 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments.
2) lines member contains lines without comments.
3) raw_lines member contains all the lines without processing.
4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
strings removed.
All these members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
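# Illustrative examples (not part of the original cpplint):
#   _CollapseStrings('addr = "0x7f";')    ->  'addr = "";'
#   _CollapseStrings("x = 1'000'000;")    ->  'x = 1000000;'
# The second case strips C++14 digit separators so later checks see a plain
# numeric literal rather than a bogus character constant.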
def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are currently
# expecting a '>', the matching '<' must have been an operator, since
# template argument list should not contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while stack and linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find end of expression before end of file, give up
return (line, clean_lines.NumLines(), -1)
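# Illustrative example (not part of the original cpplint): the nesting stack
# lets '<' be disambiguated from a template bracket. For the line '(a < b)':
#   FindEndOfExpressionInLine('(a < b)', 0, [])  ->  (7, None)
# The tentative '<' pushed at index 3 is popped again when ')' is seen, so
# the parenthesized expression still closes at index 6 (returning 6 + 1).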
def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are currently
# expecting a '<', the matching '>' must have been an operator, since
# template argument list should not contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if line[pos] not in ')}]>':
return (line, 0, -1)
# Check last line
(start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while stack and linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find start of expression before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
"""Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
"""
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
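# Illustrative examples (not part of the original cpplint):
#   GetIndentLevel('    int x;')  ->  4
#   GetIndentLevel('\tint x;')    ->  0  (only literal spaces are counted)
#   GetIndentLevel('')            ->  0  (no non-space character to anchor on)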
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
# Replace 'c++' with 'cpp'.
filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
suffix = os.sep
# On Windows using directory separator will leave us with
# "bogus escape error" unless we properly escape regex.
if suffix == '\\':
suffix += '\\'
file_path_from_root = re.sub('^' + _root + suffix, '', file_path_from_root)
return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
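# Illustrative example (not part of the original cpplint), assuming the
# repository root is detected as the parent of 'chrome/' and no --root flag:
#   GetHeaderGuardCPPVariable('chrome/browser/browser.h')
#       ->  'CHROME_BROWSER_BROWSER_H_'
# Non-alphanumeric characters become '_' and the result is upper-cased.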
def CheckForHeaderGuard(filename, clean_lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
clean_lines: A CleansedLines instance containing the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
raw_lines = clean_lines.lines_without_raw_strings
for i in raw_lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = ''
ifndef_linenum = 0
define = ''
endif = ''
endif_linenum = 0
for linenum, line in enumerate(raw_lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
# Check for "//" comments on endif line.
ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
if match.group(1) == '_':
# Issue low severity warning for deprecated double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif // %s"' % cppvar)
return
# Didn't find the corresponding "//" comment. If this file does not
# contain any "//" comments at all, it could be that the compiler
# only wants "/**/" comments, look for those instead.
no_single_line_comments = True
for i in xrange(1, len(raw_lines) - 1):
line = raw_lines[i]
if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
no_single_line_comments = False
break
if no_single_line_comments:
match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
if match:
if match.group(1) == '_':
# Low severity warning for double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif /* %s */"' % cppvar)
return
# Didn't find anything
error(filename, endif_linenum, 'build/header_guard', 5,
'#endif line should be "#endif // %s"' % cppvar)
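# Illustrative sketch (not part of the original cpplint): the layout this
# check expects for a header foo/bar.h is
#   #ifndef FOO_BAR_H_
#   #define FOO_BAR_H_
#   ...
#   #endif  // FOO_BAR_H_
# with the deprecated PATH_FILE_H__ (double underscore) form accepted at
# lower severity for backward compatibility.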
def CheckHeaderFileIncluded(filename, include_state, error):
"""Logs an error if a .cc file does not include its header."""
# Do not check test files
fileinfo = FileInfo(filename)
if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
return
headerfile = filename[0:len(filename) - len(fileinfo.Extension())] + '.h'
if not os.path.exists(headerfile):
return
headername = FileInfo(headerfile).RepositoryName()
first_include = 0
for section_list in include_state.include_list:
for f in section_list:
if headername in f[0] or f[0] in headername:
return
if not first_include:
first_include = f[1]
error(filename, first_include, 'build/include', 5,
'%s should include its header file %s' % (fileinfo.RepositoryName(),
headername))
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code was originally written without multi-threading in mind. Also,
engineers often rely on old experience: they learned POSIX before the
threading extensions were added. These tests guide engineers toward the
thread-safe variants (when using POSIX directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
if Search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
'...) for improved thread safety.')
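# Illustrative examples (not part of the original cpplint): the operator
# prefix in _UNSAFE_FUNC_PREFIX is what separates real calls from look-alikes:
#   'int r = rand();'        -> flagged, '= rand()' matches the pattern
#   'ACMRandom rand(seed);'  -> not flagged, no operator before the name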
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid because it effectively does count++, moving the pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
if Search(r'^#define', clean_lines[linenum]):
return True
if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
return True
return False
def IsForwardClassDeclaration(clean_lines, linenum):
return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
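# Illustrative examples (not part of the original cpplint):
#   IsForwardClassDeclaration(['class Foo;'], 0)   -> truthy (forward decl)
#   IsForwardClassDeclaration(['class Foo {'], 0)  -> None   (definition)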
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, linenum, seen_open_brace):
self.starting_linenum = linenum
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
self.check_namespace_indentation = False
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
"""Stores information about an 'extern "C"' block."""
def __init__(self, linenum):
_BlockInfo.__init__(self, linenum, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name
self.is_derived = False
self.check_namespace_indentation = True
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# If there is a DISALLOW macro, it should appear near the end of
# the class.
seen_last_thing_in_class = False
for i in xrange(linenum - 1, self.starting_linenum, -1):
match = Search(
r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
self.name + r'\)',
clean_lines.elided[i])
if match:
if seen_last_thing_in_class:
error(filename, i, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
break
if not Match(r'^\s*$', clean_lines.elided[i]):
seen_last_thing_in_class = True
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name or ''
self.check_namespace_indentation = True
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
# Check how many lines are enclosed in this namespace. Don't issue
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
# namespaces, so that code that terminates namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
re.escape(self.name) + r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
else:
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Top of the previous stack before each Update().
#
# Because the nesting_stack is updated at the end of each line, we
# had to do some convoluted checks to find out what is the current
# scope at the beginning of the line. This check is simplified by
# saving the previous top of nesting stack.
#
# We could save the full stack, but we only need the top. Copying
# the full nesting stack would slow down cpplint by ~10%.
self.previous_stack_top = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# We can't be sure if we just find a single '<', and need to
# find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
# declarations even if they aren't followed by whitespace; this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
# If the namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo(linenum))
else:
self.stack.append(_BlockInfo(linenum, True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
# Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
line = clean_lines.lines_without_raw_strings[linenum]
if Match(r'\s*#\s*endif\s*([^/\s]|/[^/]|$)', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage-class specifier (static, extern, typedef, etc) should be '
'at the beginning of the declaration.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
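# Illustration (hypothetical class Foo) of the explicit-constructor rules:
#   Foo(int x);             // runtime/explicit: single-parameter, not explicit
#   Foo(int x, int y = 0);  // runtime/explicit: callable with one argument
#   explicit Foo();         // runtime/explicit: zero-parameter, drop explicit
#   Foo(const Foo& other);  // OK: copy constructors are exempt
#   Foo(std::initializer_list<int> v);  // OK: initializer-list ctor is exempt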
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
not Search(r'\bcase\s+\(', fncall)):
# TODO(unknown): Space after an operator function seem to be a common
# error, silence those for now by restricting them to highest verbosity.
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
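# Illustration (hypothetical calls) of the spacing rules checked above:
#   f( 3, 4);   // whitespace/parens: extra space after ( in function call
#   f (3, 4);   // whitespace/parens: extra space before ( in function call
#   f(3, 4 );   // whitespace/parens: extra space before )
#   if (x) {}   // OK: control-flow keywords are exempt from these rules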
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: The line to check, as a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
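# Note: this function has no docstring; in brief, it flags items nested
# directly inside a namespace body that are indented, since namespace
# contents are conventionally not indented. Arguments mirror the other
# Check* helpers; 'line' here is the line number to check.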
is_namespace_indent_item = (
len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
nesting_state.previous_stack_top == nesting_state.stack[-2])
if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
clean_lines.elided, line):
CheckItemIndentationInNamespace(filename, clean_lines.elided,
line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
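# Examples of what _RE_PATTERN_TODO captures when applied to the comment
# text starting at '//':
#   '// TODO(alice): fix'  matches; groups are (' ', '(alice)', ' ')
#   '//TODO: fix'          matches; the leading-whitespace group is empty
#   '// TODO fix'          matches; the username group (group 2) is None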
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the // unless
# it's a /// or //! Doxygen comment.
if (Match(r'//[^ ]*\w', comment) and
not Match(r'(///|//\!)(\s+|$)', comment)):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
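# Illustration (hypothetical C++ lines) of the comment-spacing rules above:
#   int x = 0;  // note   -> OK: two spaces before //, one space after //
#   int x = 0; // note    -> whitespace/comments: want two spaces before //
#   int x = 0;  //note    -> whitespace/comments: want a space after //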
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}'
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'return []() {};'
if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we want spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if ((Search(r'[\w.]=', line) or
Search(r'=[\w.]', line))
and not Search(r'\b(if|while|for) ', line)
# Operators taken from [lex.operators] in C++11 standard.
and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
and not Search(r'operator=', line)):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
#
# If the operator is followed by a comma, assume it's being used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. This is because there are too
# many false positives due to RValue references.
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
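# Illustration (hypothetical C++ lines) of the operator-spacing rules above:
#   x=y;        // whitespace/operators: missing spaces around =
#   a<=b;       // whitespace/operators: missing spaces around <=
#   cout<<"x";  // whitespace/operators: missing spaces around <<
#   10<<20      // OK: digit shifts like 10<<20 are tolerated
#   y = ! x;    // whitespace/operators: extra space after unary !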
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
Search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
# TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
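# Illustration (hypothetical C++ lines) of the comma/semicolon rules above:
#   f(a,b);       // whitespace/comma: missing space after ,
#   x = 1;y = 2;  // whitespace/semicolon: missing space after ;
#   f(a, b);      // OK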
def _IsType(clean_lines, nesting_state, expr):
"""Check if expression looks like a type name, returns true if so.
Args:
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
expr: The expression to check.
Returns:
True, if token looks like a type.
"""
# Keep only the last token in the expression
last_word = Match(r'^.*(\b\S+)$', expr)
if last_word:
token = last_word.group(1)
else:
token = expr
# Match native types and stdint types
if _TYPES.match(token):
return True
# Try a bit harder to match templated types. Walk up the nesting
# stack until we find something that resembles a typename
# declaration for what we are looking for.
typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
r'\b')
block_index = len(nesting_state.stack) - 1
while block_index >= 0:
if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
return False
# Found where the opening brace is. We want to scan from this
# line up to the beginning of the function, minus a few lines.
# template <typename Type1, // stop scanning here
# ...>
# class C
# : public ... { // start scanning here
last_line = nesting_state.stack[block_index].starting_linenum
next_block_start = 0
if block_index > 0:
next_block_start = nesting_state.stack[block_index - 1].starting_linenum
first_line = last_line
while first_line >= next_block_start:
if clean_lines.elided[first_line].find('template') >= 0:
break
first_line -= 1
if first_line < next_block_start:
# Didn't find any "template" keyword before reaching the next block,
# there are probably no template things to check for this block
block_index -= 1
continue
# Look for typename in the specified range
for i in xrange(first_line, last_line + 1, 1):
if Search(typename_pattern, clean_lines.elided[i]):
return True
block_index -= 1
return False
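# Sketch of _IsType in action: 'uint64_t' is matched directly by _TYPES,
# while a template parameter such as 'T' is recognized by walking up the
# nesting stack and finding 'typename T' (or 'class T') between the
# enclosing block's 'template' line and its opening brace.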
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces when they are delimiting blocks, classes, namespaces etc.
# And since you should never have braces at the beginning of a line,
# this is an easy test. Except that braces used for initialization don't
# follow the same rule; we often don't want spaces before those.
match = Match(r'^(.*[^ ({>]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer lists, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
leading_text = match.group(1)
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
# We also suppress warnings for `uint64_t{expression}` etc., as the style
# guide recommends brace initialization for integral types to avoid
# overflow/truncation.
if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
and not _IsType(clean_lines, nesting_state, leading_text)):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
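# Illustration (hypothetical C++ lines) of the rules checked above:
#   if (cond){      // whitespace/braces: missing space before {
#   }else {         // whitespace/braces: missing space before else
#   while (cond) ;  // whitespace/semicolon: extra space before last semicolon
#   int x{3};       // OK: recognized as brace initialization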
def IsDecltype(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise.
"""
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
if Search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block. We also allow a brace on the
# following line if it is part of an array initialization and would not fit
# within the 80 character limit of the preceding line.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline) and
not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'else if\s*\(', line): # could be multi-line if
brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
brace_on_right = endline[endpos:].find('{') != -1
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
if_else_match = Search(r'\b(if\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
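# Illustration of the brace-placement rules above (hypothetical C++):
#   an 'else' starting the line below a lone '}' -> whitespace/newline:
#     join them as '} else {'
#   '} else if (x) foo();' (brace on one side only) -> readability/braces:
#     use braces on both sides of the else or on neither
#   'do f(); while (x);' on a single line -> whitespace/newline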
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on
# - Compound literals
# - Lambdas
# - alignas specifier with anonymous structs
# - decltype
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
Search(r'\bdecltype$', line_prefix) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
# We need to check the line forward for NOLINT
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
error)
ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
error)
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression.
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
# Check for if statements that have completely empty bodies (no comments)
# and no else clauses.
if end_pos >= 0 and matched.group(1) == 'if':
# Find the position of the opening { for the if statement.
# Return without logging an error if it has no brackets.
opening_linenum = end_linenum
opening_line_fragment = end_line[end_pos:]
# Loop until EOF or find anything that's not whitespace or opening {.
while not Search(r'^\s*\{', opening_line_fragment):
if Search(r'^(?!\s*$)', opening_line_fragment):
# Conditional has no brackets.
return
opening_linenum += 1
if opening_linenum == len(clean_lines.elided):
# Couldn't find conditional's opening { or any code before EOF.
return
opening_line_fragment = clean_lines.elided[opening_linenum]
# Set opening_line (opening_line_fragment may not be entire opening line).
opening_line = clean_lines.elided[opening_linenum]
# Find the position of the closing }.
opening_pos = opening_line_fragment.find('{')
if opening_linenum == end_linenum:
# We need to make opening_pos relative to the start of the entire line.
opening_pos += end_pos
(closing_line, closing_linenum, closing_pos) = CloseExpression(
clean_lines, opening_linenum, opening_pos)
if closing_pos < 0:
return
# Now construct the body of the conditional. This consists of the portion
# of the opening line after the {, all lines until the closing line,
# and the portion of the closing line before the }.
if (clean_lines.raw_lines[opening_linenum] !=
CleanseComments(clean_lines.raw_lines[opening_linenum])):
# Opening line ends with a comment, so conditional isn't empty.
return
if closing_linenum > opening_linenum:
# Opening line after the {. Ignore comments here since we checked above.
body = list(opening_line[opening_pos+1:])
# All lines until closing line, excluding closing line, with comments.
body.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
# Closing line before the }. Won't (and can't) have comments.
body.append(clean_lines.elided[closing_linenum][:closing_pos-1])
body = '\n'.join(body)
else:
# If statement has brackets and fits on a single line.
body = opening_line[opening_pos+1:closing_pos-1]
# Check if the body is empty
if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
return
# The body is empty. Now make sure there's not an else clause.
current_linenum = closing_linenum
current_line_fragment = closing_line[closing_pos:]
# Loop until EOF or find anything that's not whitespace or else clause.
while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
if Search(r'^(?=\s*else)', current_line_fragment):
# Found an else clause, so don't log an error.
return
current_linenum += 1
if current_linenum == len(clean_lines.elided):
break
current_line_fragment = clean_lines.elided[current_linenum]
# The body is empty and there's no else clause until EOF or other code.
error(filename, end_linenum, 'whitespace/empty_if_body', 4,
('If statement had no body and no else clause'))
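# Illustration (hypothetical C++ lines) of the empty-body checks above:
#   while (Poll());        // whitespace/empty_loop_body: use {} or continue
#   if (ready);            // whitespace/empty_conditional_body: use {}
#   if (ready) {}          // whitespace/empty_if_body (no else clause)
#   if (ready) {} else {}  // OK: the else clause suppresses the warning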
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
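# Example results (assuming 'CHECK' is listed in _CHECK_MACROS):
#   FindCheckMacro('  CHECK(a == b);')  -> ('CHECK', 7)
#   FindCheckMacro('  MY_CHECKER(x);')  -> (None, -1), since \b rejects the
#     embedded 'CHECK' inside 'MY_CHECKER'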
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
if not Match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs does not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
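# Example suggestions (hypothetical input lines):
#   CHECK(x == 42);         -> readability/check: use CHECK_EQ(a, b)
#   CHECK(a < b && b < c);  -> no suggestion: && makes it non-replaceable
#   CHECK((a < 4) == b);    -> no suggestion: parenthesized comparison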
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
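# Illustration, assuming _ALT_TOKEN_REPLACEMENT maps 'and' -> '&&' and
# 'not' -> '!':
#   if (a and b)  -> readability/alt_tokens: use && instead of and
#   if (not a)    -> readability/alt_tokens: use ! instead of not
#   #define X or  -> skipped: preprocessor lines are not checked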
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
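# Example widths (Python 2 semantics, where the 'unicode' type exists):
#   GetLineWidth(u'abc')     -> 3
#   GetLineWidth(u'\u4e2d')  -> 2 (a wide CJK character spans two columns)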
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
prev = raw_lines[linenum - 1] if linenum > 0 else ''
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
# There are certain situations in which we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
# We also don't check for lines that look like continuation lines
# (of lines ending in double quotes, commas, equals, or angle brackets)
# because the rules for how to indent those are non-trivial.
if (not Search(r'[",=><] *$', prev) and
(initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# Check if the line is a header guard.
is_header_guard = False
if IsHeaderExtension(file_extension):
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^\s*//\s*[^\s]*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
if line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
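# For example (illustrative), this matches '#include <foo/bar.h>' with
# group(1) == '<' and group(2) == 'foo/bar.h'.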
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
# Only do this check if the included header follows google naming
# conventions. If not, assume that it's a 3rd party API that
# requires special include conventions.
#
# We also make an exception for Lua headers, which follow google
# naming convention but not the include convention.
match = Match(r'#include\s*"([^/]+\.h)"', line)
if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, duplicate_line))
elif (include.endswith('.cc') and
os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
error(filename, linenum, 'build/include', 4,
'Do not include .cc files from other packages')
elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
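    # For example (illustrative), a well-ordered foo.cc would begin with:
    #   #include "foo/foo.h"
    #   #include <stdio.h>
    #   #include <string>
    #   #include "bar/other.h"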
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly handles
  nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string that ends with an opening punctuation symbol.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
  # Map opening punctuation symbols to the matching close-punctuation symbols.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
# Stream types.
_RE_PATTERN_REF_STREAM_PARAM = (
r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
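# Illustrative examples of what these patterns are meant to capture
# (documentation only, not test fixtures):
#   'const string& name,'  matches the const-reference pattern (allowed).
#   'string& name)'        matches only the plain reference pattern (flagged).
#   'std::ostream& os)'    matches the stream pattern (allowed).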
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if IsHeaderExtension(file_extension):
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
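  # For example (illustrative): 'int buf[n];' is flagged below, while
  # 'int buf[kMaxSize];' and 'char buf[sizeof(Foo)];' are accepted.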
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (IsHeaderExtension(file_extension)
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
"""Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Match two lines at a time to support multiline declarations
if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access, and
# also because globals can be destroyed when some threads are still running.
# TODO(unknown): Generalize this to also find static unique_ptr instances.
# TODO(unknown): File bugs for clang-tidy to find these.
match = Match(
r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
r'([a-zA-Z0-9_:]+)\b(.*)',
line)
# Remove false positives:
# - String pointers (as opposed to values).
# string *pointer
# const string *pointer
# string const *pointer
# string *const pointer
#
# - Functions and template specializations.
# string Function<Type>(...
# string Class<Type>::Method(...
#
# - Operators. These are matched separately because operator names
# cross non-word boundaries, and trying to match both operators
# and functions at the same time would decrease accuracy of
# matching identifiers.
# string Class::operator*()
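  # Examples the check is intended to flag (illustrative only):
  #   static string g_name;          -> static/global string variable
  #   const string kPrefix = "foo";  -> suggest a C style string instead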
if (match and
not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
if Search(r'\bconst\b', line):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string '
'instead: "%schar%s %s[]".' %
(match.group(1), match.group(2) or '', match.group(3)))
else:
error(filename, linenum, 'runtime/string', 4,
'Static/global string variables are not permitted.')
if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False
def IsOutOfLineMethodDefinition(clean_lines, linenum):
"""Check if current line contains an out-of-line method definition.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains an out-of-line method definition.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
return False
def IsInitializerList(clean_lines, linenum):
"""Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
for i in xrange(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
remove_function_body = Match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tends to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tends to appear in constructor initializer lists as
# opposed to parameter lists.
return True
if Search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
if Search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
# - An opening brace, probably the start of current class or namespace.
#
# Current line is probably not inside an initializer list since
# we saw one of those things without seeing the starting colon.
return False
# Got to the beginning of the file without seeing the start of
# constructor initializer list.
return False
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# If a function is inherited, current function doesn't have much of
# a choice, so any non-const references should not be blamed on
# derived function.
if IsDerivedFunction(clean_lines, linenum):
return
# Don't warn on out-of-line method definitions, as we would warn on the
# in-line declaration, if it isn't marked with 'override'.
if IsOutOfLineMethodDefinition(clean_lines, linenum):
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
  # Note that this only scans back one line, since scanning back an
  # arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
  # Check for non-const references in function parameters. A single '&' may
  # be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
if (nesting_state.previous_stack_top and
not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
# Not at toplevel, not within a class, and not within a namespace
return
# Avoid initializer lists. We only need to scan back from the
# current line for something that starts with ':'.
#
# We don't need to check the current line, since the '&' would
# appear inside the second set of parentheses on the current line as
# opposed to the first set.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 10), -1):
previous_line = clean_lines.elided[i]
if not Search(r'[),]\s*$', previous_line):
break
if Match(r'^\s*:\s+\S', previous_line):
return
# Avoid preprocessors
if Search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
if IsInitializerList(clean_lines, linenum):
return
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
"""Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
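  # For example (illustrative): 'int(3.5)' and 'bool(flag)' are meant to be
  # flagged below, while 'new int(5)' and 'function<double(double)>' are
  # deliberately skipped.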
match = Search(
r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
# matched_new_or_template is used to silence two false positives:
# - New operators
# - Template arguments with function types
#
# For template arguments, we match on types immediately following
# an opening bracket without any spaces. This is a fast way to
# silence the common case where the function type is the first
# template argument. False negative with less-than comparison is
# avoided because those operators are usually followed by a space.
#
# function<double(double)> // bracket + no space = false positive
# value < double(42) // bracket + space = true positive
matched_new_or_template = match.group(1)
# Avoid arrays by looking for brackets that come after the closing
# parenthesis.
if Match(r'\([^()]+\)\s*\[', match.group(3)):
return
# Other things to ignore:
# - Function pointers
# - Casts to pointer types
# - Placement new
# - Alias declarations
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
#
# Some non-identifier character is required before the '&' for the
# expression to be recognized as a cast. These are casts:
# expression = &static_cast<int*>(temporary());
# function(&(int*)(temporary()));
#
# This is not a cast:
# reference_type&(int* function_param);
match = Search(
r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match:
# Try a better error message when the & is bound to something
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
  # Try expanding the current context to see if we are one level of
  # parentheses inside a macro.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
if context.endswith(' operator++') or context.endswith(' operator--'):
return False
  # A single unnamed function argument tends to look like an old-style cast.
# If we see those, don't issue warnings for deprecated casts.
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
remainder):
return False
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
'unique_ptr', 'weak_ptr')),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<tuple>', ('tuple',)),
('<unordered_map>', ('unordered_map', 'unordered_multimap')),
('<unordered_set>', ('unordered_set', 'unordered_multiset')),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_HEADERS_MAYBE_TEMPLATES = (
('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
'transform',
)),
('<utility>', ('forward', 'make_pair', 'move', 'swap')),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_headers_maybe_templates = []
for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
for _template in _templates:
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_headers_maybe_templates.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
_header))
# Other scripts may reach in and modify this pattern.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
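  For example (illustrative, mirroring the sysinfo example above):
  >>> FilesBelongToSameModule('/absolute/path/to/base/sysinfo.cc',
  ...                         'base/sysinfo.h')
  (True, '/absolute/path/to/')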
"""
fileinfo = FileInfo(filename_cc)
if not fileinfo.IsSource():
return (False, '')
filename_cc = filename_cc[:-len(fileinfo.Extension())]
matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo.BaseName())
if matched_test_suffix:
filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
def UpdateIncludeState(filename, include_dict, io=codecs):
"""Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_headers_maybe_templates:
if pattern.search(line):
required[header] = (linenum, template)
    # The following check is just a speedup; no semantics are changed.
    if '<' not in line:  # Reduces the CPU time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
matched = pattern.search(line)
if matched:
# Don't warn about IWYU in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's flatten the include_state include_list and copy it into a dictionary.
include_dict = dict([item for sublist in include_state.include_list
for item in sublist])
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_dict.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_dict, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
if not virtual: return
# Ignore "virtual" keywords that are near access-specifiers. These
# are only used in class base-specifier and do not apply to member
# functions.
if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
return
# Ignore the "virtual" keyword from virtual base classes. Usually
  # there is a colon on the same line in these cases (virtual base
# classes are rare in google3 because multiple inheritance is rare).
if Match(r'^.*[^:]:[^:].*$', line): return
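  # For example (illustrative): in 'class D : public virtual B {', the
  # "virtual" is a base-class specifier, so the checks above skip it.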
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(2))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for closing parenthesis nearby. We need one to confirm where
# the declarator ends and where the virt-specifier starts to avoid
# false positives.
line = clean_lines.elided[linenum]
declarator_end = line.rfind(')')
if declarator_end >= 0:
fragment = line[declarator_end:]
else:
if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
fragment = line
else:
return
# Check that at most one of "override" or "final" is present, not both
if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
"""Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is a forward declared class.
Returns:
Whether or not the new block is directly in a namespace.
"""
if is_forward_declaration:
if len(nesting_state.stack) >= 1 and (
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
return True
else:
return False
return (len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum):
"""This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace.
"""
is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
linenum)
if not (is_namespace_indent_item or is_forward_declaration):
return False
# If we are in a macro, we do not want to check the namespace indentation.
if IsMacroDefinition(raw_lines_no_comments, linenum):
return False
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
error):
line = raw_lines_no_comments[linenum]
if Match(r'^\s+', line):
error(filename, linenum, 'runtime/indentation_namespace', 4,
'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++ TR1 headers.
if include and include.group(1).startswith('tr1/'):
error(filename, linenum, 'build/c++tr1', 5,
('C++ TR1 headers such as <%s> are unapproved.') % include.group(1))
# Flag unapproved C++11 headers.
if include and include.group(1) in ('cfenv',
'condition_variable',
'fenv.h',
'future',
'mutex',
'thread',
'chrono',
'ratio',
'regex',
'system_error',
):
error(filename, linenum, 'build/c++11', 5,
('<%s> is an unapproved C++11 header.') % include.group(1))
# The only place where we need to worry about C++11 keywords and library
# features in preprocessor directives is in macro definitions.
if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
# These are classes and free functions. The classes are always
# mentioned as std::*, but we only catch the free functions if
# they're not found by ADL. They're alphabetical by header.
for top_name in (
# type_traits
'alignment_of',
'aligned_union',
):
if Search(r'\bstd::%s\b' % top_name, line):
error(filename, linenum, 'build/c++11', 5,
('std::%s is an unapproved C++11 class or function. Send c-style '
'an example of where it would make your code more readable, and '
'they may let you use it.') % top_name)
def FlagCxx14Features(filename, clean_lines, linenum, error):
"""Flag those C++14 features that we restrict.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++14 headers.
if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
error(filename, linenum, 'build/c++14', 5,
('<%s> is an unapproved C++14 header.') % include.group(1))
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
ProcessGlobalSuppresions(lines)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
if IsHeaderExtension(file_extension):
CheckForHeaderGuard(filename, clean_lines, error)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# Check that the .cc file has included its header if it exists.
if _IsSourceExtension(file_extension):
CheckHeaderFileIncluded(filename, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
""" Loads the configuration files and processes the config overrides.
Args:
filename: The name of the file being processed by the linter.
Returns:
False if the current |filename| should not be processed further.
"""
abs_filename = os.path.abspath(filename)
cfg_filters = []
keep_looking = True
while keep_looking:
abs_path, base_name = os.path.split(abs_filename)
if not base_name:
break # Reached the root directory.
cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
abs_filename = abs_path
if not os.path.isfile(cfg_file):
continue
try:
with open(cfg_file) as file_handle:
for line in file_handle:
line, _, _ = line.partition('#') # Remove comments.
if not line.strip():
continue
name, _, val = line.partition('=')
name = name.strip()
val = val.strip()
if name == 'set noparent':
keep_looking = False
elif name == 'filter':
cfg_filters.append(val)
elif name == 'exclude_files':
# When matching exclude_files pattern, use the base_name of
# the current file name or the directory name we are processing.
# For example, if we are checking for lint errors in /foo/bar/baz.cc
# and we found the .cfg file at /foo/CPPLINT.cfg, then the config
# file's "exclude_files" filter is meant to be checked against "bar"
# and not "baz" nor "bar/baz.cc".
if base_name:
pattern = re.compile(val)
if pattern.match(base_name):
sys.stderr.write('Ignoring "%s": file excluded by "%s". '
'File path component "%s" matches '
'pattern "%s"\n' %
(filename, cfg_file, base_name, val))
return False
elif name == 'linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
sys.stderr.write('Line length must be numeric.')
elif name == 'root':
global _root
_root = val
elif name == 'headers':
ProcessHppHeadersOption(val)
else:
sys.stderr.write(
'Invalid configuration option (%s) in file %s\n' %
(name, cfg_file))
except IOError:
sys.stderr.write(
"Skipping config file '%s': Can't open for reading\n" % cfg_file)
keep_looking = False
# Apply all the accumulated filters in reverse order (top-level directory
# config options having the least priority).
for filter in reversed(cfg_filters):
_AddFilters(filter)
return True
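# Example CPPLINT.cfg understood by the parser above (all values are
# illustrative; the option names mirror the branches in the loop):
#
#   set noparent
#   filter=-build/include_order,+build/include_alpha
#   exclude_files=.*_test\.cc
#   linelength=100
#   root=src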
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
_BackupFilters()
if not ProcessConfigOverrides(filename):
_RestoreFilters()
return
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
# Warn on every line with CR. An alternative approach might be to
# check whether the file is mostly CRLF or just LF, and warn on the
# minority, we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
sys.stdout.write('Done processing %s\n' % filename)
_RestoreFilters()
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions=',
'headers='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
          PrintUsage('Extensions must be a comma-separated list.')
elif opt == '--headers':
ProcessHppHeadersOption(val)
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
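# Typical invocations, matching the flags handled in ParseArguments above
# (file names are illustrative):
#
#   python cpplint.py foo.cc bar.h
#   python cpplint.py --filter=-whitespace/newline --linelength=100 foo.cc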
| [
"[email protected]"
] | |
52c9c6e5b3ad7d0bb3ab21c3c6abaf8ec655ea91 | 7d69597350c12b21667f6836fc39924060de8a65 | /ekoparty2016/for50-certified-excel-hacker/false_solve.py | 34139c9f1897a9b8f5f27eecd11ac6cf38cd0d0f | [] | no_license | write-ups/public-writeup | 1ae1a539a4897b7be16341440cd679b1326ccf75 | 6bf2099e6df9b44c6674e84dbc728b9668a9883f | refs/heads/master | 2020-06-04T10:50:31.632486 | 2019-03-06T19:06:34 | 2019-03-06T19:06:34 | 191,990,646 | 0 | 1 | null | 2019-06-14T18:42:05 | 2019-06-14T18:42:04 | null | UTF-8 | Python | false | false | 312 | py | #!/usr/bin/env python
import base64
import sha
answer = ""
# VBA code runs 1 to 16777216 but VBA loops include the 16777216
for i in range(1,16777217):
answer = base64.b64encode(sha.new(answer).digest())
print "EKO{" + answer.replace("=", "") + "}"
# Result is EKO{DCEUslnl7DeiLWSdCLi0l1fxdc8} | [
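# A hashlib-based sketch of the same loop for interpreters without the legacy
# `sha` module (e.g. Python 3); the function name and structure here are our
# own, not from the original script:
import hashlib

def solve_with_hashlib(rounds=16777216):
    answer = b""
    for _ in range(rounds):  # on Python 2 prefer xrange to avoid a huge list
        answer = base64.b64encode(hashlib.sha1(answer).digest())
    return b"EKO{" + answer.replace(b"=", b"") + b"}"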
"[email protected]"
] | |
177385e73c4b35884e8ce4f1c6cac71bba121d38 | f9f379aaacff1c9590ee5d9bbf1ce61cfb35762d | /env/bin/isort-identify-imports | 9ef0457f9d645713b9ba6d858de4e5f6809834e1 | [] | no_license | RedSolutionsNYC/backend_samtrak | b420b81f7c9089cfe288206f4aaa9eacca8ba30d | f0cda2f649c20253a34298ba334bcc06971da3f2 | refs/heads/master | 2023-02-23T20:48:00.690570 | 2021-01-24T23:58:03 | 2021-01-24T23:58:03 | 332,518,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | #!/home/jeanhsuarez/Public/sample-tracker/backend_samtrak/env/bin/python3.8
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import identify_imports_main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(identify_imports_main())
| [
"devyoyong@gmailcom"
] | devyoyong@gmailcom |
|
0f608d94c8cec41e78a6415cb81157505e3dbe05 | ea58a62bd6fe205aeeeddab41c1587c8ca9e6798 | /exl4.py | 0d7e656ea2385f081cdd704ea8e53e6038e75065 | [] | no_license | jasonwenlee/Learn-Python-The-Hard-Way-Exercises-Jason | 7b42a7c64a7189ab432909f9dc806d92a4ce462b | c918808810143f0a9e79861c07b4435f726f489b | refs/heads/master | 2021-09-07T21:49:13.722715 | 2018-03-01T14:39:36 | 2018-03-01T14:39:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | print "Exercise 4: Variables And Names"
print #
# Variable cars is straight away defined as an integer with a value of 100.
# There are 100 cars.
cars = 100
# Variable space_in_a_car is straight away defined as an integer with a value of 4.
# There is only enough space for 4 passengers.
space_in_a_car = 4
# Variable drivers is straight away defined as a float with a value of 30.0.
# There are a total of 30 drivers.
drivers = 30.0
# Variable passengers is straight away defined as a float with a value of 30.0.
# There are a total of 30 passengers.
passengers = 30.0
# Determine the number of cars not driven.
cars_not_driven = cars - drivers
# The number of cars driven is equal to the number of drivers.
cars_driven = drivers
# Determine the total amount of carpool capacity.
carpool_capacity = cars_driven * space_in_a_car
# Determine the average number of passengers in a car
average_passengers_per_car = passengers / cars_driven
print "There are", cars, "cars available."
print "There are only", drivers, "drivers available."
print "There will be", cars_not_driven, "empty cars today."
print "We can transport", carpool_capacity, "people today."
print "We have", passengers, "to carpool today."
print "We need to put about", average_passengers_per_car, "in each car."
| [
"[email protected]"
] | |
2569015e68203b5145927d0ffd3d14a85516b999 | 6f2af0e2296302b8f901e4a4f93b5d3034460e7a | /lib/screen.py | 1f824e58e193ecfb0f3e11fc283b6952ee88a2de | [] | no_license | verdog/autopico8 | 67ecd0ec8d53148439b32f70993fe6dbbcc5fefb | b64fa444bee02219723cbb394a325bf1740ee6b0 | refs/heads/master | 2022-12-19T08:50:06.235699 | 2020-10-04T23:36:58 | 2020-10-04T23:36:58 | 300,954,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | import cv2
import numpy as np
from Xlib import display, X
from PIL import ImageGrab, Image
# these are in BGR format
COLOR_0 = (0, 0, 0)
COLOR_1 = (83, 43, 29)
COLOR_2 = (83, 37, 126)
COLOR_3 = (81, 135, 0)
COLOR_4 = (54, 82, 171)
COLOR_5 = (79, 87, 95)
COLOR_6 = (199, 195, 194)
COLOR_7 = (232, 241, 255)
COLOR_8 = (77, 0, 255)
COLOR_9 = (0, 163, 255)
COLOR_A = (39, 236, 255)
COLOR_B = (54, 228, 0)
COLOR_C = (255, 173, 41)
COLOR_D = (156, 118, 131)
COLOR_E = (168, 119, 255)
COLOR_F = (170, 204, 255)
# find the pico8 x window
def find_pico8():
print("Trying to find pico8...")
dsp = display.Display()
q = []
q.append(dsp.screen().root)
while len(q) != 0:
tree = q.pop(0)
for node in tree.query_tree().children:
name = node.get_wm_name()
if name and "PICO-8" in name:
print(f"Found it: {node.id}")
return node
q.append(node)
print("Couldn't find it...")
return None
# return a cv2 image of the screen of a given window
def get_screen(window):
geo = window.get_geometry()
w = geo.width
h = geo.height
raw = window.get_image(0, 0, w, h, X.ZPixmap, 0xffffffff)
image = Image.frombytes("RGB", (w, h), raw.data, "raw", "BGRX")
array = np.array(image)
return cv2.cvtColor(array, cv2.COLOR_RGB2BGR)
# reduce image to one more easily processable
def filter(image):
w = image.shape[1]
h = image.shape[0]
smallest = min(w, h)
scale = smallest//128
sidesize = scale * 128
x = (w - sidesize) // 2
y = (h - sidesize) // 2
cropped = image[y:y+sidesize, x:x+sidesize]
return cv2.resize(cropped, (128, 128), None, 0, 0, cv2.INTER_NEAREST)
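# Hypothetical quick test wiring the helpers together (the output file name
# is an assumption; note that filter() above shadows the Python builtin):
if __name__ == "__main__":
    win = find_pico8()
    if win is not None:
        frame = filter(get_screen(win))
        cv2.imwrite("pico8_128x128.png", frame)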
| [
"[email protected]"
] | |
df9652cb12686bb67561a0ad9b96edd691e90972 | 2009473f0d95a9a92b1a4a9c93532cac35b4de42 | /cloudbaseinit/plugins/common/mtu.py | b987b130c6285728951fddaabc61faca5c69fffb | [
"Apache-2.0"
] | permissive | rrbarreto/pfsense-cloudinit | 3601e82a5c2e8ff4fa15751f7ed279f084cfbbb4 | d8575ff3c1114b72999002024a876abeddca0d25 | refs/heads/master | 2020-05-01T01:30:29.087025 | 2019-03-29T20:16:33 | 2019-03-29T20:16:33 | 177,197,282 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import struct
from oslo_config import cfg
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.plugins.common import base
from cloudbaseinit.utils import dhcp
opts = [
cfg.BoolOpt('mtu_use_dhcp_config', default=True,
help='Configures the network interfaces MTU based on the '
'values provided via DHCP'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
LOG = logging.getLogger(__name__)
class MTUPlugin(base.BasePlugin):
def execute(self, service, shared_data):
if CONF.mtu_use_dhcp_config:
osutils = osutils_factory.get_os_utils()
dhcp_hosts = osutils.get_dhcp_hosts_in_use()
for (mac_address, dhcp_host) in dhcp_hosts:
options_data = dhcp.get_dhcp_options(dhcp_host,
[dhcp.OPTION_MTU])
if options_data:
mtu_option_data = options_data.get(dhcp.OPTION_MTU)
if mtu_option_data:
mtu = struct.unpack('!H', mtu_option_data)[0]
osutils.set_network_adapter_mtu(mac_address, mtu)
else:
LOG.debug('Could not obtain the MTU configuration '
'via DHCP for interface "%s"' % mac_address)
return (base.PLUGIN_EXECUTE_ON_NEXT_BOOT, False)
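# Worked example of the decode above: the DHCP MTU option carries the value
# as a 16-bit big-endian integer, so two raw bytes such as b'\x05\xdc'
# unpack to 1500 (values here are illustrative):
#
#   >>> import struct
#   >>> struct.unpack('!H', b'\x05\xdc')[0]
#   1500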
| [
"[email protected]"
] | |
e466d3634b6da0d12ef3d2b7c06dc8e276d0f81a | c63fe277b5d455b7cd1e671d45302cfca1cbb17b | /student.py | 32836999be629f642bced045086e656f816f665c | [] | no_license | sauravsingh2/saurav-sayantan | cdf8ea42db63bf20331ba673277b085e75a0da5b | e7ba34ec5aa20917c2c04065eda23110a1d08043 | refs/heads/main | 2023-03-15T06:48:32.352143 | 2021-03-08T10:49:29 | 2021-03-08T10:49:29 | 345,589,485 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | class Student:
    def __init__(self, name, Id, percentage=0, skills=None):
        # A mutable default ([]) would be shared across every instance;
        # use None and create a fresh list per object instead.
        self.name = name
        self.Id = Id
        self.percentage = percentage
        self.skills = skills if skills is not None else []
def get_name(self):
return self.name
def get_Id(self):
return self.Id
def get_percentage(self):
return self.percentage
def get_skills(self):
return self.skills
def set_name(self, name):
self.name = name
def set_percentage(self, perct):
self.percentage = perct
def set_skill(self, skillsnew):
self.skills = skillsnew
class Operation (Student):
def __init__(self):
Student.__init__(self,"saurav",1,10,["java"])
def changeName(self, newName):
self.set_name(newName)
def changeNameUserInput(self):
newName = input("Enter your new Name")
self.set_name(newName)
    def addSkills(self, newSkills):
        newarr = newSkills + self.skills
        self.set_skill(newarr)
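# Hypothetical usage of the classes above (Python 3 print shown):
if __name__ == "__main__":
    op = Operation()
    op.changeName("sayantan")
    op.addSkills(["python"])
    print(op.get_name(), op.get_skills())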
| [
"[email protected]"
] | |
09b78b4d7e1feed889feb876a2a3b1bb69dbcd3b | c48a87f449e20e23ef995a3de5f23dd8abc718ed | /face_rec_api/app.py | c6bc98ce0a269ec701af307a94546679f67a49fb | [
"MIT"
] | permissive | saranyasajeev/f | dd328a65c047efa6e692ceda511ed08e0168a81d | 857b90b362e1f55551e0be222c1f35826d0d0d72 | refs/heads/main | 2023-06-14T04:17:24.021507 | 2021-07-14T10:46:10 | 2021-07-14T10:46:10 | 385,862,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,246 | py | from flask import Flask, request, redirect, jsonify
from werkzeug.utils import secure_filename
import os
import json
from face_util import compare_faces, face_rec, find_facial_features, find_face_locations
import re
import base64
from PIL import Image
from io import BytesIO
import requests
app = Flask(__name__)
UPLOAD_FOLDER = 'received_files'
ALLOWED_EXTENSIONS = ['png', 'jpg', 'jpeg']
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def print_request(request):
# Print request url
print(request.url)
# print relative headers
print('content-type: "%s"' % request.headers.get('content-type'))
print('content-length: %s' % request.headers.get('content-length'))
# print body content
if request.is_json:
json_data = request.get_json(cache=True)
# replace image_data with '<image base64 data>'
if json_data.get('image_data', None) is not None:
json_data['image_data'] = '<image base64 data>'
else:
print('request image_data is None.')
print(json.dumps(json_data,indent=4))
else: # form data
body_data=request.get_data()
# replace image raw data with string '<image raw data>'
body_sub_image_data=re.sub(b'(\r\n\r\n)(.*?)(\r\n--)',br'\1<image raw data>\3', body_data,flags=re.DOTALL)
print(body_sub_image_data.decode('utf-8'))
# print(body_data[0:500] + b'...' + body_data[-500:]) # raw binary
# NOTE: the original file was missing the route wrapper for this handler, so
# the block below could not run as module-level code (it uses `return`). The
# '/face_rec' route and the function name are assumptions made to restore a
# runnable endpoint; the GET fallback page is returned at function level.
@app.route('/face_rec', methods=['GET', 'POST'])
def upload_image():
    if request.method == 'POST':
        # Print request url, headers and content
        print_request(request)
        # JSON data format
        if request.is_json:
            """ Sample data
            {'file_format':'jpg', 'image_data': <base64 ascii string>}
            """
            # print('Request is a JSON format.')
            json_data = request.get_json(cache=False)
            file_format = json_data.get('file_format', None)
            image_data = json_data.get('image_data', None)
            if file_format not in ALLOWED_EXTENSIONS or image_data is None:
                return '{"error":"Invalid JSON."}'
            file = os.path.join(UPLOAD_FOLDER, 'image.' + file_format)
            with open(file, 'wb') as f:
                # Note: Convert ascii string to binary string first, e.g. 'abc' to b'abc', before decode as base64 string.
                f.write(base64.b64decode(image_data.encode('ascii')))
        # form data format
        else:
            # check if the post request has the file part
            if 'file' not in request.files:
                print('No file part')
                return redirect(request.url)
            file = request.files.get('file')
            # if user does not select file, browser also submit an empty part without filename
            if file.filename == '':
                print('No selected file')
                return redirect(request.url)
            if not allowed_file(file.filename):
                return '{"error":"Invalid image file format."}'
        # Process image file
        # Note file could be a filename or a file object.
        name = face_rec(file)
        resp_data = {'name': name}
        # get parameters from url if any.
        # facial_features parameter:
        param_features = request.args.get('facial_features', '')
        if param_features.lower() == 'true':
            facial_features = find_facial_features(file)
            # append facial_features to resp_data
            resp_data.update({'facial_features': facial_features})
        # face_locations parameter:
        param_locations = request.args.get('face_locations', '')
        if param_locations.lower() == 'true':
            face_locations = find_face_locations(file)
            resp_data.update({'face_locations': face_locations})
        return json.dumps(resp_data)
    return '''
    <!doctype html>
    <title>Face Recognition</title>
    <h1>Upload an image</h1>
    <form method=post enctype=multipart/form-data>
      <input type=file name=file>
      <input type=submit value=Upload>
    </form>
    '''
@app.route('/face_match', methods=['POST'])
def face_match():
if request.method == 'POST':
# check if the post request has the file part
# if ('file1' not in request.files) or ('file2' not in request.files):
# print('No file part')
# return redirect(request.url)
data= request.get_json()
license = data['license']
selfie = data['selfie']
licenseIm = Image.open(BytesIO(requests.get(license).content)).convert("RGB")
selfieIm = Image.open(BytesIO(requests.get(selfie).content)).convert("RGB")
#licenseIm.save('im1.jpg')
#selfieIm.save('im2.jpg')
#im1 = open('im1.jpg','rb')
#im2 = open('im2.jpg','rb')
ret= compare_faces(licenseIm,selfieIm)
resp_data = {"match": bool(ret)} # convert ret (numpy._bool) to bool for json.dumps
return jsonify(resp_data)
        # Leftover from an earlier file-upload version of this endpoint; it is
        # unreachable after the `return jsonify(resp_data)` above (and PIL
        # images have no `.filename` attribute), so it is kept commented out.
        # file1 = request.files.get('file1')
        # file2 = request.files.get('file2')
        # if user does not select file, browser also submit an empty part without filename
        # if licenseIm.filename == '' or selfieIm.filename == '':
        #     print('No selected file')
        #     return redirect(request.url)
        # if allowed_file(licenseIm.filename) and allowed_file(selfieIm.filename):
        #     #file1.save( os.path.join(UPLOAD_FOLDER, secure_filename(file1.filename)) )
        #     #file2.save( os.path.join(UPLOAD_FOLDER, secure_filename(file2.filename)) )
        #     ret = compare_faces(licenseIm, selfieIm)
        #     resp_data = {"match": bool(ret)}  # convert ret (numpy._bool) to bool for json.dumps
        #     return jsonify(resp_data)
# Return a demo page for GET request
@app.route('/')
def hello_world():
return 'Hello, World!'
# Run in HTTP
# When debug = True, code is reloaded on the fly while saved
if __name__ == '__main__':
    # Guarded so the server only starts when run directly; the 5000 fallback
    # for an unset PORT is an assumption.
    port = int(os.getenv('PORT', '5000'))
    app.run(host='0.0.0.0', port=port, debug=True)
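# Hypothetical client call against the JSON endpoint above; the host, port,
# file name and the '/face_rec' route are assumptions (see the note at the
# route definition):
#
#   import base64, requests
#   with open('face.jpg', 'rb') as fh:
#       payload = {'file_format': 'jpg',
#                  'image_data': base64.b64encode(fh.read()).decode('ascii')}
#   print(requests.post('http://localhost:5000/face_rec?facial_features=true',
#                       json=payload).json())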
| [
"[email protected]"
] | |
d4f046e7d15eb9d33ad2b2ffa1a275a27af4d56d | 7f6315bf39bd3b27b62b91eb293292088e8199a8 | /getBeauty/middlewares.py | df752ea653a115fd06a3866cd4ef4bf6fea08b2f | [] | no_license | Tarantiner/getBeauty-Master | 3c6eb2f840562b9b5a3561106a4bfa3f4221cdb2 | e6328fb5e3e7a77e9029a4dcefb9dc60da05de55 | refs/heads/master | 2022-12-14T03:56:44.511823 | 2019-03-21T12:28:19 | 2019-03-21T12:28:19 | 176,923,616 | 0 | 0 | null | 2022-12-08T02:30:38 | 2019-03-21T10:36:57 | Python | UTF-8 | Python | false | false | 4,873 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
import json
import random
from scrapy import signals
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware
from scrapy.contrib.downloadermiddleware.retry import RetryMiddleware
# class MyRetryMiddleWare(RetryMiddleware):
#
# def process_response(self, request, response, spider):
# print('#'*100)
# print(response.status)
# print('#' * 100)
# return response
class MyUserAgentMiddleWare(UserAgentMiddleware):
# provide user-agent for each request
@staticmethod
def get_ua():
return json.load(open('getBeauty/service/UserAgent.json', 'r', encoding='utf-8'))
def process_request(self, request, spider):
# fetch a random user-agent from existing user-agent list
ua_lis = self.get_ua()
ua = random.choice(ua_lis)
request.headers.update({'User-Agent': ua, 'Referer': 'http://fun1shot.com/list/?id=7'})
return None
class MyProxyMiddleWare(object):
@staticmethod
def get_proxy():
return json.load(open('getBeauty/service/proxy.json', 'r', encoding='utf-8'))
def process_request(self, request, spider):
proxy_lis = self.get_proxy()
proxy = 'http://' + random.choice(proxy_lis)
request.meta['proxy'] = proxy
return None
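    # To enable the two middlewares above they would be registered in the
    # project's settings.py; the module path and priorities below are
    # assumptions:
    #
    #   DOWNLOADER_MIDDLEWARES = {
    #       'getBeauty.middlewares.MyUserAgentMiddleWare': 543,
    #       'getBeauty.middlewares.MyProxyMiddleWare': 544,
    #   }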
class GetbeautySpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class GetbeautyDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
a9b12f2d7b79598884ea8efef4330b45ade550ad | 10b5c8090412758cac1b00a450249f31843b243c | /mz_ws/devel/lib/python2.7/dist-packages/amcl/cfg/AMCLConfig.py | 54babc11e9763b3d517cd8d5fa746e312df57d46 | [] | no_license | jyr735112797/MeiZhongRobot | 1499c29ca6de786023c8cb0a23ce6cf5da857681 | bdba37818c1b37eb269cac8142fdc8590b235e8f | refs/heads/master | 2020-12-08T05:50:44.356523 | 2020-01-09T20:55:17 | 2020-01-09T20:55:17 | 232,903,373 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,806 | py | ## *********************************************************
##
## File autogenerated for the amcl package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 245, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 290, 'description': 'Minimum allowed number of particles.', 'max': 1000, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_particles', 'edit_method': '', 'default': 100, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Maximum allowed number of particles.', 'max': 10000, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_particles', 'edit_method': '', 'default': 5000, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Maximum error between the true distribution and the estimated distribution.', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'kld_err', 'edit_method': '', 'default': 0.01, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Upper standard normal quantile for (1 - p), where p is the probability that the error on the estimated distribution will be less than kld_err.', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'kld_z', 'edit_method': '', 'default': 0.99, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Translational movement required before performing a filter update.', 'max': 5.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'update_min_d', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Rotational movement required before performing a filter update.', 'max': 6.283185307179586, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'update_min_a', 'edit_method': '', 'default': 0.5235987755982988, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Number of filter updates required before resampling.', 'max': 20, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'resample_interval', 'edit_method': '', 'default': 2, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Time with which to post-date the transform that is published, to indicate that this transform is valid into the future.', 'max': 2.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'transform_tolerance', 'edit_method': '', 'default': 0.1, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Exponential decay rate for the slow average weight filter, used in deciding when to recover by adding \nrandom poses. A good value might be 0.001.', 'max': 0.5, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'recovery_alpha_slow', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Exponential decay rate for the fast average weight filter, used in deciding when to recover by adding random poses. A good value might be 0.1.', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'recovery_alpha_fast', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'When true skips laser scans when a scan doesnt work for a majority of particles', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'do_beamskip', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Distance from a valid map point before scan is considered invalid', 'max': 2.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'beam_skip_distance', 'edit_method': '', 'default': 0.5, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Ratio of samples for which the scans are valid to consider as valid scan', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'beam_skip_threshold', 'edit_method': '', 'default': 0.3, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'When true (the default), publish results via TF. When false, do not.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'tf_broadcast', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Maximum rate (Hz) at which scans and paths are published for visualization, -1.0 to disable.', 'max': 100.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'gui_publish_rate', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 290, 'description': 'Maximum rate (Hz) at which to store the last estimated pose and covariance to the parameter server, in the variables ~initial_pose_* and ~initial_cov_*. This saved pose will be used on subsequent runs to initialize the filter. \n-1.0 to disable.', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'save_pose_rate', 'edit_method': '', 'default': 0.5, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 290, 'description': 'When set to true, AMCL will subscribe to the map topic rather than making a service call to receive its map.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'use_map_topic', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'When set to true, AMCL will only use the first map it subscribes to, rather than updating each time a new one is received.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'first_map_only', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': "Minimum scan range to be considered; -1.0 will cause the laser's reported minimum range to be used.", 'max': 1000.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_min_range', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 290, 'description': "Maximum scan range to be considered; -1.0 will cause the laser's reported maximum range to be used.", 'max': 1000.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_max_range', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 290, 'description': 'How many evenly-spaced beams in each scan to be used when updating the filter.', 'max': 250, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_max_beams', 'edit_method': '', 'default': 30, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Mixture weight for the z_hit part of the model.', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_z_hit', 'edit_method': '', 'default': 0.95, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Mixture weight for the z_short part of the model.', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_z_short', 'edit_method': '', 'default': 0.1, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Mixture weight for the z_max part of the model.', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_z_max', 'edit_method': '', 'default': 0.05, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Mixture weight for the z_rand part of the model.', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_z_rand', 'edit_method': '', 'default': 0.05, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Standard deviation for Gaussian model used in z_hit part of the model.', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_sigma_hit', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Exponential decay parameter for z_short part of model.', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_lambda_short', 'edit_method': '', 'default': 0.1, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Maximum distance to do obstacle inflation on map, for use in likelihood_field model.', 'max': 20.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_likelihood_max_dist', 'edit_method': '', 'default': 2.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Which model to use, either beam, likelihood_field or likelihood_field_prob.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'laser_model_type', 'edit_method': "{'enum_description': 'Laser Models', 'enum': [{'srcline': 52, 'description': 'Use beam laser model', 'srcfile': '/home/ubuntu/mz_ws/src/navigation/amcl/cfg/AMCL.cfg', 'cconsttype': 'const char * const', 'value': 'beam', 'ctype': 'std::string', 'type': 'str', 'name': 'beam_const'}, {'srcline': 52, 'description': 'Use likelihood_field laser model', 'srcfile': '/home/ubuntu/mz_ws/src/navigation/amcl/cfg/AMCL.cfg', 'cconsttype': 'const char * const', 'value': 'likelihood_field', 'ctype': 'std::string', 'type': 'str', 'name': 'likelihood_field_const'}, {'srcline': 52, 'description': 'Use likelihood_field_prob laser model', 'srcfile': '/home/ubuntu/mz_ws/src/navigation/amcl/cfg/AMCL.cfg', 'cconsttype': 'const char * const', 'value': 'likelihood_field_prob', 'ctype': 'std::string', 'type': 'str', 'name': 'likelihood_field_prob'}]}", 'default': 'likelihood_field', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Which model to use, diff, omni, diff-corrected, or omni-corrected', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'odom_model_type', 'edit_method': "{'enum_description': 'Odom Models', 'enum': [{'srcline': 56, 'description': 'Use diff odom model', 'srcfile': '/home/ubuntu/mz_ws/src/navigation/amcl/cfg/AMCL.cfg', 'cconsttype': 'const char * const', 'value': 'diff', 'ctype': 'std::string', 'type': 'str', 'name': 'diff_const'}, {'srcline': 57, 'description': 'Use omni odom model', 'srcfile': '/home/ubuntu/mz_ws/src/navigation/amcl/cfg/AMCL.cfg', 'cconsttype': 'const char * const', 'value': 'omni', 'ctype': 'std::string', 'type': 'str', 'name': 'omni_const'}, {'srcline': 58, 'description': 'Use corrected diff odom model', 'srcfile': '/home/ubuntu/mz_ws/src/navigation/amcl/cfg/AMCL.cfg', 'cconsttype': 'const char * const', 'value': 'diff-corrected', 'ctype': 'std::string', 'type': 'str', 'name': 'diff_corrected_const'}, {'srcline': 59, 'description': 'Use corrected omni odom model', 'srcfile': '/home/ubuntu/mz_ws/src/navigation/amcl/cfg/AMCL.cfg', 'cconsttype': 'const char * const', 'value': 'omni-corrected', 'ctype': 'std::string', 'type': 'str', 'name': 'omni_corrected_const'}]}", 'default': 'diff', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': "Specifies the expected noise in odometry's rotation estimate from the rotational component of the robot's motion.", 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'odom_alpha1', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': "Specifies the expected noise in odometry's rotation estimate from the translational component of the robot's motion.", 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'odom_alpha2', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': "Specifies the expected noise in odometry's translation estimate from the translational component of the robot's motion.", 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'odom_alpha3', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': "Specifies the expected noise in odometry's translation estimate from the rotational component of the robot's motion.", 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'odom_alpha4', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Translation-related noise parameter (only used if model is omni).', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'odom_alpha5', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Which frame to use for odometry.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'odom_frame_id', 'edit_method': '', 'default': 'odom', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Which frame to use for the robot base.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'base_frame_id', 'edit_method': '', 'default': 'base_link', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'The name of the coordinate frame published by the localization system.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'global_frame_id', 'edit_method': '', 'default': 'map', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Restore the default configuration', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'restore_defaults', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}], 'type': '', 'id': 0}
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
# params = []
# params.extend(config['parameters'])
# for group in config['groups']:
# params.extend(extract_params(group))
# return params
for param in extract_params(config_description):
min[param['name']] = param['min']
max[param['name']] = param['max']
defaults[param['name']] = param['default']
level[param['name']] = param['level']
type[param['name']] = param['type']
all_level = all_level | param['level']
AMCL_beam_const = 'beam'
AMCL_likelihood_field_const = 'likelihood_field'
AMCL_likelihood_field_prob = 'likelihood_field_prob'
AMCL_diff_const = 'diff'
AMCL_omni_const = 'omni'
AMCL_diff_corrected_const = 'diff-corrected'
AMCL_omni_corrected_const = 'omni-corrected'
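# Usage sketch (the import path is an assumption based on the package
# layout): the generated lookup tables can be queried directly, e.g.
#
#   from amcl.cfg import AMCLConfig
#   AMCLConfig.defaults['min_particles']   # -> 100
#   AMCLConfig.type['odom_model_type']     # -> 'str'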
| [
"[email protected]"
] | |
ab50175ccd01f72b050a261297409d0dc2b222ae | 0b9354147b631922828b070635af207d19ad56c5 | /deblender/utils.py | 43705fe5b4a510584d99ef06c9ec2b2585e68131 | [] | no_license | tvelagapudi/deblender | 2a3804fe0f5605a4e0658ac409625359de518748 | 9d472e4630da86e06033523a427ab8b53888669b | refs/heads/master | 2020-05-07T15:04:11.492790 | 2019-10-16T22:06:33 | 2019-10-16T22:06:33 | 180,621,814 | 0 | 0 | null | 2019-04-10T16:31:37 | 2019-04-10T16:31:37 | null | UTF-8 | Python | false | false | 7,565 | py | import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
class DataSampler:
"""
Creates TensorFlow Dataset objects from directories containing
.tfrecord TensorFlow binaries and passes tensors to graph. The
resulting sampler is reinitializable onto any of three datasets
(training, validation, testing) via the initialize method.
Args:
train_path(str): training data filepath containing .tfrecords files.
train_path(str): validation data filepath containing .tfrecords files.
train_path(str): test data filepath containing .tfrecords files.
data_shapes(dict): data shape dictionary to specify reshaping operation.
batch_size(int): number of samples per batch call.
shuffle(bool): shuffle data (only applicable to training set).
buffer_size(int): size of shuffled buffer TFDataset will draw from.
Note: this class currently only supports float data. In the future, it will
need to accomodate integer-valued data as well.
"""
def __init__(self, train_path, valid_path, test_path, data_shapes,
batch_size, shuffle=True, buffer_size=10000):
assert isinstance(batch_size, int), "Batch size must be integer-valued."
assert isinstance(buffer_size, int), "Buffer size must be integer-valued."
self.train_path = train_path
self.valid_path = valid_path
self.test_path = test_path
self.data_shapes = data_shapes
self.batch_size = batch_size
self.shuffle = shuffle
self.buffer_size = buffer_size
self.initialized = False
def initialize(self):
valid, test = map(self.make_dataset, [self.valid_path, self.test_path])
train = self.make_dataset(self.train_path, train=True)
self.iter = tf.data.Iterator.from_structure(
train.output_types, train.output_shapes)
train_init, valid_init, test_init = map(
self.iter.make_initializer, [train, valid, test])
self.init_ops = dict(zip(['train', 'valid', 'test'],
[train_init, valid_init, test_init]))
self.initialized = True
def make_dataset(self, filepath, train=False):
files = [os.path.join(filepath, file) for file
in os.listdir(filepath) if file.endswith('.tfrecords')]
dataset = tf.data.TFRecordDataset(files).map(self.decoder)
if train:
if self.shuffle:
dataset = dataset.shuffle(buffer_size=self.buffer_size)
return dataset.repeat().batch(self.batch_size)
else:
return dataset.batch(self.batch_size)
def get_dataset(self, dataset='train'):
if not self.initialized:
raise ValueError('Sampler must be initialized before dataset retrieval.')
try:
return self.init_ops.get(dataset)
except:
raise ValueError('Dataset unknown or unavailable.')
def decoder(self, example_proto):
feature_keys = {k: tf.FixedLenFeature(np.prod(v), tf.float32)
for k, v in self.data_shapes.items()}
parsed_features = tf.parse_single_example(example_proto, feature_keys)
parsed = [parsed_features[key] for key in self.data_shapes.keys()]
return parsed
def get_batch(self):
if not self.initialized:
raise ValueError('Sampler must be initialized before batch retrieval.')
batch = self.iter.get_next()
batch = [tf.reshape(batch[i], [-1] + list(v))
for i, v in enumerate(self.data_shapes.values())]
return batch
def np_to_tfrecords(data, file_path_prefix, verbose=False):
"""
Converts 2-D NumPy arrays to TensorFlow binaries.
Args:
data(dict): dictionary of NumPy arrays and corresponding keys.
file_path_prefix(str): file path for storing resulting .tfrecords.
verbose(bool): function verbosity for debugging.
Note that the keys provided to create .tfrecord files must correspond with
the keys passed to the utils.DataSampler class in the data_shapes arg.
Adapted from a Gist by Sangwoong Yoon.
"""
def _dtype_feature(ndarray):
""" Match appropriate tf.train.Feature class with dtype of ndarray. """
assert isinstance(ndarray, np.ndarray)
dtype_ = ndarray.dtype
if dtype_ == np.float64 or dtype_ == np.float32:
return lambda array: tf.train.Feature(
float_list=tf.train.FloatList(value=array))
elif dtype_ == np.int64 or dtype_ == np.int32:
return lambda array: tf.train.Feature(
int64_list=tf.train.Int64List(value=array))
else:
raise TypeError("The input should be numpy ndarray. \
Instead got {}".format(ndarray.dtype))
feature_types, records = {}, []
for k, v in data.items():
assert isinstance(v, np.ndarray)
n_records = v.shape[0]
records.append(n_records)
if not len(v.shape) == 2:
data[k] = v = v.reshape([n_records, -1])
feature_types[k] = _dtype_feature(v)
assert all(x == records[0] for x in records), \
"All data must have the same number of samples."
""" Generate TFRecord writer. """
result_tf_file = file_path_prefix + '.tfrecords'
writer = tf.python_io.TFRecordWriter(result_tf_file)
if verbose:
print("Serializing {:d} examples into {}".format(
n_records, result_tf_file))
""" Iterate over each sample and serialize it as ProtoBuf. """
for idx in range(n_records):
d_feature = {}
for k, v in data.items():
d_feature[k] = feature_types[k](v[idx])
features = tf.train.Features(feature=d_feature)
example = tf.train.Example(features=features)
serialized = example.SerializeToString()
writer.write(serialized)
if verbose:
print("Writing {} done!".format(result_tf_file))
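# Minimal end-to-end sketch tying the two pieces above together (paths,
# shapes and batch size are illustrative):
#
#   data = {'image': np.random.rand(32, 64, 64, 1).astype(np.float32)}
#   np_to_tfrecords(data, 'data/train/batch_0')
#   sampler = DataSampler('data/train', 'data/valid', 'data/test',
#                         data_shapes={'image': (64, 64, 1)}, batch_size=8)
#   sampler.initialize()
#   with tf.Session() as sess:
#       sess.run(sampler.get_dataset('train'))   # bind iterator to train set
#       image_batch = sess.run(sampler.get_batch())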
def restore_session(sess, ckptdir):
"""
Restores the checkpoint session from disk.
"""
meta_graph = [os.path.join(ckptdir, file) for file
in os.listdir(ckptdir) if file.endswith('.meta')][0]
restorer = tf.train.import_meta_graph(meta_graph)
restorer.restore(sess, tf.train.latest_checkpoint(ckptdir))
def get_total_params():
"""
Computes the total number of learnable variables in default graph.
"""
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
return total_parameters
def get_trainable_params():
"""
Analyzes trainable variables in default graph.
"""
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def to_stdout(obj):
"""
Prints arbitrarily long dictionaries or lists to stdout.
"""
if type(obj) == dict:
for k, v in obj.items():
if not isinstance(v, str) and hasattr(v, '__iter__'):
print(k)
to_stdout(v)
else:
if isinstance(v, float):
print('{}: {:.4e}'.format(k, v))
else:
print('%s: %s' % (k, v))
elif type(obj) == list:
for v in obj:
if hasattr(v, '__iter__'):
to_stdout(v)
else:
print(v)
else:
print(obj)
| [
"[email protected]"
] | |
b6fc0bf325cf5f669098fbf0bfd04324c3ce468f | fa788c9202fa269def4fc4261a5c26b81bbd81f2 | /codeforces/231C - To Add or Not to Add.py | a70909597cb84395eb358eef66dcc597147bf224 | [] | no_license | m7mdkamal/Problem-Solving | 34275689e119c0828146c1fc7178324df469353c | b36ab427b3289550c8c53b37faeb8e5877367e42 | refs/heads/master | 2020-12-24T17:54:20.667549 | 2016-05-03T06:57:59 | 2016-05-03T06:57:59 | 38,404,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | #http://codeforces.com/contest/231/problem/C
# Sort the values and slide a window [i, j]: raising every element of
# ar[i..j-1] up to ar[j] costs (j - i) * ar[j] - (s[j] - s[i]) increments,
# so the window is feasible while that cost is at most k.
# e.g. n=3, k=2, a=[1, 2, 3] -> "2 2" (one increment turns the 1 into a 2).
n, k = map(int, raw_input().split())
ar = map(int, raw_input().split())
ar.sort()
s = [0] * (n + 1)  # s[i] = sum of the first i sorted values
for i in xrange(n):
    s[i + 1] = s[i] + ar[i]
i, j, maxx, ans = 0, 1, 1, ar[0]
while j < n:
    # shrink from the left until the window fits in the budget k
    while (j - i) * ar[j] - s[j] + s[i] > k:
        i += 1
    if j - i + 1 > maxx:
        maxx, ans = j - i + 1, ar[j]
    j += 1
print maxx, ans
| [
"[email protected]"
] | |
34cc0c096002b790547ced2f5c45be6f16731053 | 0139bdde50d922893e718221a69e1ca4cb89757d | /wssh/examples/cryptography-1.5/tests/hazmat/backends/test_commoncrypto.py | 2b730e93798cd959ecbce7143da0ba35becd5484 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | nuaays/Miscellaneous_Scripts | 79adc5d4a639f1c95d5206447593f89a813d2e06 | 803a3b30e8848bbcbce58eb12f9b25a12060a437 | refs/heads/master | 2021-01-10T05:46:24.227613 | 2017-08-04T02:30:18 | 2017-08-04T02:30:18 | 48,648,489 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,186 | py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import pytest
from cryptography.exceptions import InternalError, _Reasons
from cryptography.hazmat.backends import _available_backends
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC, GCM
from ...doubles import DummyCipherAlgorithm
from ...utils import raises_unsupported_algorithm
@pytest.mark.skipif("commoncrypto" not in
[i.name for i in _available_backends()],
reason="CommonCrypto not available")
class TestCommonCrypto(object):
def test_supports_cipher(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
assert backend.cipher_supported(None, None) is False
def test_register_duplicate_cipher_adapter(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
with pytest.raises(ValueError):
backend._register_cipher_adapter(
AES, backend._lib.kCCAlgorithmAES128,
CBC, backend._lib.kCCModeCBC
)
def test_handle_response(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
with pytest.raises(ValueError):
backend._check_cipher_response(backend._lib.kCCAlignmentError)
with pytest.raises(InternalError):
backend._check_cipher_response(backend._lib.kCCMemoryFailure)
with pytest.raises(InternalError):
backend._check_cipher_response(backend._lib.kCCDecodeError)
def test_nonexistent_aead_cipher(self):
from cryptography.hazmat.backends.commoncrypto.backend import Backend
b = Backend()
cipher = Cipher(
DummyCipherAlgorithm(), GCM(b"fake_iv_here"), backend=b,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.encryptor()
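# These tests are skipped unless the CommonCrypto backend is available
# (i.e. on macOS builds of cryptography); a typical invocation would be:
#
#   py.test tests/hazmat/backends/test_commoncrypto.py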
| [
"[email protected]"
] | |
af0dd01c46ca70c63c0426085e8e360b8068a0ba | cb1fcff6eead0afad5ed1ac9771a5c557de7826b | /PlayGround-STEM/src/nltksample/main.py | 9cae5a7c4a1e2712a343254a809c87beaf69c1e4 | [] | no_license | minhla1215/PlayGround-STEM | eca59af3046b962382afce3badce2ef053908c3f | 42d88c12f02493bd74d711854bd8ff1ffff6f0e3 | refs/heads/master | 2016-09-05T22:44:05.707058 | 2013-07-15T22:16:41 | 2013-07-15T22:16:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | '''
def getline(line, delimiter=','):
def _gen():
while True:
if delimiter in line:
yield line[0:line.index(delimiter)]
break
else:
yield line
return "".join(_gen())
'''
import nltk
infile = "Layout-Media.csv"
outfile = "Revised.csv"
text = ""
f = open(infile,'r')
g = open(outfile, 'w')
while True:
text = f.readline()
    if len(text) > 10:
        text = nltk.clean_html(text)
        text = text.split(',')
        if len(text[10]) > 3:
            new = text[0] + "," + text[1] + "," + text[3] + "," + text[10] + "\n"
            g.write(new)
#print text[10]
    if len(text) == 0:  # readline() returns '' at end of file
        break
f.close()
g.close()
#text += testline
#text = nltk.clean_html(text)
#print text
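# Note: nltk.clean_html() was removed in NLTK 3.x (it now raises
# NotImplementedError and points users at BeautifulSoup); a rough substitute:
#
#   from bs4 import BeautifulSoup
#   text = BeautifulSoup(raw_html, "html.parser").get_text()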
| [
"[email protected]"
] | |
9638e97a64006ff1700145103cf10a43896b4f6b | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /new_pyretic_loop_mcs/interreplay_6_l_2/replay_config.py | 04eb43066db3a8ed2402767c9faf39f22a34371e | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pyretic.py -m p0 -v high pyretic.modules.hub', label='c1', address='127.0.0.1', cwd='../pyretic', kill_cmd='ps aux | grep -e pox -e pyretic | grep -v simulator | cut -c 9-15 | xargs kill -9')],
topology_class=MeshTopology,
topology_params="num_switches=3",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
kill_controllers_on_exit=True)
control_flow = Replayer(simulation_config, "experiments/new_pyretic_loop_mcs/interreplay_6_l_2/events.trace",
input_logger=InputLogger(),
wait_on_deterministic_values=False,
allow_unexpected_messages=False,
delay_flow_mods=False,
default_dp_permit=False,
pass_through_whitelisted_messages=False,
invariant_check_name='InvariantChecker.python_check_loops',
bug_signature="{'hs_history': [(x^L) - ([]), (dl_vlan:65535,dl_vlan_pcp:0) - ([]), (dl_vlan:65535,dl_vlan_pcp:0) - ([])], 'hdr': (dl_vlan:65535,dl_vlan_pcp:0) - ([]), 'visits': [100001, 200002, 300001], 'port': 100001}")
| [
"[email protected]"
] | |
08a048c3525535e09dc805d69d1002a791389dcf | 296f758aa22793210553c170a41ce669c0b7895b | /test.py | a0bd1e1a7be47874e22c7f2da698f88196504f78 | [
"Apache-2.0"
] | permissive | yesup/tensorflow-serving-client-python | cab185158c308ae9e4387ec6d986c2645c89a321 | ed44ddd9c3f143267679e9099d33c8f6b856e033 | refs/heads/master | 2021-01-23T06:39:13.608952 | 2017-03-28T18:12:04 | 2017-03-28T18:12:04 | 86,383,999 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,685 | py |
def doTest(host, port):
from tensorflow_serving.apis.predict_pb2 import PredictRequest
from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub
from grpc import insecure_channel, StatusCode
from tensorflow.contrib.util import make_tensor_proto, make_ndarray
from tensorflow import float32
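  # Assumptions behind this request (taken from the calls below, not verified
  # against a live server): a TensorFlow Serving instance at host:port exports
  # a model named "campaign" whose default signature accepts the string and
  # float32 scalar inputs set here and returns a "classes" output tensor.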
target = "%s:%s"%(host, port)
print "Sending prediction request to", target, "\n"
channel = insecure_channel(target)
stub = PredictionServiceStub(channel)
request = PredictRequest()
request.model_spec.name = "campaign"
request.model_spec.signature_name = ""
request.inputs["hour"].CopyFrom(make_tensor_proto(6, shape=[1], dtype=float32))
request.inputs["week"].CopyFrom(make_tensor_proto(5, shape=[1], dtype=float32))
request.inputs["sid"].CopyFrom(make_tensor_proto("47320", shape=[1]))
request.inputs["sspid"].CopyFrom(make_tensor_proto("3", shape=[1]))
request.inputs["country"].CopyFrom(make_tensor_proto("DK", shape=[1]))
request.inputs["os"].CopyFrom(make_tensor_proto("6", shape=[1]))
request.inputs["domain"].CopyFrom(make_tensor_proto("video9.in", shape=[1]))
request.inputs["isp"].CopyFrom(make_tensor_proto("Tele Danmark", shape=[1]))
request.inputs["browser"].CopyFrom(make_tensor_proto("4", shape=[1]))
request.inputs["type"].CopyFrom(make_tensor_proto("site", shape=[1]))
request.inputs["lat"].CopyFrom(make_tensor_proto(35000, shape=[1], dtype=float32))
request.inputs["lon"].CopyFrom(make_tensor_proto(105000, shape=[1], dtype=float32))
request.inputs["connectiontype"].CopyFrom(make_tensor_proto("2", shape=[1]))
request.inputs["devicetype"].CopyFrom(make_tensor_proto("1", shape=[1]))
request.inputs["donottrack"].CopyFrom(make_tensor_proto("0", shape=[1]))
request.inputs["userid"].CopyFrom(make_tensor_proto("984273063", shape=[1]))
request.inputs["ua"].CopyFrom(make_tensor_proto("Mozilla/5.0 (Linux; U; Android 5.1.1; en-US; Redmi Note 3 Build/LMY47V) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 UCBrowser/11.0.8.855 U3/0.8.0 Mobile Safari/534.30", shape=[1]))
(result, status) = stub.Predict.with_call(request)
if status.code() != StatusCode.OK:
print "call failed", status
return
predictions = make_ndarray(result.outputs["classes"])
if predictions.size == 0:
print "no predition replied"
return
cidIndex = predictions[0]
print "Server predict with index", cidIndex
if __name__ == "__main__":
from sys import argv, exit
  if len(argv) != 3:
print "Usage: python test.py [host] [port]\n"
exit(0)
doTest(argv[1], argv[2]) | [
"[email protected]"
] | |
e0b33a2ea9ee99944717ab928c5e187a0c12d220 | 5ec90a88fccee85ac37bd00d8488054f9dc55ee9 | /Ashok/sqlite/1/asdf/polls/migrations/0002_auto_20170308_1351.py | 66b60a3c8b32abec1717c56ff060309375466382 | [] | no_license | MbF-Ashok/pro1 | 9fe45329206695302ab23a5300fd3a65baa1355b | 8d921616285f125ff2a2ef309aa0a8f150ef64f2 | refs/heads/master | 2021-06-13T13:21:38.357260 | 2017-03-17T12:51:43 | 2017-03-17T12:51:43 | 85,305,352 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-08 13:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='student',
name='age',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
70b39c99d506cdea163ccf46b3185dfe24d456c5 | 5a5b66513e6b8e77d49662392868452deafeeef3 | /load_neo4j/load_drugs.py | 5534ee61e8429db32b848cd05a2fd35aedde4dfd | [] | no_license | broadinstitute/reasoner | e844c6dabbb9524e9b060b0f48095ecb628498c8 | f78fffb771c482bd65d14814bff39e85b4cbac17 | refs/heads/master | 2021-06-06T23:50:36.183081 | 2020-06-19T17:31:06 | 2020-06-19T17:31:06 | 111,457,986 | 5 | 1 | null | 2020-06-19T17:31:07 | 2017-11-20T20:12:42 | Python | UTF-8 | Python | false | false | 428 | py | import pandas as pd
from reasoner.knowledge_graph.KnowledgeGraph import KnowledgeGraph
drugs_file = '../data/knowledge_graph/ready_to_load/drugs.csv'
drugs = pd.read_csv(drugs_file)
kg = KnowledgeGraph()
drugs.fillna('', inplace = True)
for index, row in drugs.iterrows():
kg.add_drug(row['chembl_id'], row['name'], row['cui'], row['chebi_id'], row['drugbank_id'], row['type'], row['mechanism'], row['pharmacodynamics'])
| [
"[email protected]"
] | |
e588bf9c0f7468166c7c38d0a5a5ffc80a1f3733 | 2b5c710c9b372ad7e4d49e16852944aaf8926a07 | /Tools/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py | e69362cba54f94e822458564aa196ed92cd9dfa5 | [
"Apache-2.0"
] | permissive | FMSoftCN/mdolphin-core | 8ce68f1055d3a38f6e46bd311f2c20b7afc183b8 | 48ffdcf587a48a7bb4345ae469a45c5b64ffad0e | refs/heads/master | 2022-04-09T22:16:12.602078 | 2020-02-11T10:06:11 | 2020-02-11T10:06:11 | 92,716,592 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,678 | py | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from webkitpy.tool.commands.queuestest import QueuesTest
from webkitpy.tool.commands.sheriffbot import SheriffBot
from webkitpy.tool.mocktool import MockBuilder
class SheriffBotTest(QueuesTest):
builder1 = MockBuilder("Builder1")
builder2 = MockBuilder("Builder2")
def test_sheriff_bot(self):
mock_work_item = {
29837: [self.builder1],
}
expected_stderr = {
"begin_work_queue": "CAUTION: sheriff-bot will discard all local changes in \"%s\"\nRunning WebKit sheriff-bot.\n" % os.getcwd(),
"next_work_item": "",
"process_work_item": "MOCK: irc.post: abarth, darin, eseidel: http://trac.webkit.org/changeset/29837 might have broken Builder1\nMOCK bug comment: bug_id=42, cc=['[email protected]', '[email protected]', '[email protected]']\n--- Begin comment ---\\http://trac.webkit.org/changeset/29837 might have broken Builder1\n--- End comment ---\n\n",
"handle_unexpected_error": "Mock error message\n"
}
self.assert_queue_outputs(SheriffBot(), work_item=mock_work_item, expected_stderr=expected_stderr)
revisions_causing_failures = {
1234: [builder1],
1235: [builder1, builder2],
}
def test_new_failures(self):
old_failing_svn_revisions = []
self.assertEquals(SheriffBot()._new_failures(self.revisions_causing_failures,
old_failing_svn_revisions),
self.revisions_causing_failures)
    def test_new_failures_with_old_revisions(self):
        old_failing_svn_revisions = [1234]
        self.assertEquals(SheriffBot()._new_failures(self.revisions_causing_failures,
                                                     old_failing_svn_revisions),
                          {1235: [self.builder2]})
    def test_new_failures_with_all_old_revisions(self):
        old_failing_svn_revisions = [1235]
        self.assertEquals(SheriffBot()._new_failures(self.revisions_causing_failures,
                                                     old_failing_svn_revisions),
                          {})
| [
"[email protected]"
] | |
a01d843fe430a1ec7bceda594c74ed0f30e00802 | 44c78bf70c63724b54bd87db1c96a2a3c5a0aac2 | /crop.py | 2841b926362e29ff185cf6a8556c6687f59b5ae3 | [] | no_license | JryonGitHub/utils | 3f8b65e90bce89198e6f9dec17ef00487f787cf9 | 66ebca2fb52a415115d74243eb2dc269fea494a4 | refs/heads/master | 2020-04-09T22:48:10.474582 | 2018-12-06T07:54:45 | 2018-12-06T07:54:45 | 160,639,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | import skimage.io as imgio
import skimage.transform as imgtf
import numpy as np
import os
image_root = "/home/yuchaojian/caffe/caffe-final/examples/data/car_ims/"
new_root = "/home/yuchaojian/caffe/caffe-final/examples/data/car_ims_crop_512/"
with open("car_test_images.txt", "r") as f:
i = 0
for line in f.readlines():
i=i+1
line = line.rstrip("\n").strip(" ")
path,category = line.split(" ")
path_tmp=image_root+path
a = imgio.imread(path_tmp)
h,w = a.shape[0:2]
        # scale so the shorter side becomes 512 px, preserving aspect ratio
        if h > w:
            scale_factor = 512.0 / w
        else:
            scale_factor = 512.0 / h
        new_h = int(h * scale_factor)
        new_w = int(w * scale_factor)
b = imgtf.resize(a,(new_h,new_w))
new_path_tmp=new_root+path
        img_new_path,_ = os.path.split(new_path_tmp)
        if not os.path.exists(img_new_path):
            # makedirs, not mkdir, so missing intermediate directories are created too
            os.makedirs(img_new_path)
imgio.imsave(new_path_tmp,b)
print(i)
| [
"[email protected]"
] | |
a7634827a66ebce36a0180be3a6c686643cf94a3 | 0bb39544ccd1ce1f8bd66dfe5d1b30c6ae9639be | /music/urls.py | 58ca70137355cd71f2dc6b5f3f5c19743d5112c1 | [] | no_license | kevinbooth/music-api | ab17edefa1947f651504250acd10defbdb558f77 | 1c03dcd9b96b02359d04c3fb4b2de47c308fafbb | refs/heads/master | 2020-04-24T20:46:21.716252 | 2019-02-26T22:21:40 | 2019-02-26T22:21:40 | 172,255,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | """
Module that resolves url paths to a specific view in the music app
"""
from django.urls import path
from .views import ListSongsView
"""
Specifying the url route.
"""
urlpatterns = [
path('songs/', ListSongsView.as_view(), name="songs-all")
]
| [
"[email protected]"
] | |
d823efe9f1e8c44c0d01b63990a3dde49f8ec27b | 7dd937b6c0efc130ee2bdc67de6b7b3aa1b4ad6a | /plentyofcats/migrations/0011_userad_uploaded_file.py | 50184545bf91f7442cdfba9f3d272f70e3b0f8d9 | [] | no_license | MindSparkTm/Pythondjangoapp | d964a00cf49bd409fe43f15ca5587fff9acbe018 | 50355539d76a3e84efcd0b8aef278f842aff2dac | refs/heads/master | 2020-03-27T18:52:01.201440 | 2018-08-31T22:56:22 | 2018-08-31T22:56:22 | 146,949,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-06-20 23:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plentyofcats', '0010_auto_20180620_2222'),
]
operations = [
migrations.AddField(
model_name='userad',
name='uploaded_file',
field=models.FileField(blank=True, null=True, upload_to='media/users/'),
),
]
| [
"[email protected]"
] | |
ca3097fdf5630dc7760f474debb4398f76834eb7 | 6af80f9a6aeca9c1b65092afcbbc6ff7608b844a | /CORR/corr_org.py | 8a43f966b12e3dd9b1426d00faa0400c18d7c3e0 | [] | no_license | FCP-INDI/indi_bidsification | 0d34dac3debc982e0ea21a691d5541b3173d28f3 | f45c831f78c58b4525222f8a9d6c0a6b0500792d | refs/heads/master | 2020-07-15T23:51:10.012208 | 2017-05-18T21:25:02 | 2017-05-18T21:25:02 | 73,956,486 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,141 | py | from CPAC.AWS import aws_utils, fetch_creds
import tarfile
import os
import shutil
import re
import sys
keyspath=sys.argv[1]
bucket = fetch_creds.return_bucket(keyspath, 'fcp-indi')
#Be sure to put in the last forward slash as may act as wildcard otherwise
ipdir='data/Projects/CORR/RawData/'
opdir='data/Projects/CORR/RawDataBIDs/'
srclist=[]
for i,k in enumerate(bucket.list(prefix=ipdir)):
srclist.append(k.name)
print k.name
srclist=sorted(srclist)
matchdct={
'anat' :
["(.+)/([0-9]+)/session_([0-9]{1,2})/anat_([0-9]{1,2})/anat.nii.gz" ,
r"\1/sub-\2/ses-\3/anat/sub-\2_ses-\3_run-\4_T1w.nii.gz"],
#'mpi_anat_comp':
#[r"(.+)/([0-9]+)/session_([0-9]{1,2})/anat_([0-9]{1,2})/anat_([a-z12\_]+).nii.gz" ,
#r"\1/sub-\2/ses-\3/anat/sub-\2_ses-\3_acq-\5_run-\4_T1w.nii.gz"],
'func_rest' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/rest_([0-9]{1,2})/rest.nii.gz" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-rest_run-\4_func.nii.gz"],
'func_rest_mb' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/rest_([0-9]{3,4})_([0-9]{1,2})/rest.nii.gz" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-rest_acq-tr\4ms_run-\5_func.nii.gz"],
'func_rest_pref' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/rest_([0-9]{1,2})_([a-z]+)/rest.nii.gz" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-rest_acq-fov\5_run-\4_func.nii.gz"],
'func_msit' :
["(.+)/([0-9]+)/session_([0-9]{1,2})/msit_([0-9]{1,2})/msit.nii.gz" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-msit_run-\4_func.nii.gz"],
'func_eyetracker' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/eyemovement_([0-9]{3,4})_([0-9]{1,2})/eyemovement.+.nii.gz" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-eyemovement_acq-tr\4ms_run-\5_func.nii.gz"],
'func_breathhold' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/breathhold_([0-9]{3,4})_([0-9]{1,2})/breathhold.+.nii.gz" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-breathhold_acq-tr\4ms_run-\5_func.nii.gz"],
'func_checker' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/checkerboard_([0-9]{3,4})_([0-9]{1,2})/checkerboard.+.nii.gz" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-checkerboard_acq-tr\4ms_run-\5_func.nii.gz"],
'dti' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/dti_([0-9]{1,2})/dti(.+)" ,
r"\1/sub-\2/ses-\3/dwi/sub-\2_ses-\3_run-\4_dwi\5"],
'fmap_phs' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/fieldmap_([0-9]{1,2})/fieldmap_phase.nii.gz" ,
r"\1/sub-\2/ses-\3/fmap/sub-\2_ses-\3_run-\4_phasediff.nii.gz"],
'fmap_mag' :
[ r"(.+)/([0-9]+)/session_([0-9]{1,2})/fieldmap_([0-9]{1,2})/fieldmap_magnitude.nii.gz" ,
r"\1/sub-\2/ses-\3/fmap/sub-\2_ses-\3_run-\4_magnitude.nii.gz"],
'fmap_phs_noscannum' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/FieldMap/phase.nii.gz" ,
r"\1/sub-\2/ses-\3/fmap/sub-\2_ses-\3_phasediff.nii.gz"],
'fmap_mag_noscannum' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/FieldMap/magnitude([12]).nii.gz" ,
r"\1/sub-\2/ses-\3/fmap/sub-\2_ses-\3_magnitude\4.nii.gz"],
'cbf' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/cbf_([0-9]{1,2})/cbf.nii.gz" ,
r"\1/sub-\2/ses-\3/cbf/sub-\2_ses-\3_task-rest_run-\4_cbf.nii.gz"],
'asl' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/asl_([0-9]{1,2})/asl.nii.gz" ,
r"\1/sub-\2/ses-\3/asl/sub-\2_ses-\3_run-\4_asl.nii.gz"],
'iba_trt_msitbehav' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/msit_([1-9]{1})/([a-z]+).txt" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-msit_run-\4_\5.txt"],
'utah_physio_mat' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/[Pp]hysio/Physio_scan_([0-9]{1,2}).mat" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-rest_run-\4_recording-physio.mat"],
'utah_physio_resp-puls' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/[Pp]hysio/EPIlog_scan_([0-9]{1,2}).([a-z]{4})" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-rest_run-\4_recording-\5.\5"],
'utah_physio_resp-puls-txt' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/[Pp]hysio/([a-z]{4})_scan_([0-9]{1,2}).txt" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-rest_run-\5_recording-\4.txt"],
'utah_physio_pmu_resp-puls' :
[ r"(.+)/([0-9]+)/session_([0-9]{1,2})/[Pp]hysio/.+_rest([0-9]{1}).([a-z]{4})" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-rest_run-\4_recording-\5.\5"],
'utah_nophys' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/[Pp]hysio/no_physio_available.txt" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-rest_no_physio_available.txt"],
'utah_nophys2' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/[Pp]hysio/NoPhysio_Scan2" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-rest_no_physio_available.txt"],
'utah_pyhsio_pmu_norunnum' :
[r"(.+)/([0-9]+)/session_([0-9]{1,2})/[Pp]hysio/.+_rest.([a-z]{4})" ,
r"\1/sub-\2/ses-\3/func/sub-\2_ses-\3_task-rest_run-1_recording-\4.\4"]
}
for mk in matchdct.keys():
print mk
srclist_filt=[]
destlist=[]
for sl in sorted(srclist):
if re.match(matchdct[mk][0],sl):
#print sl,re.sub(matchdct[mk][0],matchdct[mk][1],sl)
srclist_filt.append(sl)
destlist.append(re.sub(matchdct[mk][0],matchdct[mk][1],sl).replace(ipdir,opdir))
# Note might error with make_public=True, removing it stops error, unsure why error occurs
aws_utils.s3_rename(bucket,srclist_filt,destlist,keep_old=True,make_public=True)
r"""
# All Anats
# MPG MP2RAGE T1 Weighted Component Images
# Fix new name of inversion phase components to remove underscore
#for i in $(find Organized_Data_BIDs/mpg_1/ -iname '*inv*phs*T1w*');do mv -nv $i ${i/_phs/phs};done
# Funcs not containing reference to TR (NKI) or Shortened Number of Slices (MPG)
# Pulls in NKI rest Data with reference to TR
# Pulls in MPG data with reference to Prefrontal acquisition
# Breath Hold Checkerboard eyemovement MSIT
# DTI
# Fieldmaps
# CBF
# ASL
# Physio
# UTAH
# .mat Physio Files with run number in original filename
# EPI log .resp/.puls Files with run number in original filename
# resp/puls text Files with run number in original filename
# EPI/PMU log .resp/.puls Files with run number in original filename
# no phys available text files
# EPI/PMU log .resp/.puls Files WITHOUT run number in original filename
# IBA MSIT Behavioural
"""
| [
"[email protected]"
] | |
e5ed9f21f4f1f9054c597c8dfcb4e565cf181cef | 1f4fb774f87a96d31008ad3b7621fca97a7c1892 | /models/dwh_data/models.py | 9e8fba59b1ecb477f902b0a724e415463e639915 | [] | no_license | miamitops/RRC-DWH-Starter- | 738b65a4419f5d779406237206116205a4316500 | 3d492058ef0239840691bcf0aac76fdebecd1fcf | refs/heads/master | 2022-03-05T18:52:50.041154 | 2019-09-17T07:31:37 | 2019-09-17T07:31:37 | 208,978,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,303 | py | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'
# into your database.
from __future__ import unicode_literals
from django.db import models
class BaseData(models.Model):
individualid = models.CharField(db_column='IndividualID', max_length=255, blank=True, null=True, primary_key=True) # Field name made lowercase.
individualguid = models.CharField(db_column='Individualguid', max_length=255, blank=True, null=True) # Field name made lowercase.
familyname = models.CharField(db_column='FamilyName', max_length=255, blank=True, null=True) # Field name made lowercase.
givenname = models.CharField(db_column='GivenName', max_length=255, blank=True, null=True) # Field name made lowercase.
concatenatedname = models.CharField(db_column='ConcatenatedName', max_length=255, blank=True, null=True) # Field name made lowercase.
registrationdate = models.CharField(db_column='RegistrationDate', max_length=255, blank=True, null=True) # Field name made lowercase.
dateofbirth = models.CharField(db_column='DateofBirth', max_length=255, blank=True, null=True) # Field name made lowercase.
countryoforigin = models.CharField(db_column='CountryOfOrigin', max_length=255, blank=True, null=True) # Field name made lowercase.
asylumcountrycode = models.CharField(db_column='AsylumCountryCode', max_length=255, blank=True, null=True) # Field name made lowercase.
arrivaldate = models.CharField(db_column='ArrivalDate', max_length=255, blank=True, null=True) # Field name made lowercase.
gender = models.CharField(max_length=255, blank=True, null=True)
age = models.CharField(db_column='Age', max_length=255, blank=True, null=True) # Field name made lowercase.
individualagecohortcode = models.CharField(db_column='IndividualAgeCohortCode', max_length=255, blank=True, null=True) # Field name made lowercase.
nationalitycode = models.CharField(db_column='NationalityCode', max_length=255, blank=True, null=True) # Field name made lowercase.
rsdstatuscode = models.CharField(db_column='RSDStatusCode', max_length=255, blank=True, null=True) # Field name made lowercase.
resettlementstatuscode = models.CharField(db_column='ResettlementStatusCode', max_length=255, blank=True, null=True) # Field name made lowercase.
volrepstatuscode = models.CharField(db_column='VolRepStatusCode', max_length=255, blank=True, null=True) # Field name made lowercase.
marriagestatuscode = models.CharField(db_column='MarriageStatusCode', max_length=255, blank=True, null=True) # Field name made lowercase.
ethnicitycode = models.CharField(db_column='EthnicityCode', max_length=255, blank=True, null=True) # Field name made lowercase.
educationlevelcode = models.CharField(db_column='EducationLevelCode', max_length=255, blank=True, null=True) # Field name made lowercase.
occupationcode = models.CharField(db_column='OccupationCode', max_length=255, blank=True, null=True) # Field name made lowercase.
processstatuscode = models.CharField(db_column='ProcessStatusCode', max_length=255, blank=True, null=True) # Field name made lowercase.
refugeestatuscode = models.CharField(db_column='RefugeeStatusCode', max_length=255, blank=True, null=True) # Field name made lowercase.
fathername = models.CharField(db_column='FatherName', max_length=255, blank=True, null=True) # Field name made lowercase.
mothername = models.CharField(db_column='MotherName', max_length=255, blank=True, null=True) # Field name made lowercase.
siteidowner = models.CharField(db_column='SiteIDOwner', max_length=255, blank=True, null=True) # Field name made lowercase.
siteidcreate = models.CharField(db_column='SiteIDCreate', max_length=255, blank=True, null=True) # Field name made lowercase.
createdate = models.CharField(db_column='CreateDate', max_length=255, blank=True, null=True) # Field name made lowercase.
isanygroupprincipalrepresentative = models.CharField(db_column='IsAnyGroupPrincipalRepresentative', max_length=255, blank=True, null=True) # Field name made lowercase.
hasspneed = models.CharField(db_column='HasSPNeed', max_length=255, blank=True, null=True) # Field name made lowercase.
processinggrouptypecode = models.CharField(db_column='ProcessingGroupTypeCode', max_length=255, blank=True, null=True) # Field name made lowercase.
processinggroupnumber = models.CharField(db_column='ProcessingGroupNumber', max_length=255, blank=True, null=True) # Field name made lowercase.
currentrationcardnumber = models.CharField(db_column='CurrentRationCardNumber', max_length=255, blank=True, null=True) # Field name made lowercase.
processinggroupfilenumber = models.CharField(db_column='ProcessingGroupFileNumber', max_length=255, blank=True, null=True) # Field name made lowercase.
processinggroupsize = models.CharField(db_column='ProcessingGroupSize', max_length=255, blank=True, null=True) # Field name made lowercase.
processinggroupstatuscode = models.CharField(db_column='ProcessingGroupStatusCode', max_length=255, blank=True, null=True) # Field name made lowercase.
processgroupstatusdate = models.CharField(db_column='ProcessGroupStatusDate', max_length=255, blank=True, null=True) # Field name made lowercase.
processinggroupregistrationdate = models.CharField(db_column='ProcessingGroupRegistrationDate', max_length=255, blank=True, null=True) # Field name made lowercase.
individualsequencenumber = models.CharField(db_column='IndividualSequenceNumber', max_length=255, blank=True, null=True) # Field name made lowercase.
principalrepresentative = models.CharField(db_column='PrincipalRepresentative', max_length=255, blank=True, null=True) # Field name made lowercase.
relationshiptoprincipalrepresentative = models.CharField(db_column='RelationshipToPrincipalRepresentative', max_length=255, blank=True, null=True) # Field name made lowercase.
relationshiptext = models.CharField(db_column='RelationshipText', max_length=255, blank=True, null=True) # Field name made lowercase.
coalocationlevel1id = models.CharField(db_column='COALocationLevel1ID', max_length=255, blank=True, null=True) # Field name made lowercase.
coalocationlevel2id = models.CharField(db_column='COALocationLevel2ID', max_length=255, blank=True, null=True) # Field name made lowercase.
coalocationlevel3id = models.CharField(db_column='COALocationLevel3ID', max_length=255, blank=True, null=True) # Field name made lowercase.
coalocationlevel4id = models.CharField(db_column='COALocationLevel4ID', max_length=255, blank=True, null=True) # Field name made lowercase.
coalocationlevel5id = models.CharField(db_column='COALocationLevel5ID', max_length=255, blank=True, null=True) # Field name made lowercase.
coolocationlevel1id = models.CharField(db_column='COOLocationLevel1ID', max_length=255, blank=True, null=True) # Field name made lowercase.
coolocationlevel2id = models.CharField(db_column='COOLocationLevel2ID', max_length=255, blank=True, null=True) # Field name made lowercase.
coolocationlevel3id = models.CharField(db_column='COOLocationLevel3ID', max_length=255, blank=True, null=True) # Field name made lowercase.
coolocationlevel4id = models.CharField(db_column='COOLocationLevel4ID', max_length=255, blank=True, null=True) # Field name made lowercase.
coolocationlevel5id = models.CharField(db_column='COOLocationLevel5ID', max_length=255, blank=True, null=True) # Field name made lowercase.
vulnerabilitycode = models.CharField(db_column='VulnerabilityCode', max_length=255, blank=True, null=True) # Field name made lowercase.
vulnerabilitydetailscode = models.CharField(db_column='VulnerabilityDetailsCode', max_length=255, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'base_data'
| [
"[email protected]"
] | |
f5b07c5b0dc5bcd60a50df0be0d1325b8cd409ca | def844c8977db04924dc6d5bf3def2c0049efb11 | /coinge2(test).py | 94d8f110799dafbb150e72f6a9b44797efb065ef | [] | no_license | alfdav/Blockchain-in-Python | 7169c4adab19ebe24b678f364c5654132f9b7102 | 007fdc3ede293e59d4d31bd1cceae8143ed58004 | refs/heads/master | 2021-07-05T05:06:58.150605 | 2017-09-20T14:25:49 | 2017-09-20T14:25:49 | 103,992,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,471 | py | import hashlib as hasher
class Block:
def __init__(self, index, timestamp, data, previous_hash):
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
sha = hasher.sha256()
sha.update(str(self.index) +
str(self.timestamp) +
str(self.data) +
str(self.previous_hash))
return sha.hexdigest()
import datetime as date
def create_genesis_block():
# Manually construct a block with
# index zero and arbitrary previous hash
return Block(0, date.datetime.now(), "Genesis Block", "0")
def next_block(last_block):
this_index = last_block.index + 1
this_timestamp = date.datetime.now()
this_data = "Hey! I'm block " + str(this_index)
this_hash = last_block.hash
return Block(this_index, this_timestamp, this_data, this_hash)
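# Optional sanity check (an illustrative sketch, not part of the original
# example): walks consecutive block pairs, re-derives each hash from the stored
# fields and verifies the previous_hash links. Calling is_chain_valid(blockchain)
# after the build loop below should return True for an untampered chain.
def is_chain_valid(chain):
    for prev, curr in zip(chain, chain[1:]):
        if curr.previous_hash != prev.hash:
            return False  # broken link to the previous block
        if curr.hash != curr.hash_block():
            return False  # block contents were altered after hashing
    return True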
# Create the blockchain and add the genesis block
blockchain = [create_genesis_block()]
previous_block = blockchain[0]
# How many blocks should we add to the chain
# after the genesis block
num_of_blocks_to_add = 150
# Add blocks to the chain
for i in range(0, num_of_blocks_to_add):
block_to_add = next_block(previous_block)
blockchain.append(block_to_add)
previous_block = block_to_add
# Tell everyone about it!
print "Block #{} has been added to the blockchain!".format(block_to_add.index)
print "Hash: {}\n".format(block_to_add.hash) | [
"[email protected]"
] | |
3fc8d1d616816e248a00f97756d8be7e9aa0c5c3 | b6aa74ea79fbae7d77ebc3f27005c36c245b6ff3 | /vw_question/migrations/0005_auto_20180515_1703.py | 77523572b81ec0ff76ce5539b9d42b5fdc554047 | [] | no_license | leonboripgs/anytime-jango-interview | f0183739047eb41a68d0388166fb60252988188a | c46f10b9df4b8504eab19116bdcb8e0c5348d83c | refs/heads/master | 2023-01-21T21:37:59.182766 | 2020-12-02T13:23:33 | 2020-12-02T13:23:33 | 317,868,904 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-15 14:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('vw_question', '0004_auto_20180515_1450'),
]
operations = [
migrations.AlterModelOptions(
name='competency',
options={'verbose_name': 'Competency', 'verbose_name_plural': 'Competencies'},
),
]
| [
"[email protected]"
] | |
9bf44b26152437c8b689ed8ac7b6ae7e03eb3771 | acaaf295c275aef2a5d5893ca9eccf5666d3da8c | /fetch_data.py | f284337b7b191a2775a1dc4a79be4aa5000f33e7 | [] | no_license | xiachengdu/stock | dfef23f276e2b923ef9df887f327ed14d7c7b933 | 7a8a56dddbb0a93664c93647a8e4a060a4b97e6c | refs/heads/master | 2021-01-21T11:23:25.562902 | 2017-03-01T14:57:17 | 2017-03-01T14:57:39 | 83,565,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | py | # -*- coding: utf-8 -*-
import re
import urllib2
import ConfigParser
import mysql.connector
conf = ConfigParser.ConfigParser()
conf.read("config/config.ini")
stockId = conf.get("stock", "stock_code")
# data request helper
def data_api(id):
strHtml = urllib2.urlopen('http://hq.sinajs.cn/list=' + id).read()
return strHtml
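# For reference, hq.sinajs.cn answers with one JS assignment per symbol, roughly
# (shape inferred from the parsing in request() below; the values are made up):
#   var hq_str_sh601006="NAME,open,prev_close,price,high,low,...,date,time,00";
# which is why request() splits on ; , and " and indexes the resulting fields.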
def request(stockId):
#query
conn = mysql.connector.connect(host='60.205.207.56',port='3306',user='rht102',password='rht102',database='stock',use_unicode=True);
cursor = conn.cursor();
    # raw response data
sorce_data = data_api(stockId)
#print re.split(';|,|"',sorce_data)
    # regex-split data (list of fields)
processed_data = re.split(';|,|"',sorce_data)
    # percent change (rise/fall)
float_persent = (float(processed_data[4]) - float(processed_data[3]))/float(processed_data[3])*100
#print float_persent
    # stock name
#print processed_data[1]
    # yesterday's closing price
#print processed_data[3]
    # today's opening price
#print processed_data[2]
    # current price
#print processed_data[4]
    # current timestamp
#print processed_data[-4]
    # trading volume
#print processed_data[9]
    # turnover amount
#print processed_data[10]
    # stock code
#print processed_data[0]
#sql update
sql = """UPDATE stocks_data
SET price_yesterday = %s,price_open = %s,price = %s,count_time = %s,turnover_volume = %s,turnover_amount = %s,float_persent = %s
WHERE stock_code = %s """
data = (processed_data[3],processed_data[2],processed_data[4],processed_data[-4],processed_data[9],processed_data[10],float_persent,stockId)
cursor.execute(sql,data)
conn.commit()
    # close db resources
cursor.close()
conn.close()
request(stockId)
| [
"[email protected]"
] | |
033fe3c9187494163d46a1203d00442491ef63b6 | f2dc44edef3578a134f5853dcfb18d830e58eee1 | /testpy2.py | 47ead0ea3737cbad1c011df9e460afa7f80a40aa | [] | no_license | arpitarya07/Spark-Data-Modeling | 4e400ec68fd3b06189f0ba435e24224ccdd8935b | 6b05ae05b0c2db4c84a6672d7885d2ad24441181 | refs/heads/master | 2016-09-13T03:42:43.366930 | 2016-04-20T23:33:44 | 2016-04-20T23:33:44 | 56,727,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | import argparse
import csv
#import os
parser = argparse.ArgumentParser(description='Process arguments. Fetch source file, Destination folder, column names')
parser.add_argument('--src',help='give fully classified path of source file')
parser.add_argument('--des',help='give destination folder')
parser.add_argument('--col',help='specifiy col name Note: first id column name, second symptom column name')
args = parser.parse_args()
DESIRED_COLUMNS = ('vaers_id','symptom_text')
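# Assumption (from the VAERS-style export this was written against): the CSV's
# first row is a header containing columns named exactly 'vaers_id' and
# 'symptom_text'; the loop below records their indices once and keeps only
# those two fields from every following row.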
#f = open(args.src)
f = open("Y:\SJSU_Studies\Sem5_CS298\Project_data\Autism\Yearwise_Data_Autism\Autism_2014.csv")
reader = csv.reader(f)
headers = None
results = []
for row in reader:
if not headers:
headers = []
for i, col in enumerate(row):
if col in DESIRED_COLUMNS:
# Store the index of the cols of interest
headers.append(i)
else:
results.append(tuple([row[i] for i in headers]))
count = 0
for r in results:
print r
# print "--------------------------------"
# print "id-> ",r[0]," symptoms-> ",r[1],"\n"
count = count + 1
#fname = "neg"+r[0]+".txt"
#print "fname:",fname
#filename = os.path.join("Y:\","SJSU_Studies","Sem5_CS298","Project_data","Text_files2","Neg","Autism1991")
#print filename
filename = "Y:\SJSU_Studies\Sem5_CS298\Project_data\Text_files2\Pos\Autism2014\pos"+r[0]+".txt"
f1 = open(filename,"wb")
f1.write(r[1])
f1.close()
f.close()
| [
"[email protected]"
] | |
f8e29bebea095bc418cb314421c0844c2e037f6f | fd1612fb542fede6899c3f69ff124e7b2335ad95 | /list/migrations/0005_auto_20200104_1957.py | f34965279f18ad47b95e1edb2638d0a69f024a87 | [] | no_license | Shovon588/toDoList | 821a7163caa6d6abb4c7f8e6ecea34e6249b1b87 | bf037097a37734a106c959729c05d9beb0f503e6 | refs/heads/master | 2021-09-25T08:07:45.355541 | 2020-04-19T05:18:23 | 2020-04-19T05:18:23 | 231,726,869 | 0 | 0 | null | 2021-09-22T18:52:30 | 2020-01-04T07:37:16 | JavaScript | UTF-8 | Python | false | false | 440 | py | # Generated by Django 3.0 on 2020-01-04 13:57
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('list', '0004_auto_20200104_1956'),
]
operations = [
migrations.AlterField(
model_name='item',
name='time',
field=models.DateTimeField(default=datetime.datetime(2020, 1, 4, 19, 57, 7, 924535)),
),
]
| [
"[email protected]"
] | |
6639c8cd374ef3308f690b9a95a9bc233e098b80 | 94c8d9c43272524af229d6ad4d50ea8e29b34e4a | /bin/test_aniso.py | 73b218dfb6d84e0412cbb5b0441a11dc0eb51a2d | [
"BSD-3-Clause"
] | permissive | simonsobs/symlens | 81b519bfed0f37f027ea881ebd2591cdcb9a7849 | 2e93220fc7b7ea2969b76af494b14480b6e7a92b | refs/heads/master | 2022-11-11T19:20:43.432138 | 2022-09-29T15:07:01 | 2022-09-29T15:07:01 | 174,179,916 | 8 | 6 | NOASSERTION | 2022-06-30T00:22:59 | 2019-03-06T16:23:55 | Python | UTF-8 | Python | false | false | 1,842 | py | from __future__ import print_function
from orphics import maps,io,cosmology,stats
from pixell import enmap
import numpy as np
import os,sys
import symlens
nsims = 40
deg = 25.
px = 2.0
shape,wcs = maps.rect_geometry(width_deg=deg,px_res_arcmin=px,proj='plain')
modlmap = enmap.modlmap(shape,wcs)
ymap,xmap = enmap.posmap(shape,wcs)
omap = np.sin(ymap/np.pi*100) + np.cos(xmap/np.pi*100)
mfact = 10
afact = 20
rms = (omap - omap.min())*mfact + afact
# io.hplot(rms,colorbar=True)
pmap = enmap.pixsizemap(shape,wcs)
ivar = maps.ivar(shape,wcs,rms,ipsizemap=pmap)
# io.hplot(ivar,colorbar=True)
my_tasks = range(nsims)
theory = cosmology.default_theory()
cov = theory.lCl('TT',modlmap)
mgen = maps.MapGen((1,)+shape,wcs,cov=cov[None,None])
fwhm = 1.5
wnoise = 40.
kbeam = maps.gauss_beam(modlmap,fwhm)
feed_dict = {}
lmin = 200
lmax = 3000
Lmin = 40
Lmax = 3000
xmask = maps.mask_kspace(shape,wcs,lmin=lmin,lmax=lmax)
ymask = xmask
kmask = maps.mask_kspace(shape,wcs,lmin=Lmin,lmax=Lmax)
feed_dict['uC_T_T'] = cov
feed_dict['tC_T_T'] = cov + (wnoise*np.pi/180/60)**2 / kbeam**2
qe = symlens.QE(shape,wcs,feed_dict,'hu_ok','TT',xmask=xmask,ymask=ymask,kmask=kmask)
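# Notes on the setup above (as used here; see the symlens docs for details):
# 'hu_ok' with 'TT' selects the Hu & Okamoto temperature quadratic estimator;
# xmask/ymask restrict the CMB multipoles entering each estimator leg to
# lmin < l < lmax, and kmask restricts the reconstructed lensing multipoles to
# Lmin < L < Lmax. qe.reconstruct(feed_dict) then returns the normalized kappa
# map in Fourier space, which the loop below averages into a mean-field estimate.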
s = stats.Stats()
for task in my_tasks:
cmb = maps.filter_map(mgen.get_map(seed=(1,task))[0],kbeam)
nseed = (2,task)
nmap = maps.white_noise(shape,wcs,noise_muK_arcmin=None,seed=nseed,ipsizemap=pmap,div=ivar)
obs = cmb + nmap
kobs = enmap.fft(obs,normalize='phys')/kbeam
kobs[~np.isfinite(kobs)] = 0
feed_dict['X'] = kobs
feed_dict['Y'] = kobs
krecon = qe.reconstruct(feed_dict)
print(cmb.shape,nmap.shape,krecon.shape)
s.add_to_stack('kreal',krecon.real)
s.add_to_stack('kimag',krecon.imag)
s.get_stacks()
mf = enmap.ifft(s.stacks['kreal'] + 1j*s.stacks['kimag'],normalize='phys')
io.hplot(mf,'mf.png')
| [
"[email protected]"
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.