id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses: 1 value)
---|---|---|
1766600
|
"""
Code that processes SQL files and returns modules of database functions.
"""
from . import parser, context
from .exceptions import NoConnectionError
from contextlib import contextmanager
from glob import glob
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import threading
import re
__pdoc__ = {}
class Module(object):
"""
Holds a set of SQL functions loaded from files.
"""
sqlpath = None
def __init__(self, sqlpath):
"""
Loads functions found in the *sql files specified by `sqlpath` into
properties on this object.
The SQL functions named in these files should be unique.
"""
if not os.path.isdir(sqlpath):
raise ValueError('Directory not found: %s' % sqlpath)
self.sqlpath = sqlpath
self._statements = {}
self._engine = None
self._sessionmaker = None
self._locals = threading.local()
for sqlfile in glob(os.path.join(self.sqlpath, '*sql')):
with open(sqlfile, 'r') as f:
pugsql = f.read()
# handle multiple statements per file
statements = re.split(r'\n+(?=--\s*:name)', pugsql)
for statement in statements:
s = parser.parse(statement, ctx=context.Context(sqlfile))
if hasattr(self, s.name):
if s.name not in self._statements:
raise ValueError(
'Error loading %s - the function name "%s" is '
'reserved. Please choose another name.' % (
sqlfile, s.name))
raise ValueError(
'Error loading %s - a SQL function named %s was already '
'defined in %s.' % (
sqlfile, s.name, self._statements[s.name].filename))
s.set_module(self)
setattr(self, s.name, s)
self._statements[s.name] = s
@contextmanager
def transaction(self):
"""
Returns a session that manages a transaction scope, in which
many statements can be run. Statements run on this module will
automatically use this transaction. The normal use case is to use this
like a context manager, rather than interact with the result:
foo = pugsql.module('sql/foo')
with foo.transaction():
x = foo.get_x(x_id=1234)
foo.update_x(x_id=1234, x=x+1)
# when the context manager exits, the transaction is committed.
# if an exception occurs, it is rolled back.
The transaction is active for statements executed on the current thread
only.
For engines that support SAVEPOINT, calling this method a second time
begins a nested transaction.
For more info, see here:
https://docs.sqlalchemy.org/en/13/orm/session_transaction.html
"""
if not getattr(self._locals, 'session', None):
if not self._sessionmaker:
raise NoConnectionError()
self._locals.session = self._sessionmaker()
session = self._locals.session
try:
yield session
session.commit()
except Exception as e:
session.rollback()
raise e
finally:
session.close()
self._locals.session = None
def _execute(self, clause, *multiparams, **params):
if getattr(self._locals, 'session', None):
return self._locals.session.execute(clause, multiparams or params)
if not self._engine:
raise NoConnectionError()
return self._engine.execute(clause, *multiparams, **params)
@property
def _dialect(self):
"""
Gets the dialect for the SQLAlchemy engine.
"""
if not self._engine:
raise NoConnectionError()
return self._engine.dialect
def connect(self, connstr):
"""
Sets the connection string for SQL functions on this module.
See https://docs.sqlalchemy.org/en/13/core/engines.html for examples of
legal connection strings for different databases.
"""
self.set_engine(create_engine(connstr))
def set_engine(self, engine):
"""
Sets the SQLAlchemy engine for SQL functions on this module. This can
be used instead of the connect method, when more customization of the
connection engine is desired.
See also: https://docs.sqlalchemy.org/en/13/core/connections.html
"""
self._engine = engine
self._sessionmaker = sessionmaker(bind=engine)
def disconnect(self):
"""
Disassociates the module from any connection it was previously given.
"""
self._engine = None
self._sessionmaker = None
def __iter__(self):
return iter(self._statements.values())
__pdoc__['Module.sqlpath'] = (
'The path that the `pugsql.compiler.Module` was loaded from.')
modules = {}
def _module(sqlpath):
"""
Compiles a new `pugsql.compiler.Module`, or returns a cached one. Use the
`pugsql.module` function instead of this one.
"""
global modules
if sqlpath not in modules:
modules[sqlpath] = Module(sqlpath)
return modules[sqlpath]
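# Usage sketch (illustrative only, not part of this module; assumes a sql/users.sql
# file that defines a statement such as "-- :name find_user :one"):
#
#   import pugsql
#   users = pugsql.module('sql/users')     # compiles every *sql file in the directory
#   users.connect('sqlite:///users.db')    # any SQLAlchemy connection string works
#   with users.transaction():
#       row = users.find_user(user_id=42)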
|
StarcoderdataPython
|
1690895
|
<reponame>Syunkolee9891/Mayan-EDMS<filename>mayan/apps/document_states/links.py
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mayan.apps.documents.permissions import permission_document_type_edit
from mayan.apps.navigation.classes import Link
from .permissions import (
permission_workflow_create, permission_workflow_delete,
permission_workflow_edit, permission_workflow_tools,
permission_workflow_view,
)
link_setup_document_type_workflows = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_document_type_workflow_list',
permissions=(permission_document_type_edit,), text=_('Workflows'),
view='document_states:document_type_workflows',
)
link_setup_workflow_create = Link(
icon_class_path='mayan.apps.document_states.icons.icon_workflow_create',
permissions=(permission_workflow_create,),
text=_('Create workflow'), view='document_states:setup_workflow_create'
)
link_setup_workflow_delete = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_delete',
permissions=(permission_workflow_delete,),
tags='dangerous', text=_('Delete'),
view='document_states:setup_workflow_delete',
)
link_setup_workflow_document_types = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_document_type_list',
permissions=(permission_workflow_edit,), text=_('Document types'),
view='document_states:setup_workflow_document_types',
)
link_setup_workflow_edit = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_edit',
permissions=(permission_workflow_edit,),
text=_('Edit'), view='document_states:setup_workflow_edit',
)
link_setup_workflow_list = Link(
icon_class_path='mayan.apps.document_states.icons.icon_setup_workflow_list',
permissions=(permission_workflow_view,), text=_('Workflows'),
view='document_states:setup_workflow_list'
)
link_setup_workflow_state_action_delete = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_state_action_delete',
permissions=(permission_workflow_edit,),
tags='dangerous', text=_('Delete'),
view='document_states:setup_workflow_state_action_delete',
)
link_setup_workflow_state_action_edit = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_state_action_edit',
permissions=(permission_workflow_edit,),
text=_('Edit'), view='document_states:setup_workflow_state_action_edit',
)
link_setup_workflow_state_action_list = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_state_action_list',
permissions=(permission_workflow_edit,),
text=_('Actions'),
view='document_states:setup_workflow_state_action_list',
)
link_setup_workflow_state_action_selection = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_state_action',
permissions=(permission_workflow_edit,), text=_('Create action'),
view='document_states:setup_workflow_state_action_selection',
)
link_setup_workflow_state_create = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_state_create',
permissions=(permission_workflow_edit,), text=_('Create state'),
view='document_states:setup_workflow_state_create',
)
link_setup_workflow_state_delete = Link(
args='object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_state_delete',
permissions=(permission_workflow_edit,),
tags='dangerous', text=_('Delete'),
view='document_states:setup_workflow_state_delete',
)
link_setup_workflow_state_edit = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_state_edit',
permissions=(permission_workflow_edit,),
text=_('Edit'), view='document_states:setup_workflow_state_edit',
)
link_setup_workflow_states = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_state',
permissions=(permission_workflow_view,), text=_('States'),
view='document_states:setup_workflow_state_list',
)
link_setup_workflow_transition_create = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_transition_create',
permissions=(permission_workflow_edit,), text=_('Create transition'),
view='document_states:setup_workflow_transition_create',
)
link_setup_workflow_transition_delete = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_transition_delete',
permissions=(permission_workflow_edit,),
tags='dangerous', text=_('Delete'),
view='document_states:setup_workflow_transition_delete',
)
link_setup_workflow_transition_edit = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_transition_edit',
permissions=(permission_workflow_edit,),
text=_('Edit'), view='document_states:setup_workflow_transition_edit',
)
link_setup_workflow_transitions = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_transition',
permissions=(permission_workflow_view,), text=_('Transitions'),
view='document_states:setup_workflow_transition_list',
)
link_workflow_transition_events = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_transition_triggers',
permissions=(permission_workflow_edit,),
text=_('Transition triggers'),
view='document_states:setup_workflow_transition_events'
)
link_workflow_preview = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_preview',
permissions=(permission_workflow_view,),
text=_('Preview'), view='document_states:workflow_preview'
)
link_tool_launch_all_workflows = Link(
icon_class_path='mayan.apps.document_states.icons.icon_tool_launch_all_workflows',
permissions=(permission_workflow_tools,),
text=_('Launch all workflows'),
view='document_states:tool_launch_all_workflows'
)
# Document workflow instances
link_document_workflow_instance_list = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_document_workflow_instance_list',
permissions=(permission_workflow_view,), text=_('Workflows'),
view='document_states:document_workflow_instance_list',
)
link_workflow_instance_detail = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_instance_detail',
permissions=(permission_workflow_view,),
text=_('Detail'), view='document_states:workflow_instance_detail',
)
link_workflow_instance_transition = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_instance_transition',
text=_('Transition'),
view='document_states:workflow_instance_transition',
)
# Runtime proxies
link_workflow_runtime_proxy_document_list = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_runtime_proxy_document_list',
permissions=(permission_workflow_view,),
text=_('Workflow documents'),
view='document_states:workflow_document_list',
)
link_workflow_runtime_proxy_list = Link(
icon_class_path='mayan.apps.document_states.icons.icon_workflow_runtime_proxy_list',
permissions=(permission_workflow_view,),
text=_('Workflows'), view='document_states:workflow_list'
)
link_workflow_runtime_proxy_state_document_list = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_runtime_proxy_state_document_list',
permissions=(permission_workflow_view,),
text=_('State documents'),
view='document_states:workflow_state_document_list',
)
link_workflow_runtime_proxy_state_list = Link(
args='resolved_object.pk',
icon_class_path='mayan.apps.document_states.icons.icon_workflow_runtime_proxy_state_list',
permissions=(permission_workflow_view,),
text=_('States'), view='document_states:workflow_state_list',
)
|
StarcoderdataPython
|
1778709
|
"""
process_your_images.py (author: <NAME> / git: ankonzoid)
Process your images in the `input` directory using any of the following techniques.
The results are written to the `output` directory.
Standard techniques:
1) Force resizing (ypixels, xpixels) -> (ypixels_force, xpixels_force)
2) Grey scaling (3 rgb channels -> 1 greyscale channel)
3) K-means color quantization (using cluster colors or user-defined colors)
4) Edge detection (gaussian blur, then sobel edge detection)
Extra techniques (requires opencv):
1) object crop (opencv for contour search, then force resize to object binding box)
2) object binarization
"""
import os
from src.img2resize import img2resize
from src.img2greyscale import img2greyscale
from src.img2kmeans import img2kmeans
from src.img2edges import img2edges
def main():
settings = {
"img2resize": {"use": True, "ypixels": 100, "xpixels": 100},
"img2greyscale": {"use": True},
"img2kmeans": {"use": False, "k": 5, "use_custom_colors": False,
"custom_colors": [[1, 1, 1],
[37/255, 156/255, 247/255],
[0, 0, 0]]},
"img2edges": {"use": True}, # requires opencv
}
# Run our tools
process_your_images(settings, input_dir="input", output_dir="output")
def process_your_images(settings, input_dir="input", output_dir="output"):
# Check input and output directories exist
if not os.path.isdir(input_dir):
exit("err: could not find input directory '{}'".format(input_dir))
if not os.path.isdir(output_dir):
exit("err: could not find output directory '{}'".format(output_dir))
# Process each image in the input directory
files_input_dir = os.listdir(input_dir)
n_files = len(files_input_dir)
for i, file in enumerate(files_input_dir): # extract local filenames
# Consider only files that end with .jpg and .jpeg
if not file.endswith((".jpg", ".jpeg")):
continue
# Build filename and nametag for output
img_filename = os.path.join(input_dir, file)
nametag = os.path.splitext(file)[0]
# Process image
print("[{}/{}] Processing '{}'...".format(i+1, n_files, file))
if settings["img2resize"]["use"]:
output_filename = os.path.join(output_dir, nametag + "_resized.jpg")
img2resize(img_filename, output_filename,
ypixels=settings["img2resize"]["ypixels"],
xpixels=settings["img2resize"]["xpixels"])
if settings["img2greyscale"]["use"]:
output_filename = os.path.join(output_dir, nametag + "_greyscale.jpg")
img2greyscale(img_filename, output_filename)
if settings["img2kmeans"]["use"]:
output_filename = os.path.join(output_dir, nametag + "_kmeans.jpg")
img2kmeans(img_filename, output_filename,
k=settings["img2kmeans"]["k"],
use_custom_colors=settings["img2kmeans"]["use_custom_colors"],
custom_colors=settings["img2kmeans"]["custom_colors"])
if settings["img2edges"]["use"]:
output_filename = os.path.join(output_dir, nametag + "_edges.jpg")
img2edges(img_filename, output_filename)
# Driver
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4835148
|
<reponame>hamed1361554/recipe-app-api
from django.contrib.auth import get_user_model
from django.test import TestCase
from rest_framework.reverse import reverse
from rest_framework.test import APIClient
import rest_framework.status as status
TOKEN_URL = reverse('users:token')
def create_user(**kwargs):
return get_user_model().objects.create_user(**kwargs)
class UserTokenTests(TestCase):
"""User Token Tests"""
def setUp(self):
"""Sets up"""
self.client = APIClient()
def test_create_user_token(self):
"""Tests that user token created successfully"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('token', res.data)
def test_invalid_user_credentials(self):
"""Tests that invalid user credentials fails"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
create_user(**payload)
res = self.client.post(TOKEN_URL, {**payload, 'password': '<PASSWORD>'})
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotIn('token', res.data)
def test_invalid_user(self):
"""Tests that invalid user fails"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
res = self.client.post(TOKEN_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotIn('token', res.data)
def test_invalid_user_password(self):
"""Tests that invalid user fails"""
payload = {
'email': '<EMAIL>',
'password': ''
}
res = self.client.post(TOKEN_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotIn('token', res.data)
|
StarcoderdataPython
|
6514978
|
from pyridge.generic.scaler import Scaler
import numpy as np
class StandardScaler(Scaler):
"""
Scaler for data, similar to StandardScaler from
sklearn but avoiding shape restrictions.
"""
def __init__(self):
# Type annotations only; values are assigned in fit(). The builtin float is used here
# because np.float is deprecated and was removed in NumPy >= 1.24.
self.mean_: float
self.std_: float
def get_params(self):
return {'mean_': self.mean_, 'std_': self.std_}
def fit(self, values):
self.mean_ = np.mean(values, axis=0)
self.std_ = np.std(values, axis=0)
def transform(self, values):
return (values - self.mean_) / self.std_
def fit_transform(self, values):
self.fit(values)
return self.transform(values)
def inverse_transform(self, values):
return values * self.std_ + self.mean_
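# Minimal usage sketch (illustrative; the example array and the __main__ guard are
# additions, not part of the original module):
if __name__ == '__main__':
    X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)           # column-wise zero mean, unit std
    X_back = scaler.inverse_transform(X_scaled)  # recovers the original values
    print(np.allclose(X, X_back))                # True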
|
StarcoderdataPython
|
5003504
|
# -*- coding: utf-8 -*-
# Created on Sun Jul 19 18:10:11 2020
#
# Copyright 2020 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from alpha_vantage.timeseries import TimeSeries
import pandas as pd
import time as time
KEY = 'yourkeyhere!'
# Define output_format as pandas, otherwise we'll get the data in JSON format
ts = TimeSeries(key=KEY, output_format='pandas')
data = ts.get_daily('TSLA', outputsize='full')[0]
data.columns = ['open', 'high', 'low', 'close', 'volume']
# Data comes sorted by date desc and we need to reverse it
data = data.iloc[::-1]
stocks = ["TSLA", "AMZN", "GOOG", "MSFT", "FB", "ES=F", "CABK.MC"]
close_price = pd.DataFrame()
# To overcome the API call frequency limitation, we need to make our query slower
number_api_calls = 0
for symbol_ticker in stocks:
start_time = time.time()
ts = TimeSeries(key=KEY, output_format='pandas')
data = ts.get_intraday(symbol=symbol_ticker, interval='1min', outputsize='full')[0]
number_api_calls += 1
data.columns = ['open', 'high', 'low', 'close', 'volume']
close_price[symbol_ticker] = data['close']
if number_api_calls == 5:
number_api_calls = 0
time.sleep(60 - ((time.time() - start_time) % 60.0))
|
StarcoderdataPython
|
3406764
|
<filename>api/cron.py<gh_stars>1-10
import redis
from .models import Patch
from django.db import transaction
@transaction.atomic
def store_from_redis():
pool = redis.ConnectionPool(host="127.0.0.1", port=6379, max_connections=10)
rds = redis.Redis(connection_pool=pool)
for patch in Patch.objects.select_for_update().all():
result = rds.hget("apply_count_hash", "/report_update?patch_id=%d" % (patch.id))
if result is not None:
count = int(result)
if count > patch.apply_count:
patch.apply_count = count
patch.supersave()
else:
# TODO
# Data inconsistency detected (Redis apply count is lower than the DB value); log it
pass
result = rds.hget("download_count_hash", "patch_id=%d" % (patch.id))
if result is not None:
count = int(result)
if count > patch.download_count:
patch.download_count = count
patch.supersave()
else:
# TODO
# Data inconsistency detected (Redis download count is lower than the DB value); log it
pass
def main():
store_from_redis()
if __name__ == '__main__':
main()
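# Illustrative Redis layout this job expects (field names mirror the hget calls above;
# the patch id and counter values are made up):
#   HSET apply_count_hash "/report_update?patch_id=42" 7
#   HSET download_count_hash "patch_id=42" 3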
|
StarcoderdataPython
|
1909989
|
import argparse
import sys
from typing import List, Tuple, Set, Union
# Part 1
def two_sum(lst: List[int], total: int) -> Union[Tuple[int, int], None] :
container: Set[int] = set()
for num in lst:
if total - num in container:
return (num, total - num)
else:
container.add(num)
# Part 2
def three_sum(lst: List[int], total: int) -> Union[Tuple[int, int, int], None]:
container: Set[int] = set()
for num in lst:
tup = two_sum(lst, total - num)
if tup:
return (num, tup[0], tup[1])
else:
container.add(num)
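# Example (the AoC 2020 day 1 sample input, shown for illustration):
#   two_sum([1721, 979, 366, 299, 675, 1456], 2020)   -> (299, 1721), product 514579
#   three_sum([1721, 979, 366, 299, 675, 1456], 2020) -> (979, 675, 366), product 241861950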
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Solve day1 of AoC 2020")
parser.add_argument(
"input file", metavar="FILE", help="name of input file (day1.in)"
)
args: argparse.Namespace = parser.parse_args()
file_name: str = args.input_file
with open(file_name) as f:
lst: List[int] = list(map(int, f.read().split()))
result1: Union[Tuple[int, int], None] = two_sum(lst, 2020)
if result1:
num1, num2 = result1
print(f"Part 1: The product of {num1} and {num2} is: {num1 * num2}")
result2: Union[Tuple[int, int, int], None] = three_sum(lst, 2020)
if result2:
num1, num2, num3 = result2
print(
f"Part 2: The product of {num1}, {num2} and {num3} is: {num1 * num2 * num3}"
)
|
StarcoderdataPython
|
8002957
|
import time
while(True):
print 'hello'
time.sleep(2)
|
StarcoderdataPython
|
5099748
|
<reponame>terop/latexbot
#!/usr/bin/env python3
"""A program (bot) for rendering short snippets of LaTeX code as an image.
A LaTeX distribution needs to be installed on the machine where this code
is run."""
import sys
from io import BytesIO
from tempfile import NamedTemporaryFile
from os.path import basename
from os import remove
from shlex import split
from glob import glob
import re
import subprocess
from sympy import preview
from jinja2 import Environment, PackageLoader
from flask import Flask, request, abort, send_file, make_response, render_template
# pylint: disable=invalid-name
app = Flask(__name__)
app.config.from_pyfile('latexbot.cfg')
def render(latex_source, mode, image_name=None, output_buffer=None):
"""Renders the given LaTeX source and outputs it in a PNG image
with the given name. Returns True on success and False otherwise."""
try:
if mode == 'link':
preview(latex_source, euler=False, viewer='file', filename=image_name)
else:
preview(latex_source, euler=False, output='png', viewer='BytesIO',
outputbuffer=output_buffer)
except RuntimeError as err:
print('Got a Latex error: {}'.format(err), file=sys.stderr)
return False
return True
# Routes
@app.route('/', methods=['GET'])
def index():
"""Index route."""
return render_template('index.html')
@app.route('/render/<latex_input>', methods=['GET'])
def render_url_input(latex_input):
"""Render the provided LaTeX input."""
if 'mode' in request.args:
mode = request.args['mode']
else:
mode = app.config['OUTPUT_MODE']
# Hack to generate a temporary filename
with NamedTemporaryFile(dir='/tmp', prefix='latexbot_', suffix='.png', delete=True) as tmpfile:
tmpfile_name = tmpfile.name
if mode == 'link':
if not render(latex_input, mode, image_name=tmpfile_name):
return make_response('Internal server error, please check input validity', 500)
return '{}{}image/{}'.format(request.url_root,
'{}/'.format(app.config['EXTRA_URL_PATH'])
if app.config['EXTRA_URL_PATH'] != '' else '',
re.search(r'latexbot_(\w+)\.png',
basename(tmpfile_name)).group(1))
else:
out_buffer = BytesIO()
if not render(latex_input, mode, output_buffer=out_buffer):
return make_response('Internal server error, please check input validity', 500)
out_buffer.seek(0)
return send_file(out_buffer, mimetype='image/png')
@app.route('/image/<image_id>', methods=['GET'])
def get_image(image_id):
"""Returns the image referred by the given ID."""
try:
image = open('/tmp/latexbot_{}.png'.format(image_id), 'rb')
except FileNotFoundError:
print('Tried to access non-existent image: {}'.format(image_id),
file=sys.stderr)
abort(404)
return send_file(image, mimetype='image/png')
@app.route('/input', methods=['GET'])
def input_form():
"""Render an input form."""
return render_template('input.html')
@app.route('/input', methods=['POST'])
def render_from_form():
"""Render LaTeX from the input form."""
env = Environment(loader=PackageLoader('latexbot', 'templates'))
template = env.get_template('template.tex')
# pylint: disable=no-member
rendered_template = template.render(latex_input=request.form['latex-input'])
with NamedTemporaryFile(dir='/tmp', prefix='latexbot_', suffix='.tex',
delete=True) as tmpfile:
tmpfile_name = tmpfile.name
with open(tmpfile_name, 'w') as tmpfile:
tmpfile.write(rendered_template)
rc = subprocess.call(['latex', '-interaction=nonstopmode', '-output-directory=/tmp',
tmpfile_name])
if rc != 0:
# Render failed
for f in glob(tmpfile_name.replace('tex', '*')):
remove(f)
return make_response('Internal server error: LaTeX rendering failed. '
'Please check input validity.', 500)
rc = subprocess.call(split('dvipng -T tight -D 150 -z 9 {} -o {}'.
format(tmpfile_name.replace('.tex', '.dvi'),
tmpfile_name.replace('.tex', '.png'))))
if rc != 0:
# DVI to PNG conversion failed
for f in glob(tmpfile_name.replace('tex', '*')):
remove(f)
return make_response('Internal server error: image conversion failed.', 500)
# Remove auxiliary files generated during render
for f in glob(tmpfile_name.replace('tex', '*')):
if not f.endswith('png'):
remove(f)
if request.form['output'] == 'link':
return '{}{}image/{}'.format(request.url_root,
'{}/'.format(app.config['EXTRA_URL_PATH'])
if app.config['EXTRA_URL_PATH'] != '' else '',
re.search(r'latexbot_(\w+)\.tex',
basename(tmpfile_name)).group(1))
else:
return send_file(open(tmpfile_name.replace('.tex', '.png'), 'rb'),
mimetype='image/png')
if __name__ == '__main__':
app.run(host='0.0.0.0')
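# Illustrative requests against a locally running instance (host/port and the example
# formula are assumptions, not part of the original code):
#   GET http://localhost:5000/render/x%5E2%2By%5E2            -> PNG bytes or a link,
#                                                                 depending on OUTPUT_MODE
#   GET http://localhost:5000/render/x%5E2%2By%5E2?mode=link  -> URL of the form .../image/<id>
#   GET http://localhost:5000/input                           -> HTML form for longer snippets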
|
StarcoderdataPython
|
3473684
|
<reponame>ManuelAlvarezC/keyboard-anywhere
# -*- coding: utf-8 -*-
"""Top-level package for keyboard_anywhere."""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.0.0.dev0'
|
StarcoderdataPython
|
4863915
|
'''
URL: https://leetcode.com/problems/best-time-to-buy-and-sell-stock-with-transaction-fee/
Time complexity: O(n)
Space complexity: O(1)
'''
class Solution(object):
def maxProfit(self, prices, fee):
"""
:type prices: List[int]
:type fee: int
:rtype: int
"""
if len(prices) == 0:
return 0
ti0 = 0
ti1 = float('-inf')
for i in range(len(prices)):
old_ti0 = ti0
ti0 = max(ti0, ti1 + prices[i])            # not holding: rest, or sell today
ti1 = max(ti1, old_ti0 - prices[i] - fee)  # holding: rest, or buy today (price + fee)
return ti0
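# Example (LeetCode's sample case, shown here for illustration):
#   prices = [1, 3, 2, 8, 4, 9], fee = 2
#   buy at 1, sell at 8 (profit 5 after fee), buy at 4, sell at 9 (profit 3 after fee)
#   Solution().maxProfit([1, 3, 2, 8, 4, 9], 2)  -> 8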
|
StarcoderdataPython
|
9714069
|
import hashlib
import json
import time
from command import buildcmd
from common import *
def sifchain_denom_hash(network_descriptor, token_contract_address):
assert on_peggy2_branch
assert token_contract_address.startswith("0x")
s = str(network_descriptor) + token_contract_address.lower()
return "sif" + hashlib.sha256(s.encode("UTF-8")).digest().hex()
class Sifnoded:
def __init__(self, cmd, home=None):
self.cmd = cmd
self.binary = "sifnoded"
self.home = home
self.keyring_backend = "test"
# self.sifnoded_burn_gas_cost = 16 * 10**10 * 393000 # see x/ethbridge/types/msgs.go for gas
# self.sifnoded_lock_gas_cost = 16 * 10**10 * 393000
def init(self, moniker, chain_id):
args = [self.binary, "init", moniker, "--chain-id", chain_id]
res = self.cmd.execst(args)
return json.loads(res[2]) # output is on stderr
def keys_list(self):
args = ["keys", "list", "--output", "json"]
res = self.sifnoded_exec(args, keyring_backend=self.keyring_backend, sifnoded_home=self.home)
return json.loads(stdout(res))
def keys_show(self, name, bech=None):
args = ["keys", "show", name] + \
(["--bech", bech] if bech else [])
res = self.sifnoded_exec(args, keyring_backend=self.keyring_backend, sifnoded_home=self.home)
return yaml_load(stdout(res))
def get_val_address(self, moniker):
res = self.sifnoded_exec(["keys", "show", "-a", "--bech", "val", moniker], keyring_backend=self.keyring_backend, sifnoded_home=self.home)
expected = exactly_one(stdout_lines(res))
result = exactly_one(self.keys_show(moniker, bech="val"))["address"]
assert result == expected
return result
# How "sifnoded keys add <name> --keyring-backend test" works:
# If name does not exist yet, it creates it and returns a yaml
# If name already exists, prompts for overwrite (y/n) on standard input, generates new address/pubkey/mnemonic
# Directory used is xxx/keyring-test if "--home xxx" is specified, otherwise $HOME/.sifnoded/keyring-test
def keys_add(self, moniker, mnemonic):
stdin = [" ".join(mnemonic)]
res = self.sifnoded_exec(["keys", "add", moniker, "--recover"], keyring_backend=self.keyring_backend,
sifnoded_home=self.home, stdin=stdin)
account = exactly_one(yaml_load(stdout(res)))
return account
# Creates a new key in the keyring and returns its address ("sif1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx").
# Since this is a test keyring, we don't need to save the generated private key.
# If we wanted to recreate it, we can capture the mnemonic from the message that is printed to stderr.
def keys_add_1(self, moniker):
res = self.sifnoded_exec(["keys", "add", moniker], keyring_backend=self.keyring_backend, sifnoded_home=self.home, stdin=["y"])
account = exactly_one(yaml_load(stdout(res)))
unused_mnemonic = stderr(res).splitlines()[-1].split(" ")
return account
def keys_delete(self, name):
self.cmd.execst(["sifnoded", "keys", "delete", name, "--keyring-backend", self.keyring_backend], stdin=["y"], check_exit=False)
def add_genesis_account(self, sifnodeadmin_addr, tokens):
tokens_str = ",".join([sif_format_amount(amount, denom) for amount, denom in tokens])
self.sifnoded_exec(["add-genesis-account", sifnodeadmin_addr, tokens_str], sifnoded_home=self.home)
def add_genesis_validators(self, address):
args = ["sifnoded", "add-genesis-validators", address]
res = self.cmd.execst(args)
return res
# At the moment only on future/peggy2 branch, called from PeggyEnvironment
def add_genesis_validators_peggy(self, evm_network_descriptor, valoper, validator_power):
self.sifnoded_exec(["add-genesis-validators", str(evm_network_descriptor), valoper, str(validator_power)],
sifnoded_home=self.home)
def set_genesis_oracle_admin(self, address):
self.sifnoded_exec(["set-genesis-oracle-admin", address], sifnoded_home=self.home)
def set_genesis_whitelister_admin(self, address):
self.sifnoded_exec(["set-genesis-whitelister-admin", address], sifnoded_home=self.home)
def set_gen_denom_whitelist(self, denom_whitelist_file):
self.sifnoded_exec(["set-gen-denom-whitelist", denom_whitelist_file], sifnoded_home=self.home)
# At the moment only on future/peggy2 branch, called from PeggyEnvironment
# This was split from init_common
def peggy2_add_account(self, name, tokens, is_admin=False):
# TODO: Peggy2 devenv feeds "yes\nyes" into standard input, we only pass "y\n"
account = self.keys_add_1(name)
account_address = account["address"]
self.add_genesis_account(account_address, tokens)
if is_admin:
self.set_genesis_oracle_admin(account_address)
self.set_genesis_whitelister_admin(account_address)
return account_address
def peggy2_add_relayer_witness_account(self, name, tokens, evm_network_descriptor, validator_power, denom_whitelist_file):
account_address = self.peggy2_add_account(name, tokens) # Note: is_admin=False
# Whitelist relayer/witness account
valoper = self.get_val_address(name)
self.set_gen_denom_whitelist(denom_whitelist_file)
self.add_genesis_validators_peggy(evm_network_descriptor, valoper, validator_power)
return account_address
def tx_clp_create_pool(self, chain_id, from_name, symbol, fees, native_amount, external_amount):
args = ["tx", "clp", "create-pool", "--chain-id", chain_id, "--from", from_name, "--symbol", symbol,
"--fees", sif_format_amount(*fees), "--nativeAmount", str(native_amount), "--externalAmount",
str(external_amount), "--yes"]
res = self.sifnoded_exec(args, keyring_backend=self.keyring_backend) # TODO home?
return yaml_load(stdout(res))
def peggy2_token_registry_register_all(self, registry_path, gas_prices, gas_adjustment, from_account,
chain_id
):
args = ["tx", "tokenregistry", "register-all", registry_path, "--gas-prices", sif_format_amount(*gas_prices),
"--gas-adjustment", str(gas_adjustment), "--from", from_account, "--chain-id", chain_id, "--yes"]
res = self.sifnoded_exec(args, keyring_backend=self.keyring_backend, sifnoded_home=self.home)
return [json.loads(x) for x in stdout(res).splitlines()]
def peggy2_set_cross_chain_fee(self, admin_account_address, network_id, ethereum_cross_chain_fee_token,
cross_chain_fee_base, cross_chain_lock_fee, cross_chain_burn_fee, admin_account_name, chain_id, gas_prices,
gas_adjustment
):
# Checked OK
args = ["tx", "ethbridge", "set-cross-chain-fee", admin_account_address, str(network_id),
ethereum_cross_chain_fee_token, str(cross_chain_fee_base), str(cross_chain_lock_fee),
str(cross_chain_burn_fee), "--from", admin_account_name, "--chain-id", chain_id, "--gas-prices",
sif_format_amount(*gas_prices), "--gas-adjustment", str(gas_adjustment), "-y"]
res = self.sifnoded_exec(args, keyring_backend=self.keyring_backend, sifnoded_home=self.home)
return res
def sifnoded_start(self, tcp_url=None, minimum_gas_prices=None, log_format_json=False, log_file=None):
sifnoded_exec_args = self.build_start_cmd(tcp_url=tcp_url, minimum_gas_prices=minimum_gas_prices,
log_format_json=log_format_json)
return self.cmd.spawn_asynchronous_process(sifnoded_exec_args, log_file=log_file)
def build_start_cmd(self, tcp_url=None, minimum_gas_prices=None, log_format_json=False):
args = [self.binary, "start"] + \
(["--minimum-gas-prices", sif_format_amount(*minimum_gas_prices)] if minimum_gas_prices is not None else []) + \
(["--rpc.laddr", tcp_url] if tcp_url else []) + \
(["--log_level", "debug"] if log_format_json else []) + \
(["--log_format", "json"] if log_format_json else []) + \
(["--home", self.home] if self.home else [])
return buildcmd(args)
def sifnoded_exec(self, args, sifnoded_home=None, keyring_backend=None, stdin=None, cwd=None):
args = [self.binary] + args + \
(["--home", sifnoded_home] if sifnoded_home else []) + \
(["--keyring-backend", keyring_backend] if keyring_backend else [])
res = self.cmd.execst(args, stdin=stdin, cwd=cwd)
return res
def get_status(self, host, port):
url = "http://{}:{}/node_info".format(host, port)
return json.loads(http_get(url).decode("UTF-8"))
def wait_up(self, host, port):
while True:
from urllib.error import URLError
try:
return self.get_status(host, port)
except URLError:
time.sleep(1)
class Sifgen:
def __init__(self, cmd):
self.cmd = cmd
self.binary = "sifgen"
# Reference: docker/localnet/sifnode/root/scripts/sifnode.sh (branch future/peggy2):
# sifgen node create "$CHAINNET" "$MONIKER" "$MNEMONIC" --bind-ip-address "$BIND_IP_ADDRESS" --standalone --keyring-backend test
def create_standalone(self, chainnet, moniker, mnemonic, bind_ip_address, keyring_backend=None):
args = ["node", "create", chainnet, moniker, mnemonic, bind_ip_address]
return self.sifgen_exec(args, keyring_backend=keyring_backend)
def sifgen_exec(self, args, keyring_backend=None, cwd=None, env=None):
args = [self.binary] + args + \
(["--keyring-backend", keyring_backend] if keyring_backend else [])
return self.cmd.execst(args, cwd=cwd, env=env)
class Ebrelayer:
def __init__(self, cmd):
self.cmd = cmd
self.binary = "ebrelayer"
def peggy2_build_ebrelayer_cmd(self, init_what, network_descriptor, tendermint_node, web3_provider,
bridge_registry_contract_address, validator_mnemonic, chain_id, node=None, keyring_backend=None,
sign_with=None, symbol_translator_file=None, relayerdb_path=None, log_format=None, extra_args=None,
ethereum_private_key=None, ethereum_address=None, home=None, cwd=None
):
env = _env_for_ethereum_address_and_key(ethereum_address, ethereum_private_key)
args = [
self.binary,
init_what,
"--network-descriptor", str(network_descriptor), # Network descriptor for the chain (31337)
"--tendermint-node", tendermint_node, # URL to tendermint node
"--web3-provider", web3_provider, # Ethereum web3 service address (ws://localhost:8545/)
"--bridge-registry-contract-address", bridge_registry_contract_address,
"--validator-mnemonic", validator_mnemonic,
"--chain-id", chain_id # chain ID of tendermint node (localnet)
] + \
(extra_args if extra_args else []) + \
(["--node", node] if node else []) + \
(["--keyring-backend", keyring_backend] if keyring_backend else []) + \
(["--from", sign_with] if sign_with else []) + \
(["--relayerdb-path", relayerdb_path] if relayerdb_path else []) + \
(["--home", home] if home else []) + \
(["--symbol-translator-file", symbol_translator_file] if symbol_translator_file else []) + \
(["--log_format", log_format] if log_format else [])
return buildcmd(args, env=env, cwd=cwd)
# Legacy stuff - pre-peggy2
# Called from IntegrationContext
def init(self, tendermind_node, web3_provider, bridge_registry_contract_address, validator_moniker,
validator_mnemonic, chain_id, ethereum_private_key=None, ethereum_address=None, gas=None, gas_prices=None,
node=None, keyring_backend=None, sign_with=None, symbol_translator_file=None, relayerdb_path=None,
trace=True, cwd=None, log_file=None
):
env = _env_for_ethereum_address_and_key(ethereum_address, ethereum_private_key)
args = [self.binary, "init", tendermind_node, web3_provider, bridge_registry_contract_address,
validator_moniker, " ".join(validator_mnemonic), "--chain-id={}".format(chain_id)] + \
(["--gas", str(gas)] if gas is not None else []) + \
(["--gas-prices", sif_format_amount(*gas_prices)] if gas_prices is not None else []) + \
(["--node", node] if node is not None else []) + \
(["--keyring-backend", keyring_backend] if keyring_backend is not None else []) + \
(["--from", sign_with] if sign_with is not None else []) + \
(["--symbol-translator-file", symbol_translator_file] if symbol_translator_file else []) + \
(["--relayerdb-path", relayerdb_path] if relayerdb_path else []) + \
(["--trace"] if trace else [])
return self.cmd.popen(args, env=env, cwd=cwd, log_file=log_file)
# This is probably useful for any program that uses web3 library in the same way
# ETHEREUM_ADDRESS has to start with "0x" and ETHEREUM_PRIVATE_KEY has to be without "0x".
def _env_for_ethereum_address_and_key(ethereum_address, ethereum_private_key):
env = {}
if ethereum_private_key:
assert not ethereum_private_key.startswith("0x")
env["ETHEREUM_PRIVATE_KEY"] = ethereum_private_key
if ethereum_address:
assert ethereum_address.startswith("0x")
env["ETHEREUM_ADDRESS"] = ethereum_address
return env or None # Avoid passing empty environment
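# Illustrative wiring of the helpers above (all paths, monikers and token amounts are
# placeholders; `cmd` must be a runner object exposing execst()/popen()/
# spawn_asynchronous_process() as used throughout this file):
#   sifnoded = Sifnoded(cmd, home="/tmp/sifnoded-home")
#   sifnoded.init("my-moniker", "localnet")
#   acct = sifnoded.keys_add_1("validator1")
#   sifnoded.add_genesis_account(acct["address"], [(10**18, "rowan")])
#   proc = sifnoded.sifnoded_start(tcp_url="tcp://0.0.0.0:26657")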
|
StarcoderdataPython
|
9722709
|
from abc import ABCMeta, abstractmethod
class BaseInsSegModel(metaclass=ABCMeta):
"""Base model. The model object must inherit form this class."""
def __init__(self, project_id, data_dir, **kwargs):
"""
:param project_id: The project id that use this model.
:param train_dir: the dataset for training .
:param test_dir: the dataset for testing
:param
:param
:param kwargs: Other necessary params.
"""
self._proj_id = project_id
self.data_dir = data_dir
@abstractmethod
def fit(self, **kwargs):
"""Train the model use the all train dataset.
"""
def fit_on_subset(self, **kwargs):
"""Train the model use the subset train dataset.
"""
@abstractmethod
def predict_proba(self, data_dir, **kwargs):
"""proba predict.
:param data_dir: str
The path to the data folder.
:param kwargs: list of dict
Other necessary params.
"""
@abstractmethod
def predict(self, data_dir, **kwargs):
"""predict class label.
:param data_dir: str
The path to the data folder.
"""
@abstractmethod
def test(self, data_dir, label, batch_size:'int', **kwargs):
"""test the model.
return the miou
"""
@abstractmethod
def save_model(self):
"""Save the model after using (distributed system)."""
|
StarcoderdataPython
|
6621735
|
<gh_stars>1-10
import sys
sys.path.append(__path__[0])
from mef90EXODUS import *
from mef90ABAQUS import *
from mef90GMSH import *
from mef90MSC import *
|
StarcoderdataPython
|
8087013
|
#!/usr/bin/env python
#-*- encoding: utf-8 -*-
"""
"""
from __future__ import print_function, division
import os
def main():
# OSX Cleanup
dirpath = os.path.dirname(os.path.abspath(__file__))
for root, dirs, files in os.walk(dirpath):
for d in dirs:
if d in ['__pycache__']:
path = os.path.join(root, d)
print("Removing: {}".format(path))
os.system('rm -rf {}'.format(path))
print("Removing dKeras.egg-info")
os.system('rm -rf {}'.format(os.path.join(dirpath,'dKeras.egg-info')))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3256178
|
from django.urls import reverse
from django.test import TestCase
from sso.organisations.models import OrganisationCountry
from sso.test.client import SSOClient
class AccountsTest(TestCase):
fixtures = ['roles.json', 'test_l10n_data.json', 'app_roles.json', 'test_organisation_data.json', 'test_app_roles.json', 'test_user_data.json']
def setUp(self):
self.client = SSOClient()
def tearDown(self):
pass
def test_app_admin_user_list(self):
result = self.client.login(username='ApplicationAdmin', password='<PASSWORD>')
self.assertEqual(result, True)
response = self.client.get(reverse('accounts:app_admin_user_list'), data={'country': OrganisationCountry.objects.first().pk})
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('accounts:app_admin_user_list'), data={'country': 99999})
self.assertEqual(response.status_code, 200)
def test_app_admin_update_user(self):
result = self.client.login(username='ApplicationAdmin', password='<PASSWORD>')
self.assertEqual(result, True)
# User.objects.get()
response = self.client.get(reverse('accounts:app_admin_user_list'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, reverse('accounts:app_admin_update_user', kwargs={'uuid': 'a8992f0348634f76b0dac2de4e4c83ee'}))
response = self.client.get(reverse('accounts:app_admin_update_user', kwargs={'uuid': 'a8992f0348634f76b0dac2de4e4c83ee'}))
self.assertEqual(response.status_code, 200)
|
StarcoderdataPython
|
8175030
|
from __future__ import print_function
from traceback import print_tb
from tk_utils import *
from imutils.video import VideoStream
import time,subprocess
out = "/home/nishantg96/ZeMA/"
rospy.init_node('myNodeName')
# cam = VideoStream(src=0,resolution=(1280,720)).start()
# cam2 = VideoStream(src=1,resolution=(1280,720)).start()
width = 1280
height = 720
cap = cv2.VideoCapture(0, cv2.CAP_V4L2)
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
cap2 = cv2.VideoCapture(2, cv2.CAP_V4L2)
cap2.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
cap2.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap2.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
mypid = os.getpid()
time.sleep(2.0)
app = camera_gui(cap,cap2,out)
app.root.mainloop()
print("Main Exited Successfully")
com = "pkill -9 -f main.py"
subprocess.Popen(com, stdout = subprocess.PIPE, shell = True)
|
StarcoderdataPython
|
4821129
|
<reponame>jsatt/python-catalog
#!/usr/bin/env python
from setuptools import setup
long_description = open('README.rst').read()
setup_args = dict(
name='pycatalog',
version='1.2.0',
description='Data structure for complex enumeration.',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/jsatt/python-catalog',
license="MIT License",
install_requires=[
'future'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
],
py_modules=['catalog'],
)
if __name__ == '__main__':
setup(**setup_args)
|
StarcoderdataPython
|
3576024
|
<filename>addons/website_sale_stock/controllers/main.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.website_sale.controllers.main import WebsiteSale
from odoo import http,_
from odoo.http import request
from odoo.exceptions import ValidationError
class WebsiteSaleStock(WebsiteSale):
@http.route()
def payment_transaction(self, *args, **kwargs):
""" Payment transaction override to double check cart quantities before
placing the order
"""
order = request.website.sale_get_order()
values = []
for line in order.order_line:
if line.product_id.type == 'product' and line.product_id.inventory_availability in ['always', 'threshold']:
cart_qty = sum(order.order_line.filtered(lambda p: p.product_id.id == line.product_id.id).mapped('product_uom_qty'))
avl_qty = line.product_id.with_context(warehouse=order.warehouse_id.id).virtual_available
if cart_qty > avl_qty:
values.append(_(
'You ask for %(quantity)s products but only %(available_qty)s is available',
quantity=cart_qty,
available_qty=avl_qty if avl_qty > 0 else 0
))
if values:
raise ValidationError('. '.join(values) + '.')
return super(WebsiteSaleStock, self).payment_transaction(*args, **kwargs)
|
StarcoderdataPython
|
5122028
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.sparse import csr_matrix, identity, kron
from scipy.sparse.linalg import eigs, eigsh
import itertools
from scipy.linalg import block_diag, eig, expm, eigh
from scipy.sparse import save_npz, load_npz, csr_matrix, csc_matrix
import scipy.sparse as sp
from scipy.special import binom
import yaml
import copy
import warnings
import os
import time
from .Hamiltonians import DisplacedAnharmonicOscillator, PolymerVibrations, Polymer, DiagonalizeHamiltonian, LadderOperators
from .general_Liouvillian_classes import LiouvillianConstructor
class OpenPolymer(Polymer,LiouvillianConstructor):
def __init__(self,site_energies,site_couplings,dipoles):
"""Extends Polymer object to an open systems framework,
using the Lindblad formalism to describe bath coupling
"""
super().__init__(site_energies,site_couplings,dipoles)
# Values that need to be set
self.optical_dephasing_gamma = 0
self.optical_relaxation_gamma = 0
self.site_to_site_dephasing_gamma = 0
self.site_to_site_relaxation_gamma = 0
self.exciton_relaxation_gamma = 0
self.exciton_exciton_dephasing_gamma = 0
self.kT = 0
def optical_dephasing_operator(self):
total_deph = self.occupied_list[0].copy()
for i in range(1,len(self.occupied_list)):
total_deph += self.occupied_list[i]
return total_deph
def optical_dephasing_instructions(self):
O = self.optical_dephasing_operator()
gamma = self.optical_dephasing_gamma
return self.make_Lindblad_instructions(gamma,O)
def optical_dephasing_Liouvillian(self):
instructions = self.optical_dephasing_instructions()
return self.make_Liouvillian(instructions)
def boltzmann_factors(self,E1,E2):
if E1 == E2:
return 0.5,0.5
if E1 < E2:
return self.boltzmann_factors_ordered_inputs(E1,E2)
else:
E1_to_E2, E2_to_E1 = self.boltzmann_factors_ordered_inputs(E2,E1)
return E2_to_E1, E1_to_E2
def boltzmann_factors_ordered_inputs(self,E1,E2):
"""E1 must be less than E2"""
if self.kT == 0:
return 1, 0
Z = np.exp(-E1/self.kT) + np.exp(-E2/self.kT)
if np.isclose(Z,0):
E2_to_E1 = 1
E1_to_E2 = 0
else:
E2_to_E1 = np.exp(-E1/self.kT)/Z
E1_to_E2 = np.exp(-E2/self.kT)/Z
return E2_to_E1, E1_to_E2
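# Worked example (illustrative numbers): E1 = 0, E2 = 1, kT = 1 gives
# Z = exp(0) + exp(-1) ~= 1.368, so E2_to_E1 ~= 0.731 and E1_to_E2 ~= 0.269,
# i.e. downhill transfer dominates but uphill transfer remains thermally allowed.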
def optical_relaxation_instructions(self):
eg = 0
ins_list = []
gamma = self.optical_relaxation_gamma
for n in range(len(self.energies)):
en = self.energies[n]
bg, bn = self.boltzmann_factors(eg,en)
O = self.up_list[n]
instructions2 = self.make_Lindblad_instructions(gamma * bg,O.T)
ins_list += instructions2
if np.isclose(bn,0):
pass
else:
instructions1 = self.make_Lindblad_instructions(gamma * bn,O)
ins_list += instructions1
return ins_list
def optical_relaxation_Liouvillian(self):
inst_list = self.optical_relaxation_instructions()
L = self.make_Liouvillian(inst_list)
return L
def site_to_site_relaxation_instructions(self):
nm = itertools.combinations(range(len(self.energies)),2)
i = 0
ins_list = []
gamma = self.site_to_site_relaxation_gamma
for n,m in nm:
en = self.energies[n]
em = self.energies[m]
bn,bm = self.boltzmann_factors(en,em)
O = self.exchange_list[i]
instructions1 = self.make_Lindblad_instructions(gamma * bn,O)
instructions2 = self.make_Lindblad_instructions(gamma * bm,O.T)
ins_list += instructions1
ins_list += instructions2
i+=1
return ins_list
def site_to_site_relaxation_Liouvillian(self):
inst_list = self.site_to_site_relaxation_instructions()
L = self.make_Liouvillian(inst_list)
return L
def site_to_site_dephasing_operator_list(self):
s_deph_list = []
for (i,j) in itertools.combinations(range(self.num_sites),2):
s_deph_list.append(self.occupied_list[i] - self.occupied_list[j])
return s_deph_list
def all_site_dephasing_instructions(self):
s_deph_list = self.site_to_site_dephasing_operator_list()
Lindblad_instruction_list = []
gamma = self.site_to_site_dephasing_gamma
for O in s_deph_list:
Lindblad_instruction_list += self.make_Lindblad_instructions(gamma,O)
return Lindblad_instruction_list
def all_site_dephasing_Liouvillian(self):
inst_list = self.all_site_dephasing_instructions()
L = self.make_Liouvillian(inst_list)
return L/(2*self.num_sites)
def set_electronic_dissipation_instructions(self):
inst_list = []
if self.optical_dephasing_gamma != 0:
inst_list += self.optical_dephasing_instructions()
if self.site_to_site_dephasing_gamma != 0:
inst_list += self.all_site_dephasing_instructions()
if self.site_to_site_relaxation_gamma != 0:
inst_list += self.site_to_site_relaxation_instructions()
if self.optical_relaxation_gamma != 0:
inst_list += self.optical_relaxation_instructions()
self.electronic_dissipation_instructions = inst_list
def make_manifold_hamiltonian_instructions(self,ket_manifold,bra_manifold):
Hket = self.get_electronic_hamiltonian(manifold_num = ket_manifold)
Hbra = self.get_electronic_hamiltonian(manifold_num = bra_manifold)
return self.make_commutator_instructions2(-1j*Hket,-1j*Hbra)
def make_total_Liouvillian(self):
drho = self.make_Liouvillian(self.make_manifold_hamiltonian_instructions('all','all'))
if self.num_sites > 1:
drho += self.all_exciton_dephasing_Liouvillian()
drho += self.exciton_relaxation_Liouvillian()
# drho += self.optical_relaxation_Liouvillian()
drho += self.optical_dephasing_Liouvillian()
self.L = drho
def eigfun(self,L,*,check_eigenvectors = True,invert = True,populations_only = False):
eigvals, eigvecs = np.linalg.eig(L)
eigvals = np.round(eigvals,12)
sort_indices = eigvals.argsort()
eigvals.sort()
eigvecs = eigvecs[:,sort_indices]
for i in range(eigvals.size):
max_index = np.argmax(np.abs(eigvecs[:,i]))
if np.real(eigvecs[max_index,i]) < 0:
eigvecs[:,i] *= -1
if eigvals[i] == 0:
# eigenvalues of 0 correspond to thermal distributions,
# which should have unit trace in the Hamiltonian space
if populations_only:
trace_norm = eigvecs[:,i].sum()
eigvecs[:,i] = eigvecs[:,i] / trace_norm
else:
shape = int(np.sqrt(eigvals.size))
trace_norm = eigvecs[:,i].reshape(shape,shape).trace()
if np.isclose(trace_norm,0):
pass
else:
eigvecs[:,i] = eigvecs[:,i] / trace_norm
if invert:
eigvecs_left = np.linalg.pinv(eigvecs)
else:
eigvals_left, eigvecs_left = np.linalg.eig(L.T)
eigvals_left = np.round(eigvals_left,12)
sort_indices_left = eigvals_left.argsort()
eigvals_left.sort()
eigvecs_left = eigvecs_left[:,sort_indices_left]
eigvecs_left = eigvecs_left.T
for i in range(eigvals_left.size):
norm = np.dot(eigvecs_left[i,:],eigvecs[:,i])
eigvecs_left[i,:] *= 1/norm
if check_eigenvectors:
LV = L.dot(eigvecs)
D = eigvecs_left.dot(LV)
if np.allclose(D,np.diag(eigvals),rtol=1E-10,atol=1E-10):
pass
else:
warnings.warn('Using eigenvectors to diagonalize Liouvillian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))
self.eigenvalues = eigvals
self.eigenvectors = {'left':eigvecs_left,'right':eigvecs}
return eigvals, eigvecs, eigvecs_left
def save_L(self,dirname):
save_npz(os.path.join(dirname,'L.npz'),csr_matrix(self.L))
def save_L_by_manifold(self):
np.savez(os.path.join(self.base_path,'L.npz'),**self.L_by_manifold)
def save_eigsystem(self,dirname):
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds = self.eigenvectors['right'])
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds = self.eigenvectors['left'])
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds = self.eigenvalues)
def save_mu(self,dirname,*,mask=True):
evl = self.eigenvectors['left']
ev = self.eigenvectors['right']
II = np.eye(self.mu.shape[0])
mu_ket = np.kron(self.mu,II.T)
mu_bra = np.kron(II,self.mu.T)
mu_mask_tol = 10
mu_ket_t = np.dot(np.dot(evl,mu_ket),ev)
mu_ket_3d = np.zeros((mu_ket_t.shape[0],mu_ket_t.shape[0],3),dtype='complex')
mu_ket_3d[:,:,0] = mu_ket_t
mu_bra_t = np.dot(np.dot(evl,mu_bra),ev)
mu_bra_3d = np.zeros((mu_bra_t.shape[0],mu_bra_t.shape[0],3),dtype='complex')
mu_bra_3d[:,:,0] = mu_bra_t
if mask:
ket_mask = np.zeros(mu_ket_t.shape,dtype='bool')
ket_mask[:,:] = np.round(mu_ket_t,mu_mask_tol)[:,:]
mu_ket_t_masked = mu_ket_t * ket_mask
mu_ket_3d_masked = np.zeros((mu_ket_t.shape[0],mu_ket_t.shape[0],3),dtype='complex')
mu_ket_3d_masked[:,:,0] = mu_ket_t_masked
bra_mask = np.zeros(mu_bra_t.shape,dtype='bool')
bra_mask[:,:] = np.round(mu_bra_t,mu_mask_tol)[:,:]
mu_bra_t_masked = mu_bra_t * bra_mask
mu_bra_3d_masked = np.zeros((mu_ket_t.shape[0],mu_ket_t.shape[0],3),dtype='complex')
mu_bra_3d_masked[:,:,0] = mu_bra_t_masked
np.savez(os.path.join(dirname,'mu.npz'),ket=mu_ket_3d,bra=mu_bra_3d)
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
np.savez(os.path.join(dirname,'mu_boolean.npz'),ket=ket_mask,bra=bra_mask)
np.savez(os.path.join(dirname,'mu_pruned.npz'),ket=mu_ket_3d_masked,bra=mu_bra_3d_masked)
else:
np.savez(os.path.join(dirname,'mu.npz'),ket=mu_ket_3d,bra=mu_bra_3d)
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
def save_RWA_mu(self,dirname,*,mask=True):
evl = self.eigenvectors['left']
ev = self.eigenvectors['right']
II = np.eye(self.mu_ket_up.shape[0])
mu_ket_up = np.kron(self.mu_ket_up,II.T)
mu_ket_down = np.kron(self.mu_ket_up.T,II.T)
mu_bra_up = np.kron(II,self.mu_ket_up)
mu_bra_down = np.kron(II,self.mu_ket_up.T)
mu_mask_tol = 10
mu_ket_up_t = np.dot(np.dot(evl,mu_ket_up),ev)
mu_ket_up_3d = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
mu_ket_up_3d[:,:,0] = mu_ket_up_t
mu_bra_up_t = np.dot(np.dot(evl,mu_bra_up),ev)
mu_bra_up_3d = np.zeros((mu_bra_up_t.shape[0],mu_bra_up_t.shape[0],3),dtype='complex')
mu_bra_up_3d[:,:,0] = mu_bra_up_t
mu_ket_down_t = np.dot(np.dot(evl,mu_ket_down),ev)
mu_ket_down_3d = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
mu_ket_down_3d[:,:,0] = mu_ket_down_t
mu_bra_down_t = np.dot(np.dot(evl,mu_bra_down),ev)
mu_bra_down_3d = np.zeros((mu_bra_down_t.shape[0],mu_bra_down_t.shape[0],3),dtype='complex')
mu_bra_down_3d[:,:,0] = mu_bra_down_t
if mask:
ket_up_mask = np.zeros(mu_ket_up_t.shape,dtype='bool')
ket_up_mask[:,:] = np.round(mu_ket_up_t,mu_mask_tol)[:,:]
mu_ket_up_t_masked = mu_ket_up_t * ket_up_mask
mu_ket_up_3d_masked = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
mu_ket_up_3d_masked[:,:,0] = mu_ket_up_t_masked
bra_up_mask = np.zeros(mu_bra_up_t.shape,dtype='bool')
bra_up_mask[:,:] = np.round(mu_bra_up_t,mu_mask_tol)[:,:]
mu_bra_up_t_masked = mu_bra_up_t * bra_up_mask
mu_bra_up_3d_masked = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
mu_bra_up_3d_masked[:,:,0] = mu_bra_up_t_masked
ket_down_mask = np.zeros(mu_ket_down_t.shape,dtype='bool')
ket_down_mask[:,:] = np.round(mu_ket_down_t,mu_mask_tol)[:,:]
mu_ket_down_t_masked = mu_ket_down_t * ket_down_mask
mu_ket_down_3d_masked = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
mu_ket_down_3d_masked[:,:,0] = mu_ket_down_t_masked
bra_down_mask = np.zeros(mu_bra_down_t.shape,dtype='bool')
bra_down_mask[:,:] = np.round(mu_bra_down_t,mu_mask_tol)[:,:]
mu_bra_down_t_masked = mu_bra_down_t * bra_down_mask
mu_bra_down_3d_masked = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
mu_bra_down_3d_masked[:,:,0] = mu_bra_down_t_masked
np.savez(os.path.join(dirname,'mu.npz'),ket_up=mu_ket_up_3d,bra_up=mu_bra_up_3d,
ket_down=mu_ket_down_3d,bra_down=mu_bra_down_3d)
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
np.savez(os.path.join(dirname,'mu_boolean.npz'),ket_up=ket_up_mask,bra_up=bra_up_mask,
ket_down=ket_down_mask,bra_down=bra_down_mask)
np.savez(os.path.join(dirname,'mu_pruned.npz'),ket_up=mu_ket_up_3d_masked,
bra_up=mu_bra_up_3d_masked,ket_down=mu_ket_down_3d_masked,
bra_down=mu_bra_down_3d_masked)
else:
np.savez(os.path.join(dirname,'mu.npz'),ket_up=mu_ket_up_3d,bra_up=mu_bra_up_3d,
ket_down=mu_ket_down_3d,bra_down=mu_bra_down_3d)
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds=self.eigenvalues)
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds=ev)
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds=evl)
def save_RWA_mu_site_basis(self,dirname):
II = np.eye(self.mu_ket_up.shape[0])
mu_ket_up = np.kron(self.mu_ket_up,II.T)
mu_ket_down = np.kron(self.mu_ket_up.T,II.T)
mu_bra_up = np.kron(II,self.mu_ket_up)
mu_bra_down = np.kron(II,self.mu_ket_up.T)
mu_mask_tol = 10
mu_ket_up_3d = np.zeros((mu_ket_up.shape[0],mu_ket_up.shape[0],3),dtype='complex')
mu_ket_up_3d[:,:,0] = mu_ket_up
mu_bra_up_3d = np.zeros((mu_bra_up.shape[0],mu_bra_up.shape[0],3),dtype='complex')
mu_bra_up_3d[:,:,0] = mu_bra_up
mu_ket_down_3d = np.zeros((mu_ket_down.shape[0],mu_ket_down.shape[0],3),dtype='complex')
mu_ket_down_3d[:,:,0] = mu_ket_down
mu_bra_down_3d = np.zeros((mu_bra_down.shape[0],mu_bra_down.shape[0],3),dtype='complex')
mu_bra_down_3d[:,:,0] = mu_bra_down
np.savez(os.path.join(dirname,'mu_site_basis.npz'),ket_up=mu_ket_up_3d,bra_up=mu_bra_up_3d,
ket_down=mu_ket_down_3d,bra_down=mu_bra_down_3d)
class OpenPolymerVibrations(OpenPolymer):
def __init__(self,yaml_file,*,mask_by_occupation_num=True,force_detailed_balance=False,for_RKE=False):
"""Initial set-up is the same as for the Polymer class, but I also need
to unpack the vibrational_frequencies, which must be passed as a nested list.
Each site may have N vibrational modes, and each has a frequency, a displacement
and a frequency shift for the excited state
for sites a, b, ...
"""
with open(yaml_file) as yamlstream:
params = yaml.load(yamlstream,Loader=yaml.SafeLoader)
self.base_path = os.path.split(yaml_file)[0]
self.save_path = os.path.join(self.base_path,'open')
os.makedirs(self.save_path,exist_ok=True)
super().__init__(params['site_energies'],params['site_couplings'],np.array(params['dipoles']))
self.H_diagonalization_time = 0
self.L_diagonalization_time = 0
self.L_construction_time = 0
self.truncation_size = params['initial truncation size']
try:
self.maximum_manifold = params['maximum_manifold']
        except KeyError:
self.maximum_manifold = np.inf
self.maximum_manifold = min(self.maximum_manifold,self.num_sites)
self.params = params
self.set_bath_coupling()
if self.optical_relaxation_gamma != 0:
self.manifolds_separable = False
else:
self.manifolds_separable = True
self.set_electronic_dissipation_instructions()
self.occupation_num_mask = mask_by_occupation_num
self.set_vibrations()
self.set_vibrational_ladder_operators()
e_ham = self.extract_electronic_subspace(self.electronic_hamiltonian,0,self.maximum_manifold)
self.total_hamiltonian = np.kron(e_ham,self.vibrational_identity)
self.add_vibrations()
t0 = time.time()
self.set_H_eigsystem_by_manifold()
self.H_diagonalization_time = time.time() - t0
self.make_condon_mu()
self.make_condon_mu_dict()
if force_detailed_balance:
H_eigentransform = True
t0 = time.time()
self.all_instructions = self.make_commutator_instructions(-1j*self.total_hamiltonian)
self.set_L_by_manifold(H_eigentransform=H_eigentransform,add_eigenstate_relaxation_effects = False)
self.add_eigenstate_relaxation_effects()
self.add_eigenstate_optical_dephasing_effects()
self.L_construction_time = time.time() - t0
else:
H_eigentransform = False
t0 = time.time()
self.all_instructions = self.convert_electronic_instructions_to_full_instructions(self.electronic_dissipation_instructions)
self.all_instructions += self.make_commutator_instructions(-1j*self.total_hamiltonian)
self.all_instructions += self.vibrational_dissipation_instructions()
if self.manifolds_separable:
self.set_L_by_manifold(H_eigentransform=H_eigentransform)
else:
self.set_L()
self.L_construction_time = time.time() - t0
if for_RKE:
self.set_mu_by_manifold(H_eigentransform=H_eigentransform,L_eigentransform=False)
self.save_mu_by_manifold(pruned=False)
self.save_L_by_manifold()
self.save_rho0(H_eigentransform=H_eigentransform)
else:
t0 = time.time()
if self.manifolds_separable:
self.set_eigensystem_by_manifold(force_detailed_balance = force_detailed_balance)
self.set_mu_by_manifold(H_eigentransform=H_eigentransform)
self.save_mu_by_manifold(pruned=True)
self.save_eigensystem_by_manifold()
self.L_diagonalization_time = time.time() - t0
else:
self.set_eigensystem()
# self.set_mu()
# self.save_mu(pruned=True)
# self.save_eigensystem()
# self.L_diagonalization_time = time.time() - t0
self.save_timings()
def save_timings(self):
save_dict = {'H_diagonalization_time':self.H_diagonalization_time,
'L_diagonalization_time':self.L_diagonalization_time,
'L_construction_time':self.L_construction_time}
np.savez(os.path.join(self.save_path,'Liouvillian_timings.npz'),**save_dict)
def set_H_eigsystem_by_manifold(self):
self.H_eigenvalues = []
self.H_eigenvectors = []
for i in range(self.maximum_manifold+1):
e,v = np.linalg.eigh(self.extract_vibronic_manifold(self.total_hamiltonian,i))
            # Fix the sign convention of each eigenvector (use 'n' to avoid
            # shadowing the manifold index 'i' from the outer loop)
            for n in range(e.size):
                max_ind = np.argmax(np.abs(v[:,n]))
                if v[max_ind,n] < 0:
                    v[:,n] = v[:,n] * -1
self.H_eigenvalues.append(e)
self.H_eigenvectors.append(v)
def save_rho0(self,*,H_eigentransform=False):
H_size = self.H_eigenvalues[0].size
if H_size == 1:
rho0 = np.array([[1]])
elif self.kT == 0:
rho0 = np.zeros((H_size,H_size))
rho0[0,0] = 1
else:
Z = np.sum(np.exp(-self.H_eigenvalues[0]/self.kT))
rho0_diag = np.exp(-self.H_eigenvalues[0]/self.kT)/Z
rho0 = np.diag(rho0_diag)
if H_eigentransform:
# Already in the eigenbasis
pass
else:
# Go back to original basis
v = self.H_eigenvectors[0]
rho0 = v.dot(rho0.dot(v.T))
rho0 = rho0.flatten()
np.save(os.path.join(self.base_path,'rho0.npy'),rho0)
def save_L(self):
save_npz(os.path.join(self.save_path,'L.npz'),csr_matrix(self.L))
def save_L_by_manifold(self):
np.savez(os.path.join(self.save_path,'L.npz'),**self.L_by_manifold)
def eigfun2(self,ket_manifold_num,bra_manifold_num,*,check_eigenvectors = True):
key = str(ket_manifold_num) + str(bra_manifold_num)
L = self.L_by_manifold[key]
E = L.diagonal().copy()
V = np.eye(E.size,dtype='complex')
VL = V.copy()
if ket_manifold_num == bra_manifold_num:
size = self.H_eigenvalues[ket_manifold_num].size
pop_inds = np.arange(size)*(size+1)
L_pop = L[pop_inds,:]
L_pop = L_pop[:,pop_inds]
e, v, vl = self.eigfun(L_pop,populations_only=True)
E[pop_inds] = e[:]
for i,j in zip(pop_inds,range(len(pop_inds))):
V[pop_inds,i] = v[:,j]
VL[pop_inds,i] = vl[:,j]
if check_eigenvectors:
LV = L.dot(V)
D = VL.dot(LV)
if np.allclose(D,np.diag(E),rtol=1E-10,atol=1E-10):
pass
else:
warnings.warn('Using eigenvectors to diagonalize Liouvillian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(E)))))
self.eigenvalues = E
self.eigenvectors = {'left':VL,'right':V}
return E,V,VL
def vibrational_occupation_to_indices(self,vibration,occ_num,manifold_num):
single_mode_occ = np.arange(self.truncation_size)
vib_occ = self.vibrational_vector_of_ones_kron(vibration,single_mode_occ)
masked_single_mode_occ = vib_occ[self.vibrational_mask]
electronic_manifold_hamiltonian = self.get_electronic_hamiltonian(manifold_num = manifold_num)
elec_size = electronic_manifold_hamiltonian.shape[0]
masked_single_mode_occ = np.kron(np.ones(elec_size),masked_single_mode_occ)
return np.where(masked_single_mode_occ == occ_num)[0]
def electronic_occupation_to_indices(self,site_num,manifold_num):
single_mode_occ = np.arange(2)
elec_occ = self.electronic_vector_of_ones_kron(site_num,single_mode_occ)
mask = self.electronic_manifold_mask(manifold_num)
masked_elec_occ = elec_occ[mask]
masked_elec_occ = np.kron(masked_elec_occ,np.ones(self.vibrational_mask[0].size))
return np.where(masked_elec_occ == 1)[0]
def get_vibrational_relaxation_rates(self,manifold_num):
e = self.H_eigenvalues[manifold_num]
rates = np.zeros((e.size,e.size))
for i in range(e.size):
for j in range(e.size):
for n in range(self.num_vibrations):
if j > i:
rates[i,j] += self.single_vibrational_relaxation_rate(i,j,n,manifold_num)
return rates
def single_vibrational_relaxation_rate(self,i,j,vibration,manifold_num):
vi = self.H_eigenvectors[manifold_num][:,i]
vj = self.H_eigenvectors[manifold_num][:,j]
rate = 0
for k in range(self.truncation_size):
k_inds = self.vibrational_occupation_to_indices(vibration,k,manifold_num)
kp1_inds = self.vibrational_occupation_to_indices(vibration,k+1,manifold_num)
for k_ind,kp1_ind in zip(k_inds,kp1_inds):
rate = rate + np.abs(vi[k_ind])**2 * np.abs(vj[kp1_ind])**2*np.sqrt(k+1)
return rate
def get_electronic_relaxation_rates(self,a,b,manifold_num):
e = self.H_eigenvalues[manifold_num]
rates = np.zeros((e.size,e.size))
for i in range(e.size):
for j in range(e.size):
if j > i:
rates[i,j] += self.single_electronic_relaxation_rate(i,j,a,b,manifold_num)
return rates
def get_all_electronic_relaxation_rates(self,manifold_num):
"""Treats all sites as having the same relaxation rates
"""
e = self.H_eigenvalues[manifold_num]
rates = np.zeros((e.size,e.size))
for i in range(e.size):
for j in range(e.size):
if j > i:
for a in range(len(self.energies)):
Ea = self.energies[a]
for b in range(len(self.energies)):
Eb = self.energies[b]
if Eb > Ea:
rates[i,j] += self.single_electronic_relaxation_rate(i,j,a,b,manifold_num)
return rates
def get_all_relaxation_rates(self,manifold_num):
rates = self.vibrational_gamma * self.get_vibrational_relaxation_rates(manifold_num)
rates = rates + self.site_to_site_relaxation_gamma * self.get_all_electronic_relaxation_rates(manifold_num)
return rates
def all_eigenstate_relaxation_instructions_by_manifold(self,manifold_num):
rates = self.get_all_relaxation_rates(manifold_num)
E = self.H_eigenvalues[manifold_num]
ins = []
for i in range(rates.shape[0]):
for j in range(rates.shape[1]):
if j > i:
O = np.zeros(rates.shape)
O[i,j] = 1
down, up = self.boltzmann_factors(E[i],E[j])
down = down * rates[i,j]
up = up * rates[i,j]
ins += self.make_Lindblad_instructions(down,O)
if np.isclose(up,0):
pass
else:
ins += self.make_Lindblad_instructions(up,O.T)
return ins
def all_eigenstate_relaxation_instructions_by_coherence(self,ket_manifold_num,bra_manifold_num):
if ket_manifold_num == bra_manifold_num:
return self.all_eigenstate_relaxation_instructions_by_manifold(ket_manifold_num)
ket_rates = self.get_all_relaxation_rates(ket_manifold_num)
E_ket = self.H_eigenvalues[ket_manifold_num]
bra_rates = self.get_all_relaxation_rates(bra_manifold_num)
E_bra = self.H_eigenvalues[bra_manifold_num]
ins = []
Obra = np.zeros(bra_rates.shape)
for i in range(ket_rates.shape[0]):
for j in range(ket_rates.shape[1]):
if j > i:
Oket = np.zeros(ket_rates.shape)
Oket[i,j] = 1
down,up = self.boltzmann_factors(E_ket[i],E_ket[j])
down = down * ket_rates[i,j]
up = up * ket_rates[i,j]
ins += self.make_Lindblad_instructions2_Obra0(down,Oket,Obra)
if np.isclose(up,0):
pass
else:
ins += self.make_Lindblad_instructions2_Obra0(up,Oket.T,Obra)
Oket = np.zeros(ket_rates.shape)
for i in range(bra_rates.shape[0]):
for j in range(bra_rates.shape[1]):
if j > i:
Obra = np.zeros(bra_rates.shape)
Obra[i,j] = 1
down,up = self.boltzmann_factors(E_bra[i],E_bra[j])
down = down * bra_rates[i,j]
up = up * bra_rates[i,j]
ins += self.make_Lindblad_instructions2_Oket0(down,Oket,Obra)
if np.isclose(up,0):
pass
else:
ins += self.make_Lindblad_instructions2_Oket0(up,Oket,Obra.T)
return ins
def single_electronic_relaxation_rate(self,i,j,a,b,manifold_num):
vi = self.H_eigenvectors[manifold_num][:,i]
vj = self.H_eigenvectors[manifold_num][:,j]
a_inds = self.electronic_occupation_to_indices(a,manifold_num)
b_inds = self.electronic_occupation_to_indices(b,manifold_num)
rate = np.sum(np.abs(vi[a_inds])**2) * np.sum(np.abs(vj[b_inds])**2)
return rate
def make_eigenstate_relaxation_Lindblad_all_rates(self,rates,manifold_num):
"""From j to i. Factor of 0.5 matches my previous definition of Lindblad formalism"""
E = self.H_eigenvalues[manifold_num]
size = E.size
pop_inds = np.arange(size)*(size+1)
pop_subspace = np.zeros((pop_inds.size,pop_inds.size))
L_diagonal = np.zeros((size,size))
for i in range(size):
for j in range(size):
if j > i:
down,up = self.boltzmann_factors(E[i],E[j])
down = down * rates[i,j]
up = up * rates[i,j]
pop_subspace[j,j] += -0.5*down
pop_subspace[i,j] += 0.5*down
pop_subspace[i,i] += -0.5*up
pop_subspace[j,i] += 0.5*up
L_diagonal[j,:] += -0.25*down
L_diagonal[:,j] += -0.25*down
L_diagonal[j,j] += -0.5*down
L_diagonal[i,:] += -0.25*up
L_diagonal[:,i] += -0.25*up
L_diagonal[i,i] += -0.5*up
L_total = np.diag(L_diagonal.ravel())
for i,j in zip(pop_inds,np.arange(pop_inds.size)):
L_total[i,pop_inds] = pop_subspace[j,:]
return L_total
def make_eigenstate_relaxation_Lindblad_all_rates_by_coherence(self,ket_rates,bra_rates,ket_manifold_num,bra_manifold_num):
"""From j to i. Factor of 0.5 matches my previous definition of Lindblad formalism"""
if ket_manifold_num == bra_manifold_num:
return self.make_eigenstate_relaxation_Lindblad_all_rates(ket_rates,ket_manifold_num)
E_ket = self.H_eigenvalues[ket_manifold_num]
E_bra = self.H_eigenvalues[bra_manifold_num]
ket_size = E_ket.size
bra_size = E_bra.size
L_diagonal = np.zeros((ket_size,bra_size))
for i in range(ket_size):
for j in range(ket_size):
if j > i:
down,up = self.boltzmann_factors(E_ket[i],E_ket[j])
down = down * ket_rates[i,j]
up = up * ket_rates[i,j]
L_diagonal[j,:] += -0.25*down
L_diagonal[i,:] += -0.25*up
for i in range(bra_size):
for j in range(bra_size):
if j > i:
down,up = self.boltzmann_factors(E_bra[i],E_bra[j])
down = down * bra_rates[i,j]
                    up = up * bra_rates[i,j]
L_diagonal[:,j] += -0.25*down
L_diagonal[:,i] += -0.25*up
L_total = np.diag(L_diagonal.ravel())
return L_total
def add_eigenstate_relaxation_effects(self):
for k in range(self.maximum_manifold+1):
rates_k = self.get_all_relaxation_rates(k)
for l in range(self.maximum_manifold+1):
rates_l = self.get_all_relaxation_rates(l)
key = str(k) + str(l)
L = self.L_by_manifold[key]
L += self.make_eigenstate_relaxation_Lindblad_all_rates_by_coherence(rates_k,rates_l,k,l)
def add_eigenstate_optical_dephasing_effects(self):
for k in range(self.maximum_manifold+1):
for l in range(self.maximum_manifold+1):
if k == l:
pass
else:
key = str(k) + str(l)
L = self.L_by_manifold[key]
L += self.make_eigenstate_optical_dephasing_Lindblad(k,l)
def make_eigenstate_relaxation_Lindblad(self,gamma,i,j,manifold_num):
"""From j to i. Factor of 0.5 matches my previous definition of Lindblad formalism"""
size = self.H_eigenvalues[manifold_num].size
pop_inds = np.arange(size)*(size+1)
pop_subspace = np.zeros((pop_inds.size,pop_inds.size))
pop_subspace[j,j] = -0.5
pop_subspace[i,j] = 0.5
L_diagonal = np.zeros((size,size))
L_diagonal[j,:] = -0.25
L_diagonal[:,j] = -0.25
L_diagonal[j,j] = -0.5
L_total = np.diag(L_diagonal.ravel())
for i,j in zip(pop_inds,np.arange(pop_inds.size)):
L_total[i,pop_inds] = pop_subspace[j,:]
return gamma*L_total
def make_eigenstate_relaxation_Lindblad_optical_coherence(self,gamma,i,j,ket_manifold_num,bra_manifold_num,*,
relaxation_in_ket = True):
"""From j to i. Factor of 0.25 matches my previous definition of Lindblad formalism"""
ket_size = self.H_eigenvalues[ket_manifold_num].size
bra_size = self.H_eigenvalues[bra_manifold_num].size
L_diagonal = np.zeros((ket_size,bra_size))
if relaxation_in_ket:
L_diagonal[j,:] = -0.25
else:
L_diagonal[:,j] = -0.25
L_total = np.diag(L_diagonal.ravel())
return gamma*L_total
def make_eigenstate_optical_dephasing_Lindblad(self,ket_manifold_num,bra_manifold_num):
"""Use a constant dephasing rate for all states: my best idea is to
        create the dephasing Lindblad for the electronic space only, and use it to
fill in a single rate on the diagonal of the Liouvillian. The trick is to get
dephasing between the nth and n+kth manifold right, when k > 1 (k = 1 is simply
gamma)"""
opt_deph = self.optical_dephasing_Liouvillian().diagonal().reshape(self.electronic_hamiltonian.shape)
opt_deph = self.extract_coherence(opt_deph,ket_manifold_num,bra_manifold_num).ravel()
if np.allclose(opt_deph[0],opt_deph):
pass
else:
raise Exception('All optical dephasing rates are not the same, unknown error')
ket_size = self.H_eigenvalues[ket_manifold_num].size
bra_size = self.H_eigenvalues[bra_manifold_num].size
opt_deph = np.ones((ket_size,bra_size),dtype='complex') * opt_deph[0]
return np.diag(opt_deph.ravel())
def set_bath_coupling(self):
try:
self.site_to_site_relaxation_gamma = self.params['bath']['site_to_site_relaxation_gamma']
except KeyError:
pass
try:
self.site_to_site_dephasing_gamma = self.params['bath']['site_to_site_dephasing_gamma']
except KeyError:
pass
try:
self.optical_dephasing_gamma = self.params['bath']['optical_dephasing_gamma']
except KeyError:
pass
try:
self.optical_relaxation_gamma = self.params['bath']['optical_relaxation_gamma']
except KeyError:
pass
try:
self.vibrational_gamma = self.params['bath']['vibrational_gamma']
except KeyError:
self.vibrational_gamma = 0.1
try:
self.kT = self.params['bath']['kT']
except KeyError:
pass
def convert_electronic_instructions_to_full_instructions(self,inst_list):
new_inst_list = []
for ins in inst_list:
left,right = ins
if self.manifolds_separable == True:
pass
else:
left = self.extract_electronic_subspace(left,0,self.maximum_manifold)
right = self.extract_electronic_subspace(right,0,self.maximum_manifold)
left = np.kron(left,self.vibrational_identity)
right = np.kron(right,self.vibrational_identity)
new_inst_list.append((left,right))
return new_inst_list
def vibronic_manifold_mask(self,manifold_num):
"""Gets the indices of the Hilbert space that occupy a particular electronic
manifold, including all vibrational degrees of freedom from that manifold
"""
try:
vib_size = self.vibrational_mask[0].size
except AttributeError:
N = self.truncation_size
nv = self.num_vibrations
vib_size = N**nv
vib_ones = np.ones(vib_size,dtype='int')
vibronic_occupation_number = np.kron(self.electronic_total_occupation_number,vib_ones)
manifold_inds = np.where(vibronic_occupation_number == manifold_num)[0]
return manifold_inds
def extract_vibronic_coherence(self,O,manifold1,manifold2):
"""Returns result of projecting the Operator O onto manifold1
on the left and manifold2 on the right
"""
manifold1_inds = self.vibronic_manifold_mask(manifold1)
manifold2_inds = self.vibronic_manifold_mask(manifold2)
O = O[manifold1_inds,:]
O = O[:,manifold2_inds]
return O
def extract_vibronic_manifold(self,O,manifold_num):
"""Projects operator into the given electronic excitation manifold
"""
return self.extract_vibronic_coherence(O,manifold_num,manifold_num)
def set_L(self):
self.L = self.make_Liouvillian(self.all_instructions)
def set_eigensystem(self):
self.eigfun(self.L)
def set_L_by_manifold(self,*,H_eigentransform=False,add_eigenstate_relaxation_effects = False):
all_inst = self.all_instructions
self.L_by_manifold = dict()
for i in range(self.maximum_manifold+1):
for j in range(self.maximum_manifold+1):
key = str(i) + str(j)
inst = self.extract_coherence_instructions_from_full_instructions(all_inst,i,j,H_eigentransform=H_eigentransform)
if add_eigenstate_relaxation_effects:
inst += self.all_eigenstate_relaxation_instructions_by_coherence(i,j)
self.L_by_manifold[key] = self.make_Liouvillian(inst)
def set_eigensystem_by_manifold(self,*,force_detailed_balance = False):
self.right_eigenvectors_by_manifold = dict()
self.left_eigenvectors_by_manifold = dict()
self.eigenvalues_by_manifold = dict()
for i in range(self.maximum_manifold+1):
for j in range(self.maximum_manifold+1):
key = str(i) + str(j)
if force_detailed_balance:
e, r, l = self.eigfun2(i,j,check_eigenvectors = False)
else:
e, r, l = self.eigfun(self.L_by_manifold[key])
self.right_eigenvectors_by_manifold[key] = r
self.left_eigenvectors_by_manifold[key] = l
self.eigenvalues_by_manifold[key] = e
def make_mu_by_manifold_ket(self,old_manifold,change,*,H_eigentransform=False,L_eigentransform=True):
i,j = old_manifold
i2 = i + change
if i2 >= 0 and i2 <= self.maximum_manifold:
pass
else:
return None, None
if H_eigentransform:
Vold = self.H_eigenvectors[i]
Vnew = self.H_eigenvectors[i2]
else:
pass
j2 = j
bra_eye = np.eye(self.extract_vibronic_manifold(self.total_hamiltonian,j).shape[0])
old_key = str(i) + str(j)
new_key = str(i2) + str(j2)
all_mus = []
mu_dtype='float64'
for pol in self.pols:
full_mu = self.vibronic_mu_dict[pol]
mu = self.extract_vibronic_coherence(full_mu,i2,i)
if H_eigentransform:
mu = Vnew.T.dot(mu.dot(Vold))
mu = np.kron(mu,bra_eye)
if L_eigentransform:
l = self.left_eigenvectors_by_manifold[new_key]
r = self.right_eigenvectors_by_manifold[old_key]
mu = l.dot(mu.dot(r))
if np.allclose(np.imag(mu),0):
mu = np.real(mu)
else:
mu_dtype = 'complex128'
all_mus.append(mu)
mu_shape = all_mus[0].shape
mu_3d = np.zeros((mu_shape[0],mu_shape[1],3),dtype=mu_dtype)
for i in range(3):
mu_3d[:,:,i] = all_mus[i]
mu_key = old_key + '_to_' + new_key
return mu_key, mu_3d
def make_mu_by_manifold_bra(self,old_manifold,change,*,H_eigentransform=False,L_eigentransform=True):
i,j = old_manifold
j2 = j + change
if j2 >= 0 and j2 <= self.maximum_manifold:
pass
else:
return None, None
if H_eigentransform:
Vold = self.H_eigenvectors[j]
Vnew = self.H_eigenvectors[j2]
else:
pass
i2 = i
ket_eye = np.eye(self.extract_vibronic_manifold(self.total_hamiltonian,i).shape[0])
old_key = str(i) + str(j)
new_key = str(i2) + str(j2)
all_mus = []
mu_dtype='float64'
for pol in self.pols:
full_mu = self.vibronic_mu_dict[pol]
mu = self.extract_vibronic_coherence(full_mu,j,j2)
if H_eigentransform:
mu = Vold.T.dot(mu.dot(Vnew))
mu = np.kron(ket_eye,mu.T)
if L_eigentransform:
l = self.left_eigenvectors_by_manifold[new_key]
r = self.right_eigenvectors_by_manifold[old_key]
mu = l.dot(mu.dot(r))
if np.allclose(np.imag(mu),0):
mu = np.real(mu)
else:
mu_dtype = 'complex128'
all_mus.append(mu)
mu_shape = all_mus[0].shape
mu_3d = np.zeros((mu_shape[0],mu_shape[1],3),dtype=mu_dtype)
for i in range(3):
mu_3d[:,:,i] = all_mus[i]
mu_key = old_key + '_to_' + new_key
return mu_key, mu_3d
def append_mu_by_manifold(self,old_manifold,change,ket_flag,H_eigentransform=False,
L_eigentransform=True):
if ket_flag:
f = self.make_mu_by_manifold_ket
else:
f = self.make_mu_by_manifold_bra
key, mu = f(old_manifold,change,H_eigentransform=H_eigentransform,
L_eigentransform=L_eigentransform)
        if key is None:
pass
else:
boolean_mu = np.zeros(mu.shape[:2],dtype='bool')
boolean_mu[:,:] = np.round(np.sum(np.abs(mu)**2,axis=-1),12)
mu = mu * boolean_mu[:,:,np.newaxis]
self.boolean_mu_by_manifold[key] = boolean_mu
self.mu_by_manifold[key] = mu
def set_mu_by_manifold(self,H_eigentransform=False,L_eigentransform=True):
self.mu_by_manifold = dict()
self.boolean_mu_by_manifold = dict()
changes = [-1,1]
for i in range(self.maximum_manifold+1):
for j in range(self.maximum_manifold+1):
manifold = (i,j)
self.append_mu_by_manifold(manifold,1,True,H_eigentransform=H_eigentransform,L_eigentransform=L_eigentransform)
self.append_mu_by_manifold(manifold,-1,True,H_eigentransform=H_eigentransform,L_eigentransform=L_eigentransform)
self.append_mu_by_manifold(manifold,1,False,H_eigentransform=H_eigentransform,L_eigentransform=L_eigentransform)
self.append_mu_by_manifold(manifold,-1,False,H_eigentransform=H_eigentransform,L_eigentransform=L_eigentransform)
def save_mu_by_manifold(self,*,pruned=True):
if pruned:
np.savez(os.path.join(self.save_path,'mu_pruned.npz'),**self.mu_by_manifold)
np.savez(os.path.join(self.save_path,'mu_boolean.npz'),**self.boolean_mu_by_manifold)
else:
np.savez(os.path.join(self.save_path,'mu.npz'),**self.mu_by_manifold)
def save_eigensystem_by_manifold(self):
np.savez(os.path.join(self.save_path,'eigenvalues.npz'),**self.eigenvalues_by_manifold)
np.savez(os.path.join(self.save_path,'right_eigenvectors.npz'),**self.right_eigenvectors_by_manifold)
np.savez(os.path.join(self.save_path,'left_eigenvectors.npz'),**self.left_eigenvectors_by_manifold)
def extract_coherence_instructions_from_full_instructions(self,inst_list,manifold1,manifold2,*,H_eigentransform=False,trim = None):
new_inst_list = []
H1 = self.extract_vibronic_manifold(self.total_hamiltonian,manifold1)
H2 = self.extract_vibronic_manifold(self.total_hamiltonian,manifold2)
if H_eigentransform:
V1 = self.H_eigenvectors[manifold1]
V2 = self.H_eigenvectors[manifold2]
else:
V1 = np.eye(H1.shape[0])
V2 = np.eye(H2.shape[0])
for (left,right) in inst_list:
new_left = self.extract_vibronic_manifold(left,manifold1)
new_left = V1.T.dot(new_left.dot(V1))
new_right = self.extract_vibronic_manifold(right,manifold2)
new_right = V2.T.dot(new_right.dot(V2))
new_inst_list.append((new_left[:trim,:trim],new_right[:trim,:trim]))
return new_inst_list
def extract_manifold_instructions_from_full_instructions(self,inst_list,manifold):
return self.extract_coherence_instructions_from_full_instructions(inst_list,manifold,manifold)
def add_vibrations(self):
v0 = self.empty_vibrations
v1 = self.occupied_vibrations
self.vibrational_hamiltonian = np.zeros(self.total_hamiltonian.shape)
for i in range(len(v0)):
self.vibrational_hamiltonian += v0[i]
self.vibrational_hamiltonian += v1[i]
self.total_hamiltonian = self.total_hamiltonian + self.vibrational_hamiltonian
def set_vibrations(self):
vibration_params = self.params['vibrations']
# Vibrations in the ground manifold are assumed to be diagonal
emp_vibs = [self.construct_vibrational_hamiltonian(mode_dict,0)
for mode_dict in vibration_params]
self.num_vibrations = len(emp_vibs)
occ_vibs = [self.construct_vibrational_hamiltonian(mode_dict,1)
for mode_dict in vibration_params]
if self.occupation_num_mask:
self.set_vibrational_total_occupation_number()
else:
N = self.truncation_size
nv = self.num_vibrations
self.vibrational_mask = (np.arange(N**nv),)
self.vibrational_identity = np.eye(N**nv)
empty_vibrations = self.kron_up_vibrations(emp_vibs)
occupied_vibrations = self.kron_up_vibrations(occ_vibs)
self.empty_vibrations = []
self.occupied_vibrations = []
for i in range(self.num_vibrations):
site_index = vibration_params[i]['site_label']
if self.manifolds_separable == True:
empty = self.empty_list[site_index]
occupied = self.occupied_list[site_index]
else:
empty = self.extract_electronic_subspace(self.empty_list[site_index],0,self.maximum_manifold)
occupied = self.extract_electronic_subspace(self.occupied_list[site_index],0,self.maximum_manifold)
self.empty_vibrations.append(np.kron(empty,empty_vibrations[i]))
self.occupied_vibrations.append(np.kron(occupied,occupied_vibrations[i]))
def kron_up_vibrations(self,vibrations_list):
n = self.num_vibrations
if n == 1:
return vibrations_list
new_vibrations_list = []
for i in range(n):
new_vibration = self.vibration_identity_kron(i,vibrations_list[i])
if self.occupation_num_mask:
new_vibration = self.mask_vibrational_space(new_vibration)
new_vibrations_list.append(new_vibration)
return new_vibrations_list
def mask_vibrational_space(self,O):
inds = self.vibrational_mask
if type(O) is np.ndarray:
O = O[inds[0],:].copy()
O = O[:,inds[0]].copy()
return O
if type(O) is csr_matrix:
pass
else:
O = O.tocsr()
O = O[inds[0]]
O = O.transpose()
O = O[inds[0]]
O = O.transpose()
return O
def vibration_identity_kron(self,position,item):
"""Takes in a single vibrational hamiltonians and krons it with the correct
number of vibrational identities, inserting it into its position as indexed by its mode
position as specified in the input file"""
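        # Example (hypothetical sizes): with num_vibrations = 3, truncation_size = N
        # and position = 1, this returns np.kron(np.kron(I_N, item), I_N), i.e. `item`
        # acts on mode 1 while identities act on modes 0 and 2.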
identities = [np.eye(self.truncation_size) for n in
range(self.num_vibrations-1)]
identities.insert(position,item)
mat = identities.pop(0)
for next_item in identities:
mat = np.kron(mat,next_item)
return mat
def vibrational_vector_of_ones_kron(self,position,item):
"""Takes in a single vibrational hamiltonians and krons it with the correct
number of vibrational identities, inserting it into its position as indexed by its mode
position as specified in the input file"""
N = self.truncation_size
nv = self.num_vibrations
ones_list = [np.ones(N) for i in range(nv-1)]
ones_list.insert(position,item)
vec = ones_list.pop(0)
for next_item in ones_list:
vec = np.kron(vec,next_item)
return vec
def set_vibrational_total_occupation_number(self):
N = self.truncation_size
nv = self.num_vibrations
single_mode_occ = np.arange(N)
occ_num = self.vibrational_vector_of_ones_kron(0,single_mode_occ)
for i in range(1,nv):
occ_num += self.vibrational_vector_of_ones_kron(i,single_mode_occ)
self.vibrational_total_occupation_number = occ_num
self.vibrational_mask = np.where(occ_num < N)
self.vibrational_identity = np.eye(self.vibrational_mask[0].size)
def construct_vibrational_hamiltonian(self,single_mode,electronic_occupation):
"""For each vibrational mode, construct a list of sparse matrices defining the
vibrational hamiltonian for that mode in each excited state"""
w = single_mode['omega_g']
lam = single_mode['reorganization'][electronic_occupation]
d = single_mode['displacement'][electronic_occupation]
kin = single_mode['kinetic'][electronic_occupation]
pot = single_mode['potential'][electronic_occupation]
aho = DisplacedAnharmonicOscillator(self.truncation_size)
aho.set_ham(lam,d,kin,pot)
return 0.5 * w * aho.ham
def construct_vibrational_ladder_operator(self,single_mode,electronic_occupation):
"""Construct ladder operator given the electronic occupation for that site"""
w = single_mode['omega_g']
d = single_mode['displacement'][electronic_occupation]
lad = LadderOperators(self.truncation_size,disp=d,extra_size=0)
up = lad.ad
return up
def set_vibrational_ladder_operators(self):
vibration_params = self.params['vibrations']
emp_ups = []
occ_ups = []
for i in range(len(vibration_params)):
ad = self.construct_vibrational_ladder_operator(vibration_params[i],0)
emp_ups.append(ad)
ad = self.construct_vibrational_ladder_operator(vibration_params[i],1)
occ_ups.append(ad)
empty_ups = self.kron_up_vibrations(emp_ups)
occupied_ups = self.kron_up_vibrations(occ_ups)
self.empty_ups = []
self.occupied_ups = []
for i in range(self.num_vibrations):
site_index = vibration_params[i]['site_label']
if self.manifolds_separable == True:
empty = self.empty_list[site_index]
occupied = self.occupied_list[site_index]
else:
empty = self.extract_electronic_subspace(self.empty_list[site_index],0,self.maximum_manifold)
occupied = self.extract_electronic_subspace(self.occupied_list[site_index],0,self.maximum_manifold)
self.empty_ups.append(np.kron(empty,empty_ups[i]))
self.occupied_ups.append(np.kron(occupied,occupied_ups[i]))
def make_vibrational_dissipation_Liouvillian(self):
ins_list = self.vibrational_dissipation_instructions()
L = self.make_Liouvillian(ins_list)
return L
def vibrational_dissipation_instructions(self):
gamma = self.vibrational_gamma
instructions = []
for k in range(self.num_vibrations):
E = self.params['vibrations'][k]['omega_g']
if self.params['vibrations'][k]['potential'][1][0] != 1:
warnings.warn('The case of different excited and ground state frequencies is not properly handled by thermal dissipation')
if self.kT == 0:
N = 0
else:
N = 1/(np.exp(E/self.kT)-1)
O = (self.occupied_ups[k]).T + (self.empty_ups[k]).T
ins1 = self.make_Lindblad_instructions(gamma*(N+1),O)
instructions += ins1
if N == 0:
pass
else:
ins2 = self.make_Lindblad_instructions(gamma*N,O.T)
instructions += ins2
return instructions
def make_total_Liouvillian(self):
ins = self.make_commutator_instructions(-1j*self.total_hamiltonian)
self.L = self.make_Liouvillian(ins)
self.L += self.make_vibrational_dissipation_Liouvillian()
def make_condon_mu(self):
try:
vib_size = self.vibrational_mask[0].size
except AttributeError:
N = self.truncation_size
nv = self.num_vibrations
vib_size = N**nv
self.mu = np.kron(self.mu,np.eye(vib_size))
self.mu_ket_up = np.kron(self.mu_ket_up,np.eye(vib_size))
def make_condon_mu_dict(self):
try:
vib_size = self.vibrational_mask[0].size
except AttributeError:
N = self.truncation_size
nv = self.num_vibrations
vib_size = N**nv
self.vibronic_mu_dict = dict()
for pol in self.pols:
self.vibronic_mu_dict[pol] = np.kron(self.mu_dict[pol],np.eye(vib_size))
|
StarcoderdataPython
|
5106749
|
"""Miscellaneous API handlers."""
import copy
from typing import Dict, Any, AnyStr
from aiohttp import web
from dependency_injector.wiring import Provide
from newsfeed.containers import Container
async def get_status_handler(_: web.Request) -> web.Response:
"""Handle status requests."""
return web.json_response({'status': 'OK'})
async def get_openapi_schema_handler(
_: web.Request, *,
base_path: AnyStr = Provide[
Container.config.webapi.base_path
],
) -> web.Response:
"""Handle OpenAPI schema requests."""
schema: Dict[str, Any] = copy.deepcopy(OPENAPI_SCHEMA)
schema['servers'] = [{'url': base_path}]
return web.json_response(schema)
OPENAPI_SCHEMA = {
'openapi': '3.0.2',
'info': {
'version': '1.0.0',
'title': 'NewsFeed Microservice',
},
'paths': {
'/newsfeed/{newsfeed_id}/events/': {
'get': {
'summary': 'Return newsfeed events',
'operationId': 'get_newsfeed_events',
'tags': [
'Events',
],
'parameters': [
{
'in': 'path',
'name': 'newsfeed_id',
'required': True,
'schema': {
'type': 'string',
},
},
],
'responses': {
'200': {
'description': 'List of newsfeed events',
'content': {
'application/json': {
'schema': {
'$ref': '#/components/schemas/EventsList',
},
},
},
},
},
},
'post': {
'summary': 'Post newsfeed event',
'operationId': 'post_newsfeed_event',
'tags': [
'Events',
],
'parameters': [
{
'in': 'path',
'name': 'newsfeed_id',
'required': True,
'schema': {
'type': 'string',
},
},
],
'requestBody': {
'required': True,
'content': {
'application/json': {
'schema': {
'properties': {
'data': {
'type': 'object',
'example': {
'field_1': 'some_data',
'field_2': 'other_data',
'field_etc': 'etc_data',
},
},
},
},
},
},
},
'responses': {
'202': {
'description': 'Event has been successfully posted',
'content': {
'application/json': {
'schema': {
'$ref': '#/components/schemas/Event',
},
},
},
},
},
},
},
'/newsfeed/{newsfeed_id}/events/{event_id}/': {
'delete': {
'summary': 'Delete newsfeed event',
'operationId': 'delete_newsfeed_event',
'tags': [
'Events',
],
'parameters': [
{
'in': 'path',
'name': 'newsfeed_id',
'required': True,
'schema': {
'type': 'string',
},
},
{
'in': 'path',
'name': 'event_id',
'required': True,
'schema': {
'type': 'string',
'format': 'uuid',
},
},
],
'responses': {
'204': {
'description': 'Newsfeed event has been successfully deleted',
},
},
},
},
'/newsfeed/{newsfeed_id}/subscriptions/': {
'get': {
'summary': 'Return newsfeed subscriptions',
'operationId': 'get_newsfeed_subscriptions',
'tags': [
'Subscriptions',
],
'parameters': [
{
'in': 'path',
'name': 'newsfeed_id',
'required': True,
'schema': {
'type': 'string',
},
},
],
'responses': {
'200': {
'description': 'List of newsfeed subscriptions',
'content': {
'application/json': {
'schema': {
'$ref': '#/components/schemas/SubscriptionsList',
},
},
},
},
},
},
'post': {
'summary': 'Post newsfeed subscription',
'operationId': 'post_newsfeed_subscription',
'tags': [
'Subscriptions',
],
'parameters': [
{
'in': 'path',
'name': 'newsfeed_id',
'required': True,
'schema': {
'type': 'string',
},
},
],
'requestBody': {
'required': True,
'content': {
'application/json': {
'schema': {
'properties': {
'to_newsfeed_id': {
'type': 'string',
'example': '123',
},
},
},
},
},
},
'responses': {
'201': {
'description': 'Subscription has been successfully created',
'content': {
'application/json': {
'schema': {
'$ref': '#/components/schemas/Subscription',
},
},
},
},
},
},
},
'/newsfeed/{newsfeed_id}/subscriptions/{subscription_id}/': {
'delete': {
'summary': 'Delete newsfeed subscription',
'operationId': 'delete_newsfeed_subscription',
'tags': [
'Subscriptions',
],
'parameters': [
{
'in': 'path',
'name': 'newsfeed_id',
'required': True,
'schema': {
'type': 'string',
},
},
{
'in': 'path',
'name': 'subscription_id',
'required': True,
'schema': {
'type': 'string',
'format': 'uuid',
},
},
],
'responses': {
'204': {
'description': 'Newsfeed subscription has been successfully deleted',
},
},
},
},
'/newsfeed/{newsfeed_id}/subscribers/subscriptions/': {
'get': {
'summary': 'Return newsfeed subscriber subscriptions',
'operationId': 'get_newsfeed_subscriber_subscriptions',
'tags': [
'Subscriptions',
],
'parameters': [
{
'in': 'path',
'name': 'newsfeed_id',
'required': True,
'schema': {
'type': 'string',
},
},
],
'responses': {
'200': {
'description': 'List of newsfeed subscriber subscriptions',
'content': {
'application/json': {
'schema': {
'$ref': '#/components/schemas/SubscriptionsList',
},
},
},
},
},
},
},
'/status/': {
'get': {
'summary': 'Return current microservice status',
'operationId': 'get_status',
'tags': [
'Miscellaneous',
],
'responses': {
'200': {
'description': 'Information about current service status',
},
},
},
},
'/docs/': {
'get': {
'summary': 'Return microservice OpenAPI v3 documentation',
'operationId': 'get_docs',
'tags': [
'Miscellaneous',
],
'responses': {
'200': {
'description': 'Service OpenAPI v3 documentation',
},
},
},
},
},
'components': {
'schemas': {
'Event': {
'properties': {
'id': {
'type': 'string',
'format': 'uuid',
},
'newsfeed_id': {
'type': 'string',
'example': '123',
},
'data': {
'type': 'object',
'example': {
'payload_id': 835,
},
},
'parent_fqid': {
'type': 'array',
'items': {
'type': 'string',
},
'example': ['123', '9d75e08f-f73f-4d80-a581-d3f9290520e6'],
},
'child_fqids': {
'type': 'array',
'items': {
'type': 'array',
'example': ['123', '9d75e08f-f73f-4d80-a581-d3f9290520e6'],
},
},
'first_seen_at': {
'type': 'integer',
'example': 1571436411,
},
'published_at': {
'type': 'integer',
'example': 1571436411,
},
},
},
'EventsList': {
'properties': {
'results': {
'type': 'array',
'items': {
'$ref': '#/components/schemas/Event',
},
},
},
},
'Subscription': {
'properties': {
'id': {
'type': 'string',
'format': 'uuid',
},
'newsfeed_id': {
'type': 'string',
'example': '123',
},
'to_newsfeed_id': {
'type': 'string',
'example': '124',
},
'subscribed_at': {
'type': 'integer',
'example': 1571436411,
},
},
},
'SubscriptionsList': {
'properties': {
'results': {
'type': 'array',
'items': {
'$ref': '#/components/schemas/Subscription',
},
},
},
},
},
},
}
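# Hypothetical wiring sketch (an assumption, not part of the original module):
# these handlers could be mounted on an aiohttp application roughly as shown
# below. The paths mirror the OpenAPI schema above; for
# get_openapi_schema_handler to resolve its Provide[...] default, the
# dependency-injector container must also be wired against this module.
def create_example_app() -> web.Application:
    app = web.Application()
    app.router.add_get('/status/', get_status_handler)
    app.router.add_get('/docs/', get_openapi_schema_handler)
    return app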
|
StarcoderdataPython
|
8077303
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 4 10:02:05 2019
@author: yoelr
"""
import biosteam as bst
__all__ = ('load_process_settings',)
# %% Process settings
def load_process_settings():
bst.process_tools.default_utilities()
bst.CE = 607.5 # 2019
bst.PowerUtility.price = 0.065
HeatUtility = bst.HeatUtility
steam_utility = HeatUtility.get_agent('low_pressure_steam')
steam_utility.heat_transfer_efficiency = 0.9
steam_utility.regeneration_price = 0.30626
steam_utility.T = 529.2
steam_utility.P = 44e5
HeatUtility.get_agent('cooling_water').regeneration_price = 0
HeatUtility.get_agent('chilled_water').heat_transfer_price = 0
ton2kg = 1./907.18474 # ton per kg (1 ton = 907.18474 kg)
ethanol_density_kgL = 0.789 # kg/L
liter_per_gallon = 3.78541
ethanol_cost = 2.15 # USD/gal
ethanol_density_kggal = liter_per_gallon * ethanol_density_kgL # kg/gal
enzyme_price = 4.24 * 50/1000 # USD/kg
|
StarcoderdataPython
|
3399385
|
<filename>armory/scenarios/multimodal_so2sat_scenario.py
"""
Multimodal image classification, currently designed for So2Sat dataset
"""
import copy
import logging
import numpy as np
from armory.utils import metrics
from armory.scenarios.scenario import Scenario
logger = logging.getLogger(__name__)
class So2SatClassification(Scenario):
def __init__(self, *args, **kwargs):
if "attack_modality" not in kwargs.keys():
raise ValueError("`attack_modality` must be defined for So2Sat scenario")
attack_modality = (kwargs.pop("attack_modality") or "").lower()
if attack_modality not in ("sar", "eo", "both"):
raise ValueError(
f"Multimodal scenario requires attack_modality parameter in {'SAR', 'EO', 'Both'}"
)
self.attack_modality = attack_modality
super().__init__(*args, **kwargs)
def load_attack(self):
attack_config = self.config["attack"]
attack_channels_mask = attack_config.get("generate_kwargs", {}).get("mask")
if attack_channels_mask is None:
if self.attack_modality == "sar":
logger.info("No mask configured. Attacking all SAR channels")
attack_channels_mask = np.concatenate(
(np.ones(4, dtype=np.float32), np.zeros(10, dtype=np.float32)),
axis=0,
)
elif self.attack_modality == "eo":
logger.info("No mask configured. Attacking all EO channels")
attack_channels_mask = np.concatenate(
(np.zeros(4, dtype=np.float32), np.ones(10, dtype=np.float32)),
axis=0,
)
elif self.attack_modality == "both":
logger.info("No mask configured. Attacking all SAR and EO channels")
attack_channels_mask = np.ones(14, dtype=np.float32)
else:
assert isinstance(
attack_channels_mask, list
), "Mask is specified, but incorrect format. Expected list"
attack_channels_mask = np.array(attack_channels_mask)
where_mask = np.where(attack_channels_mask)[0]
if self.attack_modality == "sar":
assert np.all(
np.logical_and(where_mask >= 0, where_mask < 4)
), "Selected SAR-only attack modality, but specify non-SAR channels"
elif self.attack_modality == "eo":
assert np.all(
np.logical_and(where_mask >= 4, where_mask < 14)
), "Selected EO-only attack modality, but specify non-EO channels"
elif self.attack_modality == "both":
assert np.all(
np.logical_and(where_mask >= 0, where_mask < 14)
), "Selected channels are out-of-bounds"
assert (
len(attack_channels_mask) == 14
), f"Expected channel mask of length 14, found length {len(attack_channels_mask)}"
assert np.all(
np.logical_or(attack_channels_mask == 0, attack_channels_mask == 1)
), "Expected binary attack channel mask, but found values outside {0,1}"
super().load_attack()
self.generate_kwargs["mask"] = attack_channels_mask
def load_metrics(self):
super().load_metrics()
# Overwrite standard metrics_logger
metrics_config = self.config["metric"]
performance_metrics = copy.deepcopy(metrics_config)
performance_metrics.pop("perturbation")
metrics_logger = metrics.MetricsLogger.from_config(
performance_metrics,
skip_benign=self.skip_benign,
skip_attack=self.skip_attack,
targeted=self.targeted,
)
self.profiler_kwargs[
"computational_resource_dict"
] = metrics_logger.computational_resource_dict
perturbation_metrics = copy.deepcopy(self.config["metric"])
perturbation_metrics.pop("task")
if self.attack_modality in ("sar", "both"):
sar_perturbation_logger = metrics.MetricsLogger.from_config(
perturbation_metrics,
skip_benign=True,
skip_attack=False,
targeted=self.targeted,
)
else:
sar_perturbation_logger = None
if self.attack_modality in ("eo", "both"):
eo_perturbation_logger = metrics.MetricsLogger.from_config(
perturbation_metrics,
skip_benign=True,
skip_attack=False,
targeted=self.targeted,
)
else:
eo_perturbation_logger = None
self.metrics_logger = metrics_logger
self.sar_perturbation_logger = sar_perturbation_logger
self.eo_perturbation_logger = eo_perturbation_logger
def run_attack(self):
x, y, y_pred = self.x, self.y, self.y_pred
with metrics.resource_context(name="Attack", **self.profiler_kwargs):
if self.attack_type == "preloaded":
logger.warning(
"Specified preloaded attack. Ignoring `attack_modality` parameter"
)
if len(x) == 2:
x, x_adv = x
else:
x_adv = x
if self.targeted:
y, y_target = y
else:
y_target = None
misclassified = False
else:
if self.use_label:
y_target = y
elif self.targeted:
y_target = self.label_targeter.generate(y)
elif self.skip_benign:
y_target = None # most attacks will call self.model.predict(x)
else:
y_target = y_pred
if self.skip_misclassified:
if self.targeted:
misclassified = all(
metrics.categorical_accuracy(y_target, y_pred)
)
else:
misclassified = not any(metrics.categorical_accuracy(y, y_pred))
else:
misclassified = False
if misclassified:
x_adv = x
else:
x_adv = self.attack.generate(
x=x, y=y_target, **self.generate_kwargs
)
if misclassified:
y_pred_adv = y_pred
else:
# Ensure that input sample isn't overwritten by model
x_adv.flags.writeable = False
y_pred_adv = self.model.predict(x_adv, **self.predict_kwargs)
self.metrics_logger.update_task(y, y_pred_adv, adversarial=True)
if self.targeted:
self.metrics_logger.update_task(
y_target, y_pred_adv, adversarial=True, targeted=True
)
# Update perturbation metrics for SAR/EO separately
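        # Channels 0-3 hold the SAR real/imaginary pairs (recombined into two
        # complex-valued channels below); channels 4-13 are the EO bands.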
x_sar = np.stack(
(x[..., 0] + 1j * x[..., 1], x[..., 2] + 1j * x[..., 3]), axis=3
)
x_adv_sar = np.stack(
(x_adv[..., 0] + 1j * x_adv[..., 1], x_adv[..., 2] + 1j * x_adv[..., 3],),
axis=3,
)
x_eo = x[..., 4:]
x_adv_eo = x_adv[..., 4:]
if self.sar_perturbation_logger is not None:
self.sar_perturbation_logger.update_perturbation(x_sar, x_adv_sar)
if self.eo_perturbation_logger is not None:
self.eo_perturbation_logger.update_perturbation(x_eo, x_adv_eo)
if self.sample_exporter is not None:
self.sample_exporter.export(x, x_adv, y, y_pred_adv)
self.x_adv, self.y_target, self.y_pred_adv = x_adv, y_target, y_pred_adv
def finalize_results(self):
metrics_logger = self.metrics_logger
metrics_logger.log_task()
metrics_logger.log_task(adversarial=True)
if self.targeted:
metrics_logger.log_task(adversarial=True, targeted=True)
# Merge performance, SAR, EO results
combined_results = metrics_logger.results()
if self.sar_perturbation_logger is not None:
combined_results.update(
{
f"sar_{k}": v
for k, v in self.sar_perturbation_logger.results().items()
}
)
if self.eo_perturbation_logger is not None:
combined_results.update(
{f"eo_{k}": v for k, v in self.eo_perturbation_logger.results().items()}
)
self.results = combined_results
|
StarcoderdataPython
|
3565642
|
<reponame>mamaheux/bass-amplifier<filename>tools/signal_processing/ui/utils/gain_plot_widget.py
import matplotlib.pyplot as plt
from PySide2.QtWidgets import QWidget, QVBoxLayout
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
class GainPlotWidget(QWidget):
def __init__(self, min_frequency=10, max_frequency=24000, min_gain=-20, max_gain=20):
super().__init__()
self._min_frequency = min_frequency
self._max_frequency = max_frequency
self._min_gain = min_gain
self._max_gain = max_gain
self._fig = plt.figure()
self._ax = self._fig.add_subplot(111)
self._canvas = FigureCanvasQTAgg(self._fig)
vlayout = QVBoxLayout()
vlayout.addWidget(self._canvas)
self.setLayout(vlayout)
def update(self, f, g):
self._ax.cla()
self._ax.semilogx([self._min_frequency, self._max_frequency], [0, 0], '--', color='tab:orange')
self._ax.semilogx(f, g, color='tab:blue')
self._ax.set_xlim(self._min_frequency, self._max_frequency)
self._ax.set_ylim(self._min_gain, self._max_gain)
self._ax.set_xlabel('Frequency (Hz)')
self._ax.set_ylabel('Gain (dB)')
self._canvas.draw()
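# Hypothetical usage sketch (values made up; requires a running Qt application,
# e.g. QApplication from PySide2.QtWidgets):
#   app = QApplication([])
#   widget = GainPlotWidget()
#   widget.update([20, 200, 2000, 20000], [0.0, 3.0, -3.0, 0.0])
#   widget.show()
#   app.exec_()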
|
StarcoderdataPython
|
1649018
|
<filename>akill.py
from __future__ import print_function
__module_name__ = 'AKILL script'
__module_version__ = '0.1'
__module_description__ = 'AKILL oper script for Atheme and forks services packages'
__author__ = '<NAME>.'
import hexchat
help_hook = "\"/sakill <nick|hostmask> <public reason> | <private oper reason>\"" \
" AKILLs the specified nick or hostmask for 24 hours."
def sakill(word, word_eol, userdata):
    if len(word) > 1:
        try:
            user = word[1]
            # word.index('|') raises ValueError when no '|' separator is given;
            # the except clause below reports it as a usage error.
            public_reason = ' '.join(word[2:word.index('|')])
            private_reason = ' '.join(word[word.index('|') + 1:])
hexchat.command("os akill add %s !T 24h You have violated foo's terms of service. "
"%s. If in error, please send an e-mail to <EMAIL> | %s"
% (user, public_reason, private_reason))
return hexchat.EAT_ALL
except ValueError:
hexchat.prnt("USAGE: /sakill <nick|hostmask> <public reason> | <private oper reason>")
return hexchat.EAT_ALL
else:
hexchat.prnt("USAGE: /sakill <nick|hostmask> <public reason> | <private oper reason>")
return hexchat.EAT_ALL
def unload_sakill(userdata):
print(__module_name__, 'version', __module_version__, 'unloaded.')
hexchat.hook_command('sakill', sakill, help = help_hook)
hexchat.hook_unload(unload_sakill)
print(__module_name__, 'version', __module_version__, 'loaded.')
|
StarcoderdataPython
|
68172
|
from typing import List
class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
        if grid is None or len(grid) == 0:
return 0
rows = len(grid)
cols = len(grid[0])
for r in range(rows):
for c in range(cols):
if r==0 and c==0:
continue
if r-1<0:
grid[r][c] = grid[r][c] + grid[r][c-1]
elif c-1<0:
grid[r][c] = grid[r][c] + grid[r-1][c]
else:
grid[r][c] = grid[r][c] + min(grid[r-1][c], grid[r][c-1])
return grid[rows-1][cols-1]
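# Worked example (the standard LeetCode 64 case): for grid [[1,3,1],[1,5,1],[4,2,1]]
# the in-place prefix sums become [[1,4,5],[2,7,6],[6,8,7]], so minPathSum returns 7
# (path 1 -> 3 -> 1 -> 1 -> 1).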
|
StarcoderdataPython
|
6539102
|
"""
This script makes Figure 2: a dial plot of the microburst width as a
function of L and MLT
Parameters
----------
catalog_name: str
The name of the catalog in the config.PROJECT_DIR/data/ directory.
r2_thresh: float
The adjusted R^2 threshold for the fits. I chose a default value of 0.9.
max_width_ms: float
Maximum microburst width (FWHM) in milliseconds to histogram. A good default is
500 ms.
percentiles: np.array
    The distribution percentiles for the FWHM distributions in L-MLT space.
    Values must be between 0 and 100; one dial panel is drawn per percentile,
    plus one final panel showing the microburst occurrence distribution.
"""
import pathlib
import string
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
from sampex_microburst_widths import config
from sampex_microburst_widths.stats import dial_plot
plt.rcParams.update({'font.size': 13})
cmap = 'viridis'
catalog_name = 'microburst_catalog_04.csv'
### Script parameters
statistics_thresh=100 # Don't calculate stats if less microbursts in the bin.
percentiles = np.array([50])
r2_thresh = 0.9
max_width_ms = 500
L_bins = np.linspace(2, 8.1, num=20)
L_labels = [2,4,6]
MLT_bins = np.linspace(0, 24, num=40)
df = pd.read_csv(pathlib.Path(config.PROJECT_DIR, 'data', catalog_name))
df.dropna(inplace=True)
df['width_ms'] = 1000*df['width_s'] # Convert seconds to ms.
df['fwhm_ms'] = 1000*df['fwhm']
df = df[df['width_ms'] < max_width_ms]
df['fwhm_ms'] = df['fwhm_ms'].abs()
df = df[df.adj_r2 > r2_thresh]
num_microbursts_H, _, _ = np.histogram2d(df['MLT'], df['L_Shell'],
bins=[MLT_bins, L_bins])
H = np.nan*np.zeros(
(len(MLT_bins), len(L_bins), len(percentiles))
)
for i, (start_MLT, end_MLT) in enumerate(zip(MLT_bins[:-1], MLT_bins[1:])):
for j, (start_L, end_L) in enumerate(zip(L_bins[:-1], L_bins[1:])):
df_flt = df.loc[(
(df['MLT'] > start_MLT) & (df['MLT'] < end_MLT) &
(df['L_Shell'] > start_L) & (df['L_Shell'] < end_L)
), 'fwhm_ms']
if df_flt.shape[0] >= statistics_thresh:
H[i, j, :] = df_flt.quantile(percentiles/100)
fig = plt.figure(figsize=(9, 4))
ax = [plt.subplot(1, 2, i, projection='polar') for i in range(1, 3)]
for i, ax_i in enumerate(ax[:-1]):
d = dial_plot.Dial(ax_i, MLT_bins, L_bins, H[:, :, i])
d.draw_dial(L_labels=L_labels,
mesh_kwargs={'cmap':cmap},
colorbar_kwargs={'label':f'microburst duration [ms]', 'pad':0.1})
annotate_str = f'({string.ascii_lowercase[i]}) {percentiles[i]}th percentile'
ax_i.text(-0.2, 1.2, annotate_str, va='top', transform=ax_i.transAxes,
weight='bold', fontsize=15)
d4 = dial_plot.Dial(ax[-1], MLT_bins, L_bins, num_microbursts_H)
d4.draw_dial(L_labels=L_labels,
mesh_kwargs={'norm':matplotlib.colors.LogNorm(), 'cmap':cmap},
colorbar_kwargs={'label':'Number of microbursts', 'pad':0.1})
annotate_str = f'({string.ascii_lowercase[len(ax)-1]}) Microburst occurrence'
ax[-1].text(-0.2, 1.2, annotate_str, va='top', transform=ax[-1].transAxes,
weight='bold', fontsize=15)
for ax_i in ax:
ax_i.set_rlabel_position(235)
plt.suptitle(f'Distribution of SAMPEX microburst durations in L-MLT', fontsize=20)
plt.tight_layout()
plt.show()
|
StarcoderdataPython
|
5044313
|
<filename>thortils/utils/math.py
import random
import numpy as np
import math
from scipy.spatial.transform import Rotation as scipyR
# Operations
def remap(oldval, oldmin, oldmax, newmin, newmax, enforce=False):
newval = (((oldval - oldmin) * (newmax - newmin)) / (oldmax - oldmin)) + newmin
if enforce:
return min(max(newval, newmin), newmax)
else:
return newval
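# Example: remap(5, 0, 10, 0, 100) == 50.0; with enforce=True the result is clamped
# to [newmin, newmax], e.g. remap(15, 0, 10, 0, 100, enforce=True) == 100.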
def closest(values, query):
"""Returns the entry in `values` that is
closest to `query` in terms of absolute value difference."""
return min(values, key=lambda v: abs(v-query))
def normalize_angles(angles):
"""Returns array-like of angles within 0 to 360 degrees"""
return type(angles)(map(lambda x: x % 360, angles))
def euclidean_dist(p1, p2):
return math.sqrt(sum([(a - b)** 2 for a, b in zip(p1, p2)]))
def roundany(x, base):
"""
rounds the number x (integer or float) to
the closest number that increments by `base`.
"""
return base * round(x / base)
def floorany(x, base):
return base * math.floor(x / base)
def clip(x, minval, maxval):
return min(maxval, max(x, minval))
def diff(rang):
return rang[1] - rang[0]
def in_range(x, rang):
return x >= rang[0] and x < rang[1]
def in_range_inclusive(x, rang):
return x >= rang[0] and x <= rang[1]
def in_region(p, ranges):
return in_range(p[0], ranges[0]) and in_range(p[1], ranges[1]) and in_range(p[2], ranges[2])
def approx_equal(v1, v2, epsilon=1e-6):
if len(v1) != len(v2):
return False
for i in range(len(v1)):
if abs(v1[i] - v2[i]) > epsilon:
return False
return True
_operations_ = ['remap',
'closest',
'normalize_angles',
'euclidean_dist',
'roundany',
'floorany',
'clip',
'diff',
'in_range',
'in_range_inclusive',
'in_region',
'approx_equal']
######## Conversions
def to_radians(th):
return th*np.pi / 180
def to_rad(th):
return th*np.pi / 180
def to_degrees(th):
return th*180 / np.pi
def to_deg(th):
return th*180 / np.pi
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
_conversions_ = ['to_radians',
'to_rad',
'to_degrees',
'to_deg',
'cart2pol',
'pol2cart']
########## Transform; all input angles are degrees
def R_x(th):
th = to_rad(th)
return np.array([
1, 0, 0, 0,
0, np.cos(th), -np.sin(th), 0,
0, np.sin(th), np.cos(th), 0,
0, 0, 0, 1
]).reshape(4,4)
def R_y(th):
th = to_rad(th)
return np.array([
np.cos(th), 0, np.sin(th), 0,
0, 1, 0, 0,
-np.sin(th), 0, np.cos(th), 0,
0, 0, 0, 1
]).reshape(4,4)
def R_z(th):
th = to_rad(th)
return np.array([
np.cos(th), -np.sin(th), 0, 0,
np.sin(th), np.cos(th), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1
]).reshape(4,4)
def R2d(th):
th = to_rad(th)
return np.array([
np.cos(th), -np.sin(th),
np.sin(th), np.cos(th)
]).reshape(2,2)
def R_between(v1, v2):
if len(v1) != 3 or len(v2) != 3:
raise ValueError("Only applicable to 3D vectors!")
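    # Rodrigues-style construction: R = I + [v]_x + [v]_x^2 * (1 - c) / s**2 with
    # v = v1 x v2, c = v1 . v2, s = |v|. Note (caveat, not from the original code):
    # s == 0 for parallel or anti-parallel inputs, which this formula does not handle.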
v = np.cross(v1, v2)
c = np.dot(v1, v2)
s = np.linalg.norm(v)
I = np.identity(3)
vX = np.array([
0, -v[2], v[1],
v[2], 0, -v[0],
-v[1], v[0], 0
]).reshape(3,3)
R = I + vX + np.matmul(vX,vX) * ((1-c)/(s**2))
return R
def R_euler(thx, thy, thz, affine=False, order='xyz'):
"""
Obtain the rotation matrix of Rz(thx) * Ry(thy) * Rx(thz); euler angles
"""
R = scipyR.from_euler(order, [thx, thy, thz], degrees=True)
if affine:
aR = np.zeros((4,4), dtype=float)
aR[:3,:3] = R.as_matrix()
aR[3,3] = 1
R = aR
return R
def R_quat(x, y, z, w, affine=False):
R = scipyR.from_quat([x,y,z,w])
if affine:
aR = np.zeros((4,4), dtype=float)
aR[:3,:3] = R.as_matrix()
aR[3,3] = 1
R = aR
return R
def R_to_euler(R, order='xyz'):
"""
Obtain the thx,thy,thz angles that result in the rotation matrix Rz(thx) * Ry(thy) * Rx(thz)
Reference: http://planning.cs.uiuc.edu/node103.html
"""
return R.as_euler(order, degrees=True)
def R_to_quat(R):
return R.as_quat()
def euler_to_quat(thx, thy, thz, order='xyz'):
return scipyR.from_euler(order, [thx, thy, thz], degrees=True).as_quat()
def quat_to_euler(x, y, z, w, order='xyz'):
return scipyR.from_quat([x,y,z,w]).as_euler(order, degrees=True)
def T(dx, dy, dz):
return np.array([
1, 0, 0, dx,
0, 1, 0, dy,
0, 0, 1, dz,
0, 0, 0, 1
]).reshape(4,4)
def vec(p1, p2):
""" vector from p1 to p2 """
if type(p1) != np.ndarray:
p1 = np.array(p1)
if type(p2) != np.ndarray:
p2 = np.array(p2)
return p2 - p1
def proj(vec1, vec2, scalar=False):
# Project vec1 onto vec2. Returns a vector in the direction of vec2.
scale = np.dot(vec1, vec2) / np.linalg.norm(vec2)
if scalar:
return scale
else:
return vec2 * scale
_transforms_ = ['R_x',
'R_y',
'R_z',
'R2d',
'R_between',
'R_euler',
'R_quat',
'R_to_euler',
'R_to_quat',
'euler_to_quat',
'quat_to_euler',
'T',
'vec',
'proj']
# Probability
def sep_spatial_sample(candidates, sep, num_samples,
sample_func=None, rnd=random):
"""Draws samples from candidates,
such that the samples are minimally of euclidean distance
`sep` apart. Note that this will attempt to
draw `num_samples` samples but is not guaranteed
to return this number of samples.
You can optionally supply a sample_func
that takes in the candidates and return
a sample. If not provided, will draw uniformly.
The samples will not be at duplicate locations."""
samples = set()
for _ in range(num_samples):
if sample_func is None:
s = rnd.sample(candidates, 1)[0]
else:
s = sample_func(candidates)
if len(samples) > 0:
closest = min(samples,
key=lambda c: euclidean_dist(s, c))
if euclidean_dist(closest, s) >= sep:
samples.add(s)
else:
samples.add(s)
return samples
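# A small hedged sketch of what the function guarantees (euclidean_dist is assumed to be
# the distance helper defined earlier in this module; the candidate grid is illustrative):
#   grid = [(x, y) for x in range(10) for y in range(10)]
#   picked = sep_spatial_sample(grid, sep=3.0, num_samples=5)
#   # every pair of points in `picked` is at least 3.0 apart; len(picked) may be < 5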
_probability_ = ["sep_spatial_sample"]
__all__ = _operations_ + _conversions_ + _transforms_ + _probability_
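# A minimal usage sketch of the transform helpers above (numpy is already imported at the
# top of this module; the expected outputs in the comments are approximate):
if __name__ == "__main__":
    p = np.array([1.0, 0.0, 0.0, 1.0])       # homogeneous 3D point
    moved = T(0, 0, 2) @ R_z(90) @ p          # rotate 90 deg about z, then translate by (0, 0, 2)
    print(moved)                              # ~[0, 1, 2, 1]
    R = R_between([1, 0, 0], [0, 1, 0])       # rotation taking +x onto +y
    print(R @ np.array([1.0, 0.0, 0.0]))      # ~[0, 1, 0]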
|
StarcoderdataPython
|
8084108
|
"""Unit test package for gpdre."""
|
StarcoderdataPython
|
1665527
|
<reponame>samarthg/jira-scrum-update-automation<filename>fabfile.py
from fabric.api import local
def hello():
print("hi there")
def prepare_patch():
local("bumpversion patch")
def prepare_minor():
local("bumpversion minor")
def prepare_major():
local("bumpversion major")
def release():
local("git push origin master")
local("git push --tags origin master")
local("python setup.py sdist upload -r pypi")
|
StarcoderdataPython
|
6487218
|
"""
==========================================================================
TorusRouterFL.py
==========================================================================
FL route unit that implements dimension order routing.
Author : <NAME>
Date : June 30, 2019
"""
from pymtl3 import *
from .directions import *
from .RouteUnitDorFL import RouteUnitDorFL
class TorusRouterFL:
def __init__( s, pos_x, pos_y, ncols, nrows, dimension='y' ):
s.pos_x = pos_x
s.pos_y = pos_y
s.ncols = ncols
s.nrows = nrows
s.dimension = dimension
    s.route_unit = RouteUnitDorFL( pos_x, pos_y, ncols, nrows, dimension=dimension )
#-----------------------------------------------------------------------
# arrange_src_pkts
#-----------------------------------------------------------------------
# A helper function that puts each packet in [lst] into corresponding
# source.
def arrange_src_pkts( s, lst ):
src_pkts = [ [] for _ in range(5) ]
for pkt in lst:
if pkt.src_x == s.pos_x and pkt.src_y == s.pos_y:
in_dir = SELF
elif s.dimension == 'y':
src_x = pkt.src_x.uint()
src_y = pkt.src_y.uint()
dst_x = pkt.dst_x.uint()
dst_y = pkt.dst_y.uint()
# Same x - either comes from north or south
if src_x == s.pos_x:
north_dist = dst_y - src_y if dst_y > src_y else dst_y + s.nrows - src_y
south_dist = src_y - dst_y if dst_y < src_y else src_y + s.nrows - dst_y
in_dir = SOUTH if north_dist < south_dist else NORTH
# Different x - either comes from west or east
else:
east_dist = dst_x - src_x if dst_x > src_x else dst_x + s.ncols - src_x
west_dist = src_x - dst_x if dst_x < src_x else src_x + s.ncols - dst_x
in_dir = EAST if west_dist < east_dist else WEST
else: # s.dimension=='x'
# Same y - either comes from west or east
        if src_y == s.pos_y:
east_dist = dst_x - src_x if dst_x > src_x else dst_x + s.ncols - src_x
west_dist = src_x - dst_x if dst_x < src_x else src_x + s.ncols - dst_x
in_dir = EAST if west_dist < east_dist else WEST
# Different y - either comes from north or south
else:
north_dist = dst_y - src_y if dst_y > src_y else dst_y + s.nrows - src_y
south_dist = src_y - dst_y if dst_y < src_y else src_y + s.nrows - dst_y
in_dir = SOUTH if north_dist < south_dist else NORTH
src_pkts[ in_dir ].append( pkt )
return src_pkts
#-----------------------------------------------------------------------
# route
#-----------------------------------------------------------------------
# Use FL route unit to route each packet in [src_pkts] to corresponding
# destination.
def route( s, src_pkts ):
assert len( src_pkts ) == 5
dst_pkts = [ [] for _ in range(5) ]
for pkts in src_pkts:
tmp = s.route_unit.route( pkts )
for i in range(5):
dst_pkts[i].extend( tmp[i] )
return dst_pkts
|
StarcoderdataPython
|
1646657
|
"""
Understanding *args
- *args is a parameter like any other. That means you can
call it anything you like, as long as the name starts with an asterisk.
Example:
*xis
By convention, though, we use *args to name it.
So what is *args?
The *args parameter, when used in a function, packs any extra positional values passed as
input into a tuple. So keep in mind from the start that tuples are immutable.
# Examples
def soma_todos_numeros(num1=1, num2=2, num3=3, num4=4):
    return num1 + num2 + num3 + num4
print(soma_todos_numeros(4, 6, 9))
print(soma_todos_numeros(4, 6))
print(soma_todos_numeros(4, 6, 9, 5))
# Understanding args
def soma_todos_numeros(nome, email, *args):
    return sum(args)
print(soma_todos_numeros('Angelina', 'Jolie'))
print(soma_todos_numeros('Angelina', 'Jolie', 1))
print(soma_todos_numeros('Angelina', 'Jolie', 2, 3))
print(soma_todos_numeros('Angelina', 'Jolie', 2, 3, 4))
print(soma_todos_numeros('Angelina', 'Jolie', 3, 4, 5, 6))
print(soma_todos_numeros('Angelina', 'Jolie', 23.4, 12.5))
# Another example of using *args
def verifica_info(*args):
    if 'Geek' in args and 'University' in args:
        return 'Bem-vindo Geek!'
    return 'Eu não tenho certeza quem você é ...'
print(verifica_info())
print(verifica_info(1, True, 'University', 'Geek'))
print(verifica_info(1, 'University', 3.145))
"""
def soma_todos_numeros(*args):
return sum(args)
# print(soma_todos_numeros())
# print(soma_todos_numeros(3, 4, 5, 6))
numeros = [1, 2, 3, 4, 5, 6, 7]
# Unpacking operator
print(soma_todos_numeros(*numeros))
# NOTE: the asterisk tells Python that we are passing a collection
# of data as the argument, so it knows it has to unpack
# that data first.
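# A small sketch to make the note above concrete: the extra positional arguments really do
# arrive packed in a tuple (the helper name below is just for illustration).
def mostra_tipo(*args):
    print(type(args), args)

mostra_tipo(*numeros)  # <class 'tuple'> (1, 2, 3, 4, 5, 6, 7)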
|
StarcoderdataPython
|
1821094
|
"""
Print inconsistencies to hand over to the mods and push them to fix
"""
import json
from pathlib import Path
import process_artists.config1_exceptions
song_database = Path("../app/data/expand_mapping.json")
with open(song_database, encoding="utf-8") as json_file:
song_database = json.load(json_file)
for exception in process_artists.config1_exceptions.alternative_names:
for alt_name in exception:
print(f'"{alt_name}"', end=" ")
for anime in song_database:
id_list = set()
for song in anime["songs"]:
if alt_name in song["artist"]:
id_list.add(anime["annId"])
for id in id_list:
print(id, end=" ")
print()
print()
|
StarcoderdataPython
|
68053
|
#!/usr/bin/env sage
# -*- mode: python -*-
import sys
from sage.all import *
def usage():
print("Usage: {0} Lmax [precision]".format(sys.argv[0]))
def gen_gaunt_table(lmax, prec = None):
tmpl = "{0:4d} {1:4d} {2:4d} {3:4d} {4:4d} {5:5d} {6:23.15e}\n"
with open("gaunt_lmax{0}".format(lmax), 'w') as out:
for l1 in range(lmax + 1):
for l2 in range(lmax + 1):
for l3 in range(lmax + 1):
if (l1 + l2 + l3) % 2 == 1:
continue
for m1 in range(-l1, l1+1):
for m2 in range(-l2, l2+1):
for m3 in range(-l3, l3+1):
v = gaunt(l1, l2, l3, m1, m2, m3, prec = prec)
out.write(tmpl.format(l1, l2, l3, m1, m2, m3,
float(v)))
return
if __name__ == "__main__":
if not (len(sys.argv) == 2 or len(sys.argv) == 3):
usage()
sys.exit(1)
prec = None
if len(sys.argv) == 3:
prec = int(sys.argv[2])
lmax = int(sys.argv[1])
gen_gaunt_table(lmax, prec = prec)
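# Example invocation, assuming Sage is available on PATH (the script name is a placeholder):
#   sage gen_gaunt.py 3 53
# This writes "gaunt_lmax3" with one "l1 l2 l3 m1 m2 m3 value" row for every combination
# where l1 + l2 + l3 is even.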
|
StarcoderdataPython
|
1661368
|
<reponame>mintproject/mint_cli
from unittest import TestCase
from dame.modelcatalogapi import get_setup
from dame.utils import obtain_id
SETUP_FULL_INFO = "cycles-0.10.2-alpha-collection-oromia-single-point"
SETUP_PARTIAL_INFO = "dsi_1.0_cfg"
testing = "testing"
class Test(TestCase):
def test_get_setup(self):
assert obtain_id(get_setup(SETUP_FULL_INFO, profile=testing).id) == SETUP_FULL_INFO
assert obtain_id(get_setup(SETUP_PARTIAL_INFO, profile=testing).id) == SETUP_PARTIAL_INFO
|
StarcoderdataPython
|
184314
|
# For Keystone Engine. AUTO-GENERATED FILE, DO NOT EDIT [systemz_const.py]
KS_ERR_ASM_SYSTEMZ_INVALIDOPERAND = 512
KS_ERR_ASM_SYSTEMZ_MISSINGFEATURE = 513
KS_ERR_ASM_SYSTEMZ_MNEMONICFAIL = 514
|
StarcoderdataPython
|
3560110
|
# Generated by Django 3.2.9 on 2021-11-10 21:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('djasana', '0023_alter_attachment_url_download_length'),
]
operations = [
migrations.AddField(
model_name='customfield',
name='created_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='djasana.user', to_field='remote_id'),
),
]
|
StarcoderdataPython
|
1987925
|
import os
from flask import Flask, request, abort, jsonify
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import desc, func
from flask_cors import CORS
import random
from models import setup_db, Question, Category
QUESTIONS_PER_PAGE = 10
def format_categories(categories):
return {category.id : category.type for category in categories}
def format_questions(questions):
return [question.format() for question in questions]
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__)
setup_db(app)
'''
Setting up CORS. Allows '*' for origins.
'''
CORS(app)
'''
The after_request decorator to set Access-Control-Allow
'''
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization,true')
response.headers.add('Access-Control-Allow-Methods', 'GET,PATCH,POST,DELETE,OPTIONS')
return response
'''
The endpoint to handle GET requests
for all available categories.
'''
@app.route('/categories', methods=['GET'])
def get_categories():
categories = Category.query.order_by(Category.id).all()
# no need to throw an error if categories are empty.
# Seems absolutely valid from API perspective
return jsonify({
"success": True,
"categories": format_categories(categories)
})
'''
GET endpoint to handle requests for questions,
including pagination (every 10 questions).
This endpoint returns a list of questions,
number of total questions, current category, categories.
'''
@app.route('/questions', methods=['GET'])
def get_questions():
page = request.args.get('page', 1, type=int)
questions = Question.query.paginate(page, QUESTIONS_PER_PAGE).items
categories = Category.query.order_by(Category.id).all()
return jsonify({
"success": True,
"questions": format_questions(questions),
"total_questions": Question.query.count(),
"categories": format_categories(categories)
})
'''
DELETE endpoint to delete question using a question ID.
'''
@app.route('/questions/<int:question_id>', methods=['DELETE'])
def delete_question(question_id):
try:
question = Question.query.get(question_id)
question.delete()
return jsonify({
"success": True,
"deleted_question": question_id
})
except:
abort(422)
'''
POST endpoint to add a new question,
which will require the question and answer text,
category, and difficulty score.
'''
@app.route('/questions', methods=['POST'])
def post_question():
body = request.get_json()
if (not 'question' in body) or (not 'answer' in body) or (not 'category' in body) or (not 'difficulty' in body):
abort(422)
question = body.get('question')
answer = body.get('answer')
category = body.get('category')
difficulty = body.get('difficulty')
try:
question_obj = Question(question, answer, category, difficulty)
question_obj.insert()
return jsonify({
"success": True,
"created_question": question_obj.format()
})
except:
abort(422)
'''
POST endpoint to get questions based on a search term.
It returns any questions for whom the search term
is a substring of the question.
'''
@app.route('/questions/search', methods=['POST'])
def search_questions():
body = request.get_json()
search_term = body.get('searchTerm', None)
if not search_term:
abort(422)
try:
searched = Question.query.filter(func.lower(Question.question).contains(search_term.lower())).all()
return jsonify({
"success": True,
"questions": format_questions(searched),
"total_questions": len(searched),
})
except:
abort(422)
'''
GET endpoint to get questions based on category.
'''
@app.route('/categories/<int:category_id>/questions', methods=['GET'])
def get_questions_by_category(category_id):
try:
questions_with_needed_category = Question.query.filter(Question.category == str(category_id)).all()
return jsonify({
"success": True,
"questions": format_questions(questions_with_needed_category),
"current_category": category_id
})
except:
abort(404)
'''
POST endpoint to get questions to play the quiz.
This endpoint takes category and previous question parameters
and returns a random questions within the given category,
if provided, and that is not one of the previous questions.
'''
@app.route('/quizzes', methods=['POST'])
def play_game():
body = request.get_json()
if not 'previous_questions' in body:
abort(422)
try:
category = body.get('quiz_category', None)
prev_questions = set(body.get('previous_questions'))
possible_questions = []
            if category is None or category['id'] == 0:
                possible_questions = Question.query.filter(Question.id.notin_(prev_questions)).all()
            else:
                possible_questions = Question.query.filter_by(category=category['id']).filter(Question.id.notin_(prev_questions)).all()
            new_question = random.choice(possible_questions).format() if len(possible_questions) > 0 else None
return jsonify({
"success": True,
"question": new_question
})
except:
abort(422)
'''
Error handlers for all expected errors.
'''
@app.errorhandler(404)
def not_found(error):
return jsonify({
"success": False,
"error": 404,
"message": "Not Found"
}), 404
@app.errorhandler(422)
    def unprocessable_entity(error):
return jsonify({
"success": False,
"error": 422,
"message": "Unprocessable Entity"
}), 422
@app.errorhandler(400)
def bad_request(error):
return jsonify({
"success": False,
"error": 400,
"message": "Bad Request"
}), 400
@app.errorhandler(500)
def internal_server_error(error):
return jsonify({
"success": False,
"error": 500,
"message": "Internal Server Error"
}), 500
return app
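# A minimal local-run sketch (assumes models.setup_db can reach its configured database;
# the host and port below are illustrative, not part of the project setup):
if __name__ == '__main__':
    create_app().run(host='127.0.0.1', port=5000)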
|
StarcoderdataPython
|
92660
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.internal.platform.tracing_agent import chrome_tracing_agent
class FakePlatformBackend(object):
pass
class FakeDevtoolsClient(object):
def __init__(self, remote_port):
self.is_alive = True
self.tracing_started = False
self.remote_port = remote_port
self.will_raise_exception_in_stop_tracing = False
def IsAlive(self):
return self.is_alive
def StartChromeTracing(self, _trace_options, _filter_string, _timeout=10):
self.tracing_started = True
def StopChromeTracing(self, _trace_data_builder):
self.tracing_started = False
if self.will_raise_exception_in_stop_tracing:
raise Exception
def IsChromeTracingSupported(self):
return True
class FakeTraceOptions(object):
def __init__(self):
self.enable_chrome_trace = True
class FakeCategoryFilter(object):
def __init__(self):
self.filter_string = 'foo'
class ChromeTracingAgentUnittest(unittest.TestCase):
def setUp(self):
self.platform1 = FakePlatformBackend()
self.platform2 = FakePlatformBackend()
self.platform3 = FakePlatformBackend()
def StartTracing(self, platform_backend, enable_chrome_trace=True):
assert chrome_tracing_agent.ChromeTracingAgent.IsSupported(platform_backend)
agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
trace_options = FakeTraceOptions()
trace_options.enable_chrome_trace = enable_chrome_trace
agent.Start(trace_options, FakeCategoryFilter(), 10)
return agent
def StopTracing(self, tracing_agent):
tracing_agent.Stop(None)
def testRegisterDevtoolsClient(self):
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(1), self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(2), self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(3), self.platform1)
tracing_agent_of_platform1 = self.StartTracing(self.platform1)
with self.assertRaises(chrome_tracing_agent.ChromeTracingStartedError):
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(4), self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(5), self.platform2)
self.StopTracing(tracing_agent_of_platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(6), self.platform1)
def testIsSupport(self):
self.assertFalse(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
self.assertFalse(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
self.assertFalse(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))
devtool1 = FakeDevtoolsClient(1)
devtool2 = FakeDevtoolsClient(2)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool1, self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool2, self.platform2)
devtool2.is_alive = False
# Chrome tracing is only supported on platform 1 since only platform 1 has
# an alive devtool.
self.assertTrue(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
self.assertFalse(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
self.assertFalse(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))
def testStartAndStopTracing(self):
devtool1 = FakeDevtoolsClient(1)
devtool2 = FakeDevtoolsClient(2)
devtool3 = FakeDevtoolsClient(3)
devtool4 = FakeDevtoolsClient(2)
# Register devtools 1, 2, 3 on platform1 and devtool 4 on platform 2
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool1, self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool2, self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool3, self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool4, self.platform2)
devtool2.is_alive = False
tracing_agent1 = self.StartTracing(self.platform1)
with self.assertRaises(chrome_tracing_agent.ChromeTracingStartedError):
self.StartTracing(self.platform1)
self.assertTrue(devtool1.tracing_started)
self.assertFalse(devtool2.tracing_started)
self.assertTrue(devtool3.tracing_started)
# Devtool 4 shouldn't have tracing started although it has the same remote
# port as devtool 2
self.assertFalse(devtool4.tracing_started)
self.StopTracing(tracing_agent1)
self.assertFalse(devtool1.tracing_started)
self.assertFalse(devtool2.tracing_started)
self.assertFalse(devtool3.tracing_started)
self.assertFalse(devtool4.tracing_started)
# Test that it should be ok to start & stop tracing on platform1 again.
tracing_agent1 = self.StartTracing(self.platform1)
self.StopTracing(tracing_agent1)
tracing_agent2 = self.StartTracing(self.platform2)
self.assertTrue(devtool4.tracing_started)
self.StopTracing(tracing_agent2)
self.assertFalse(devtool4.tracing_started)
def testExceptionRaisedInStopTracing(self):
devtool1 = FakeDevtoolsClient(1)
devtool2 = FakeDevtoolsClient(2)
# Register devtools 1, 2 on platform 1
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool1, self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool2, self.platform1)
tracing_agent1 = self.StartTracing(self.platform1)
self.assertTrue(devtool1.tracing_started)
self.assertTrue(devtool2.tracing_started)
devtool2.will_raise_exception_in_stop_tracing = True
with self.assertRaises(chrome_tracing_agent.ChromeTracingStoppedError):
self.StopTracing(tracing_agent1)
devtool1.is_alive = False
devtool2.is_alive = False
# Register devtools 3 on platform 1 should not raise any exception.
devtool3 = FakeDevtoolsClient(3)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool3, self.platform1)
# Start & Stop tracing on platform 1 should work just fine.
tracing_agent2 = self.StartTracing(self.platform1)
self.StopTracing(tracing_agent2)
|
StarcoderdataPython
|
1802514
|
<reponame>vietbm-hcm/modoboa
"""SimpleUsers views."""
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from reversion import revisions as reversion
from modoboa.lib.web_utils import render_to_json_response
from ..forms import ForwardForm
from ..lib import needs_mailbox
from ..models import Alias
@login_required
@needs_mailbox()
@reversion.create_revision()
def forward(request, tplname="admin/forward.html"):
mb = request.user.mailbox
al = Alias.objects.filter(address=mb.full_address, internal=False).first()
if request.method == "POST":
form = ForwardForm(request.POST)
if form.is_valid():
if al is None:
al = Alias.objects.create(
address=mb.full_address, domain=mb.domain,
enabled=mb.user.is_active)
recipients = form.cleaned_data["dest"]
if form.cleaned_data["keepcopies"]:
recipients.append(mb.full_address)
al.set_recipients(recipients)
if len(recipients) == 0:
al.delete()
else:
al.post_create(request.user)
return render_to_json_response(_("Forward updated"))
return render_to_json_response(
{'form_errors': form.errors}, status=400
)
form = ForwardForm()
if al is not None and al.recipients:
recipients = list(al.recipients)
if al.aliasrecipient_set.filter(r_mailbox=mb).exists():
form.fields["keepcopies"].initial = True
recipients.remove(mb.full_address)
form.fields["dest"].initial = "\n".join(recipients)
return render_to_json_response({
"content": render_to_string(tplname, {"form": form}, request)
})
|
StarcoderdataPython
|
6486793
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Helper methods for generating a retrieval-augmented dataset."""
import json
import os
import random
from absl import logging
from language.casper.augment import casper_converters
from language.casper.utils import data_types
import tensorflow as tf
RawExample = data_types.RawExample
AugmentedExample = data_types.AugmentedExample
def expand_path_patterns(path_patterns):
"""Expands the glob patterns in the given list."""
paths = []
for pattern in path_patterns:
paths.extend(tf.io.gfile.glob(pattern))
return paths
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _to_tf_example(example):
feature = {
"inputs": _bytes_feature(example.inputs.encode()),
"targets": _bytes_feature(example.targets.encode()),
}
tf_ex = tf.train.Example(features=tf.train.Features(feature=feature))
return tf_ex.SerializeToString()
def read_orig_examples(data_paths):
"""Reads and deserializes JSONLs from files."""
source_files = expand_path_patterns(data_paths)
for source_file in source_files:
logging.info("Start reading from %s", source_file)
with tf.io.gfile.GFile(source_file) as reader:
for line in reader:
yield json.loads(line)
def write_examples(examples,
base_path,
file_format = "tfr",
num_shards = None):
"""Writes examples to sharded TSV or TFRecord files."""
if not num_shards:
# Automatically compute the number of shards
num_shards = max(1, len(examples) // 5000)
shard_size = len(examples) // num_shards + 1
for i in range(num_shards):
filename = "{}.{}-{:05d}-of-{:05d}".format(base_path, file_format, i,
num_shards)
shard_examples = examples[i * shard_size:(i + 1) * shard_size]
logging.info("Writing to %s", filename)
if file_format == "tsv":
with tf.io.gfile.GFile(filename, "w") as writer:
for example in shard_examples:
writer.write("{}\t{}\n".format(example.inputs, example.targets))
elif file_format == "tfr":
with tf.io.TFRecordWriter(filename) as writer:
for example in shard_examples:
writer.write(_to_tf_example(example))
else:
raise ValueError("Unknown file format: {}".format(file_format))
def create_augmented_examples(orig_examples,
converter,
split,
log_every = 1000):
"""Creates AugmentedExamples from the raw JSONLs.
Args:
orig_examples: An Iterable of deserialzied JSONs.
converter: A subclass of BaseExampleConverter.
split: Split name (used only for logging).
log_every: Logging frequency.
Returns:
a list of AugmentedExamples.
"""
examples = []
for i, orig_example in enumerate(orig_examples):
if i % log_every == 0:
logging.info("[%s:%d] Produced %d examples", split, i, len(examples))
converter.verify_exemplars(orig_example)
examples.extend(converter.convert(orig_example))
logging.info("[%s] Produced %d examples total.", split, len(examples))
return examples
def generate_dataset(orig_train,
orig_dev,
orig_test,
converter,
output_dir,
seed = 42,
log_every = 1000,
train_filename = "train",
dev_filename = "dev",
test_filename = "test",
file_format = "tfr"):
"""Generates and writes retrieval-augmented dataset files.
Args:
orig_train: Iterable of deserialized JSONs for training data.
orig_dev: Iterable of deserialized JSONs for dev data.
orig_test: Iterable of deserialized JSONs for test data.
converter: A subclass of BaseExampleConverter.
output_dir: Output directory.
seed: Random seed.
log_every: Logging frequency.
train_filename: Training data filename prefix.
dev_filename: Dev data filename prefix.
test_filename: Test data filename prefix.
file_format: Output file format.
"""
random.seed(seed)
tf.io.gfile.makedirs(output_dir)
converter.stats.clear()
examples = create_augmented_examples(
orig_train, converter, split="train", log_every=log_every)
if examples:
# Shuffle the training data.
random.shuffle(examples)
base_path = os.path.join(output_dir, train_filename)
write_examples(examples, base_path, file_format=file_format)
logging.info("Train data stats: %s", dict(converter.stats))
else:
logging.warn("No train examples generated.")
converter.stats.clear()
examples = create_augmented_examples(
orig_dev, converter, split="dev", log_every=log_every)
if examples:
# The dev data is not shuffled for easier error analysis.
base_path = os.path.join(output_dir, dev_filename)
write_examples(examples, base_path, file_format=file_format)
logging.info("Dev data stats: %s", dict(converter.stats))
else:
logging.warn("No dev examples generated.")
converter.stats.clear()
examples = create_augmented_examples(
orig_test, converter, split="test", log_every=log_every)
if examples:
# The test data is not shuffled for easier error analysis.
base_path = os.path.join(output_dir, test_filename)
write_examples(examples, base_path, file_format=file_format)
logging.info("Test data stats: %s", dict(converter.stats))
else:
logging.warn("No test examples generated.")
|
StarcoderdataPython
|
1998486
|
<reponame>wlbksy/robotics
import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import robotics as rbt
class TestConversions:
def test_rotation3D_axis_angle(self):
axis_z = np.array([0, 0, 1])
assert_array_almost_equal(np.eye(3), rbt.rotation3D_axis_angle(np.zeros(3)))
assert_array_almost_equal(
np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]),
rbt.rotation3D_axis_angle(axis_z * np.pi),
)
assert_array_almost_equal(
np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]]),
rbt.rotation3D_axis_angle(axis_z * np.pi / 2),
)
def test_rotation3D_to_axis_angle(self):
axis_z = np.array([0, 0, 1])
axis = rbt.rotation3D_to_axis_angle(np.eye(3))
assert_array_almost_equal(axis, np.zeros(3))
axis = rbt.rotation3D_to_axis_angle(
np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
)
assert_array_almost_equal(axis, axis_z * np.pi)
axis = rbt.rotation3D_to_axis_angle(
np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
)
assert_array_almost_equal(axis, axis_z * np.pi / 2)
|
StarcoderdataPython
|
5072878
|
<filename>database_schema.py
from sqlalchemy import UniqueConstraint
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, Integer, String, Table, Boolean, Text
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Normalization(Base):
# Table to store normalized forms of entities
__tablename__ = "normalization"
id = Column(Integer, primary_key=True)
text = Column(Text)
reference_name = Column(Text)
entity_type = Column(String(32), default="")
reference_source = Column(String(32), default="")
class Pair(Base):
__tablename__ = 'pairs'
id = Column(Integer, primary_key=True)
type = Column(String(32), default="")
source = Column(String(32), default="")
values = Column(String(32), default="")
entity1_id = Column(Integer, ForeignKey('entities.id'))
entity2_id = Column(Integer, ForeignKey('entities.id'))
document_id = Column(Integer, ForeignKey('documents.pmid'))
corpus_id = Column(Integer, ForeignKey('corpora.id'))
entity1 = relationship("Entity", foreign_keys=[entity1_id]) #, cascade="all, delete, delete-orphan", single_parent=True)
entity2 = relationship("Entity", foreign_keys=[entity2_id]) #, cascade="all, delete, delete-orphan", single_parent=True)
document = relationship("Document", foreign_keys=[document_id])
corpus = relationship("Corpus", foreign_keys=[corpus_id])
class Entity(Base):
__tablename__ = 'entities'
id = Column(Integer, primary_key=True)
start = Column(Integer)
end = Column(Integer)
ner = Column(String(32))
type = Column(String(32), default="")
text = Column(Text, default="")
normalized = Column(Text, default="")
sentence_id = Column(Integer, ForeignKey('sentences.id'))
corpus_id = Column(Integer, ForeignKey('corpora.id'))
start_token_id = Column(Integer, ForeignKey('tokens.id'))
end_token_id = Column(Integer, ForeignKey('tokens.id'))
__table_args__ = (UniqueConstraint('start', 'end', 'type', 'ner', 'sentence_id', 'corpus_id', name='_entity_annotation'),
)
sentence = relationship("Sentence", back_populates="entities")
token_start = relationship("Token", foreign_keys=[start_token_id])
token_end = relationship("Token", foreign_keys=[end_token_id])
corpus = relationship("Corpus", back_populates="entities")
def __repr__(self):
return "<Entity(start='%s', end='%s', text='%s')>" % (
self.start, self.end, self.text)
class Token(Base):
__tablename__ = 'tokens'
id = Column(Integer, primary_key=True)
start = Column(Integer) #sentence offset
end = Column(Integer)
text = Column(Text, default="")
order = Column(Integer)
pos = Column(String(32), default="")
lemma = Column(Text, default="")
sentence_id = Column(Integer, ForeignKey('sentences.id'))
sentence = relationship("Sentence", back_populates="tokens")
#entities_start = relationship("Entity", back_populates="token_start",
# cascade="all, delete, delete-orphan", foreign_keys="")
#entities_end = relationship("Entity", back_populates="token_end",
# cascade="all, delete, delete-orphan")
def __repr__(self):
return "<Token(start='%s', end='%s', text='%s')>" % (
self.start, self.end, self.text)
class Sentence(Base):
__tablename__ = 'sentences'
id = Column(Integer, primary_key=True)
offset = Column(Integer)
section = Column(String(10))
text = Column(Text, default="")
order = Column(Integer)
document_id = Column(Integer, ForeignKey('documents.pmid'))
document = relationship("Document", back_populates="sentences")
tokens = relationship("Token", order_by=Token.order, back_populates="sentence",
cascade="all, delete, delete-orphan")
entities = relationship("Entity", order_by=Entity.start, back_populates="sentence",
cascade="all, delete, delete-orphan")
CorpusDocument = Table('CorpusDocument', Base.metadata,
Column('id', Integer, primary_key=True),
Column('document_id', Integer, ForeignKey("documents.pmid")),
Column('corpus_id', Integer, ForeignKey("corpora.id"))
)
class Document(Base):
__tablename__ = 'documents'
pmid = Column(Integer, primary_key=True)
title = Column(Text)
abstract = Column(Text)
parsed = Column(Boolean, default=0)
#corpus_id = Column(Integer, ForeignKey('corpora.id'))
corpora = relationship("Corpus", secondary=CorpusDocument, back_populates="documents")
#corpus = relationship("Corpus", back_populates="documents")
sentences = relationship("Sentence", order_by=Sentence.order, back_populates="document",
cascade="all, delete, delete-orphan")
pairs = relationship("Pair", back_populates="document",
cascade="all, delete, delete-orphan")
def __repr__(self):
return "<Document(id='%s', title='%s', abstract='%s')>" % (
self.pmid, self.title, self.abstract)
class Corpus(Base):
__tablename__ = 'corpora'
id = Column(Integer, primary_key=True)
name = Column(String(32))
description = Column(String(32))
documents = relationship("Document", secondary=CorpusDocument, back_populates="corpora")
#documents = relationship("Document", order_by=Document.pmid, back_populates="corpus",
# cascade="all, delete, delete-orphan")
entities = relationship("Entity", back_populates="corpus")
pairs = relationship("Pair", back_populates="corpus")
def __repr__(self):
return "<Corpus(name='%s')>" % self.name
if __name__ == "__main__":
with open("config/database.config", 'r') as f:
for l in f:
if l.startswith("username"):
username = l.split("=")[-1].strip()
elif l.startswith("password"):
password = l.split("=")[-1].strip()
#engine = create_engine('sqlite:///database.sqlite', echo=False)
engine = create_engine('mysql+pymysql://{}:{}@localhost/immuno?charset=utf8mb4'.format(username, password), echo=True)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
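    # A short usage sketch once the tables exist (object values are illustrative):
    #   Session = sessionmaker(bind=engine)
    #   session = Session()
    #   doc = Document(pmid=1, title="Example title", abstract="Example abstract")
    #   corpus = Corpus(name="demo", description="demo corpus")
    #   corpus.documents.append(doc)
    #   session.add(corpus)
    #   session.commit()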
|
StarcoderdataPython
|
3366980
|
class Assembly:
def __init__(self, yaml):
self.yaml = yaml
def apply(self, ind):
pass
class Gibson(Assembly):
def __set_junction_size(self):
self.junction_size = self.yaml["Algorithm"]["assemblies"][
"mooda.assembly.Gibson"
]["junction_size"]
def initialise(self):
self.__set_junction_size()
"""
Takes as input all blocks if there is no overlap it
makes it
"""
def apply(self, population):
# for each block
for ind in population.individuals:
for block_index in range(len(ind.blocks[:-1])):
next_block = block_index + 1
# if there is no overlap make it
if ind.blocks[block_index][1] == ind.blocks[next_block][0]:
ind.blocks[block_index][1] = ind.blocks[block_index][1] + self.junction_size
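# A sketch of the YAML layout that Gibson expects when reading junction_size (the value
# below is illustrative only):
#
#   Algorithm:
#     assemblies:
#       mooda.assembly.Gibson:
#         junction_size: 40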
|
StarcoderdataPython
|
1970764
|
<reponame>JianyuTANG/GB-To-UTF8<gh_stars>0
import sys
import os
origin_name = sys.argv[1]
if len(sys.argv) == 3:
to_name = sys.argv[2]
elif len(sys.argv) == 2:
to_name = origin_name
else:
print("ERROR: wrong arg number")
os._exit(0)
def write_to_file(data, filename):
    try:
        with open(filename, 'wb') as f:
            f.write(data.encode('utf-8'))
    except OSError:
        print('ERROR: fail to create and write file: ' + filename)
with open(origin_name, 'rb') as f:
data = f.read()
try:
data = data.decode('utf-8')
print('the original file is utf-8')
print('no need to change')
os._exit(0)
except:
pass
try:
data = data.decode('gb2312')
f.close()
write_to_file(data, to_name)
print('the original encoding is gb2312')
print('already changed to UTF-8 to: ' + to_name)
os._exit(0)
except:
pass
try:
data = data.decode('gbk')
f.close()
write_to_file(data, to_name)
print('the original encoding is gbk')
print('already changed to UTF-8 to: ' + to_name)
os._exit(0)
except:
pass
try:
data = data.decode('gb18030')
f.close()
write_to_file(data, to_name)
print('the original encoding is gb18030')
print('already changed to UTF-8 to: ' + to_name)
os._exit(0)
except:
pass
print('sorry, transfer failed')
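# Usage sketch (the script name and file names are placeholders):
#   python to_utf8.py gbk_input.txt utf8_output.txt   # write the converted copy to a new file
#   python to_utf8.py gbk_input.txt                   # convert the file in place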
|
StarcoderdataPython
|
6409946
|
#!/usr/bin/python
"""
@brief Run QuickBot class for Beaglebone Black
@author <NAME> (<EMAIL>)
@date 02/07/2014
@version: 1.0
@copyright: Copyright (C) 2014, Georgia Tech Research Corporation see the
LICENSE file included with this software (see LICENSE file)
"""
import sys
import argparse
DESCRIPTION = ""
RTYPES = ('quick', 'ultra')
def main(options):
print "Running XBot"
print 'Running XBot Program'
print ' Base IP: ', options.ip
print ' Robot IP: ', options.rip
print ' Robot Type: ', options.rtype
if options.rtype == 'quick':
import xbots.quickbot
qb = xbots.quickbot.QuickBot(options.ip, options.rip)
qb.run()
elif options.rtype == 'ultra':
import xbots.ultrabot
qb = xbots.ultrabot.UltraBot(options.ip, options.rip)
qb.run()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'--ip', '-i',
default='192.168.7.1',
help="Computer ip (base ip)")
parser.add_argument(
'--rip', '-r',
default='192.168.7.2',
help="BBB ip (robot ip)")
parser.add_argument(
'--rtype', '-t',
default='quick',
help="Type of robot (%s)" % '|'.join(RTYPES))
options = parser.parse_args()
if options.rtype not in RTYPES:
print "Chosen type not exists use (%s)" % '|'.join(RTYPES)
sys.exit(0)
main(options)
|
StarcoderdataPython
|
8022031
|
import pytest
from .helpers import utils, speke_element_assertions
import xml.etree.ElementTree as ET
@pytest.fixture(scope="session")
def widevine_response(spekev2_url):
return utils.send_speke_request(utils.TEST_CASE_1_P_V_1_A_1, utils.PRESETS_WIDEVINE, spekev2_url)
@pytest.fixture(scope="session")
def playready_response(spekev2_url):
return utils.send_speke_request(utils.TEST_CASE_1_P_V_1_A_1, utils.PRESETS_PLAYREADY, spekev2_url)
@pytest.fixture(scope="session")
def fairplay_response(spekev2_url):
return utils.send_speke_request(utils.TEST_CASE_1_P_V_1_A_1, utils.PRESETS_FAIRPLAY, spekev2_url)
@pytest.fixture(scope="session")
def widevine_playready_response(spekev2_url):
return utils.send_speke_request(utils.TEST_CASE_1_P_V_1_A_1, utils.PRESETS_WIDEVINE_PLAYREADY, spekev2_url)
@pytest.fixture(scope="session")
def widevine_fairplay_response(spekev2_url):
return utils.send_speke_request(utils.TEST_CASE_1_P_V_1_A_1, utils.PRESETS_WIDEVINE_FAIRPLAY, spekev2_url)
@pytest.fixture(scope="session")
def playready_fairplay_response(spekev2_url):
return utils.send_speke_request(utils.TEST_CASE_1_P_V_1_A_1, utils.PRESETS_PLAYREADY_FAIRPLAY, spekev2_url)
@pytest.fixture(scope="session")
def widevine_playready_fairplay_response(spekev2_url):
return utils.send_speke_request(utils.TEST_CASE_1_P_V_1_A_1, utils.PRESETS_WIDEVINE_PLAYREADY_FAIRPLAY, spekev2_url)
def test_case_1_widevine(widevine_response):
root_cpix = ET.fromstring(widevine_response)
speke_element_assertions.check_cpix_version(root_cpix)
speke_element_assertions.validate_root_element(root_cpix)
speke_element_assertions.validate_mandatory_cpix_child_elements(root_cpix)
speke_element_assertions.validate_content_key_list_element(root_cpix, 2, "cenc")
speke_element_assertions.validate_drm_system_list_element(root_cpix, 2, 2, 2, 0, 0)
speke_element_assertions.validate_content_key_usage_rule_list_element(root_cpix, 2)
def test_case_1_playready(playready_response):
root_cpix = ET.fromstring(playready_response)
speke_element_assertions.check_cpix_version(root_cpix)
speke_element_assertions.validate_root_element(root_cpix)
speke_element_assertions.validate_mandatory_cpix_child_elements(root_cpix)
speke_element_assertions.validate_content_key_list_element(root_cpix, 2, "cenc")
speke_element_assertions.validate_drm_system_list_element(root_cpix, 2, 2, 0, 2, 0)
speke_element_assertions.validate_content_key_usage_rule_list_element(root_cpix, 2)
def test_case_1_fairplay(fairplay_response):
root_cpix = ET.fromstring(fairplay_response)
speke_element_assertions.check_cpix_version(root_cpix)
speke_element_assertions.validate_root_element(root_cpix)
speke_element_assertions.validate_mandatory_cpix_child_elements(root_cpix)
speke_element_assertions.validate_content_key_list_element(root_cpix, 2, "cbcs")
speke_element_assertions.validate_drm_system_list_element(root_cpix, 2, 2, 0, 0, 2)
speke_element_assertions.validate_content_key_usage_rule_list_element(root_cpix, 2)
def test_case_1_widevine_playready(widevine_playready_response):
root_cpix = ET.fromstring(widevine_playready_response)
speke_element_assertions.check_cpix_version(root_cpix)
speke_element_assertions.validate_root_element(root_cpix)
speke_element_assertions.validate_mandatory_cpix_child_elements(root_cpix)
speke_element_assertions.validate_content_key_list_element(root_cpix, 2, "cenc")
speke_element_assertions.validate_drm_system_list_element(root_cpix, 4, 2, 2, 2, 0)
speke_element_assertions.validate_content_key_usage_rule_list_element(root_cpix, 2)
def test_case_1_widevine_fairplay(widevine_fairplay_response):
root_cpix = ET.fromstring(widevine_fairplay_response)
speke_element_assertions.check_cpix_version(root_cpix)
speke_element_assertions.validate_root_element(root_cpix)
speke_element_assertions.validate_mandatory_cpix_child_elements(root_cpix)
speke_element_assertions.validate_content_key_list_element(root_cpix, 2, "cbcs")
speke_element_assertions.validate_drm_system_list_element(root_cpix, 4, 2, 2, 0, 2)
speke_element_assertions.validate_content_key_usage_rule_list_element(root_cpix, 2)
def test_case_1_playready_fairplay(playready_fairplay_response):
root_cpix = ET.fromstring(playready_fairplay_response)
speke_element_assertions.check_cpix_version(root_cpix)
speke_element_assertions.validate_root_element(root_cpix)
speke_element_assertions.validate_mandatory_cpix_child_elements(root_cpix)
speke_element_assertions.validate_content_key_list_element(root_cpix, 2, "cbcs")
speke_element_assertions.validate_drm_system_list_element(root_cpix, 4, 2, 0, 2, 2)
speke_element_assertions.validate_content_key_usage_rule_list_element(root_cpix, 2)
def test_case_1_widevine_playready_fairplay(widevine_playready_fairplay_response):
root_cpix = ET.fromstring(widevine_playready_fairplay_response)
speke_element_assertions.check_cpix_version(root_cpix)
speke_element_assertions.validate_root_element(root_cpix)
speke_element_assertions.validate_mandatory_cpix_child_elements(root_cpix)
speke_element_assertions.validate_content_key_list_element(root_cpix, 2, "cbcs")
speke_element_assertions.validate_drm_system_list_element(root_cpix, 6, 2, 2, 2, 2)
speke_element_assertions.validate_content_key_usage_rule_list_element(root_cpix, 2)
|
StarcoderdataPython
|
8027776
|
<reponame>tgodzik/intellij-community
from scapy import all as scapy
print('TEST SUCEEDED!')
|
StarcoderdataPython
|
18565
|
<filename>src/planet_box_extractor/extractor.py
from .geo_utils import boundingBox
import time
import PIL.Image
import urllib.request
import mercantile
import numpy as np
class PlanetBoxExtractor:
"""
Extract bounding boxes from satellite images using Planet Tiles API
@radius: distance from the center of the image to the edge in kilometers
    @zoom: zoom level of the Mercator tiles (as handled by the mercantile library)
@map_id: url-id of the basemap from the Planet Tiles API, can be found in the Planet Explorer
@api_key: Planet Tiles API Key
    base_url: URL template for the Planet Tiles API, containing the API_KEY
    IMGSIZE: the size of the images from the Planet Tiles API (256 by default)
    locations: order of the tiles (up-left, up-right, down-left, down-right)
Usage:
extractor = PlanetBoxExtractor(radius, zoom, map_id, API_KEY)
image = extractor.Process(latitude, longitude)
"""
def __init__(self, radius, zoom, map_id, api_key):
self.radius = radius
self.zoom = zoom
self.base_url = 'https://tiles.planet.com/basemaps/v1/planet-tiles/' + map_id + '/gmap/{}/{}/{}.png?api_key=' + api_key
self.IMGSIZE = 256
self.locations = ['upleft', 'upright', 'downleft', 'downright']
def Download(self, latitude, longitude):
"""
        Method to retrieve Mercator tiles corresponding to the bounding box around longitude and latitude with radius self.radius
Returns a list of 4 items, either the image of a tile or an empty object (None)
Parameters
----------
latitude: latitude coordinate of the center of the desired bounding box in degrees
longitude: longitude coordinate of the center of the desired bounding box in degrees
Returns
-------
        images: list of PNG images corresponding to the Mercator tiles
"""
minLat, minLon, maxLat, maxLon = boundingBox(latitude, longitude, self.radius)
tiles = [
mercantile.tile(minLon, maxLat, self.zoom), # upleft
mercantile.tile(maxLon, maxLat, self.zoom), # upright
mercantile.tile(minLon, minLat, self.zoom), # downleft
mercantile.tile(maxLon, minLat, self.zoom), # downright
]
urls = []
images = []
for i, location in enumerate(self.locations):
tile = tiles[i]
url = self.base_url.format(tile.z, tile.x, tile.y)
if url in urls:
images.append(None)
else:
                urls.append(url)
images.append(PIL.Image.open(urllib.request.urlopen(url)))
time.sleep(0.2)
return images
def Stitch(self, images):
"""
        Method to place Mercator tile images in the correct order
Parameters
----------
images: list of images of tiles and empty objects (None) for empty tiles
Returns
-------
img: stitched image with size (self.IMGSIZE * n) x (self.IMGSIZE * m) with n the number of tile rows and m the number of tile columns
"""
total = [(img is not None) for i, img in enumerate(images)]
if sum(total) == 1:
padx, pady = 0, 0
img = np.zeros((self.IMGSIZE, self.IMGSIZE, 3), 'uint8')
elif sum(total) == 2:
if sum(total[:2]) % 2 == 0:
                # both tiles in the same row (side by side)
padx, pady = 0, self.IMGSIZE
img = np.zeros((self.IMGSIZE, self.IMGSIZE * 2, 3), 'uint8')
else:
                # both tiles in the same column (stacked)
padx, pady = self.IMGSIZE, 0
img = np.zeros((self.IMGSIZE * 2, self.IMGSIZE, 3), 'uint8')
elif sum(total) == 4:
padx, pady = self.IMGSIZE, self.IMGSIZE
img = np.zeros((self.IMGSIZE * 2, self.IMGSIZE * 2, 3), 'uint8')
#
for location, image in zip(self.locations, images):
if image is None:
continue
if location == 'upleft':
img[:self.IMGSIZE, :self.IMGSIZE] = np.array(image)[:,:,:3]
elif location == 'upright':
img[:self.IMGSIZE, self.IMGSIZE:] = np.array(image)[:,:,:3]
elif location == 'downright':
img[self.IMGSIZE:, self.IMGSIZE:] = np.array(image)[:,:,:3]
elif location == 'downleft':
img[self.IMGSIZE:, :self.IMGSIZE] = np.array(image)[:,:,:3]
return img
def coord2pixel(self, lon, lat, box):
"""
        Method to convert longitude and latitude to their corresponding pixel location given the bounding box of the Mercator tile containing the coordinates
Parameters
----------
lon: longitude in degrees
lat: latitude in degrees
        box: bounding box of the Mercator tile containing the coordinates returned from mercantile.bounds()
Returns
-------
tuple of 2 pixel locations corresponding to the given longitude and latitude
"""
return int((lon - box.west)/(box.east - box.west)*self.IMGSIZE), int((lat - box.north)/(box.south - box.north)*self.IMGSIZE)
def Bounds(self, latitude, longitude):
"""
Method to calculate the pixel locations of the bounding box with a radius of self.radius given the center longitude and latitude coordinates
Parameters
----------
latitude: latitude coordinate of the center of the bounding box in degrees
longitude: longitude coordinate of the center of the bounding box in degrees
Returns
-------
minY: starting pixel location of the bounding box on the Y-axis
maxY: ending pixel location of the bounding box on the Y-axis
minX: starting pixel location of the bounding box on the X-axis
maxX: ending pixel location of the bounding box on the X-axis
"""
minLat, minLon, maxLat, maxLon = boundingBox(latitude, longitude, self.radius)
minX, minY = self.coord2pixel(minLon, maxLat, mercantile.bounds(mercantile.tile(longitude, latitude, self.zoom)))
maxX, maxY = self.coord2pixel(maxLon, minLat, mercantile.bounds(mercantile.tile(longitude, latitude, self.zoom)))
if minY < 0:
minY += self.IMGSIZE
maxY += self.IMGSIZE
if minX < 0:
minX += self.IMGSIZE
maxX += self.IMGSIZE
return minY, maxY, minX, maxX
def Crop(self, image, minY, maxY, minX, maxX):
"""
Method to perform the cropping of the stitched image to return the bounding box region
Parameters
----------
        image: stitched image of the Mercator tiles
minY: starting pixel location of the bounding box on the Y-axis
maxY: ending pixel location of the bounding box on the Y-axis
minX: starting pixel location of the bounding box on the X-axis
maxX: ending pixel location of the bounding box on the X-axis
Returns
-------
partial image corresponding to the bounding box region
"""
return image[minY:maxY, minX:maxX]
def Process(self, latitude, longitude):
"""
Method that combines the main steps of the API to extract a bounding box image given a latitude and longitude
Parameters
----------
latitude: latitude coordinate of the center of the bounding box in degrees
        longitude: longitude coordinate of the center of the bounding box in degrees
Returns
-------
image: partial image corresponding to the bounding box region
"""
images = self.Download(latitude, longitude)
stitched_image = self.Stitch(images)
minY, maxY, minX, maxX = self.Bounds(latitude, longitude)
image = self.Crop(stitched_image, minY, maxY, minX, maxX)
return image
if __name__ == '__main__':
latitude, longitude = 5, 20
zoom = 15
radius = 0.2
API_KEY = ''
map_id = ''
extractor = PlanetBoxExtractor(radius, zoom, map_id, API_KEY)
image = extractor.Process(latitude, longitude)
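    # One possible way to persist the crop, assuming the placeholder API_KEY/map_id above
    # are replaced with real values (the output filename is illustrative):
    #   PIL.Image.fromarray(image).save('bounding_box.png')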
|
StarcoderdataPython
|
1864584
|
# Copyright (c) 2011-2014 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import json
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from huxley.accounts.models import User
from huxley.api.tests import (CreateAPITestCase, DestroyAPITestCase,
ListAPITestCase, PartialUpdateAPITestCase,
RetrieveAPITestCase)
from huxley.utils.test import TestSchools, TestUsers
class UserDetailGetTestCase(RetrieveAPITestCase):
url_name = 'api:user_detail'
def test_anonymous_user(self):
'''It should reject request from an anonymous user.'''
user = TestUsers.new_user()
response = self.get_response(user.id)
self.assertNotAuthenticated(response)
def test_other_user(self):
'''It should reject request from another user.'''
user1 = TestUsers.new_user(username='user1')
user2 = TestUsers.new_user(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(user1.id)
self.assertPermissionDenied(response)
def test_superuser(self):
'''It should return the correct fields for a superuser.'''
user1 = TestUsers.new_user(username='user1')
user2 = TestUsers.new_superuser(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(user1.id)
self.assertEqual(response.data, {
'id': user1.id,
'username': user1.username,
'first_name': user1.first_name,
'last_name': user1.last_name,
'user_type': user1.user_type,
'school': user1.school_id,
'committee': user1.committee_id})
def test_self(self):
'''It should return the correct fields for a single user.'''
school = TestSchools.new_school()
user = school.advisor
self.client.login(username=user.username, password='<PASSWORD>')
response = self.get_response(user.id)
self.assertEqual(response.data, {
'id': user.id,
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'user_type': user.user_type,
'school': {
'id': school.id,
'registered': school.registered.isoformat(),
'name': school.name,
'address': school.address,
'city': school.city,
'state': school.state,
'zip_code': school.zip_code,
'country': school.country,
'primary_name': school.primary_name,
'primary_gender': school.primary_gender,
'primary_email': school.primary_email,
'primary_phone': school.primary_phone,
'primary_type': school.primary_type,
'secondary_name': school.secondary_name,
'secondary_gender': school.secondary_gender,
'secondary_email': school.secondary_email,
'secondary_phone': school.secondary_phone,
'secondary_type': school.secondary_type,
'program_type': school.program_type,
'times_attended': school.times_attended,
'international': school.international,
'waitlist': school.waitlist,
'beginner_delegates': school.beginner_delegates,
'intermediate_delegates': school.intermediate_delegates,
'advanced_delegates': school.advanced_delegates,
'spanish_speaking_delegates': school.spanish_speaking_delegates,
'country_preferences': school.country_preference_ids,
'prefers_bilingual': school.prefers_bilingual,
'prefers_specialized_regional':
school.prefers_specialized_regional,
'prefers_crisis': school.prefers_crisis,
'prefers_alternative': school.prefers_alternative,
'prefers_press_corps': school.prefers_press_corps,
'registration_comments': school.registration_comments,
'fees_owed': float(school.fees_owed),
'fees_paid': float(school.fees_paid),
},
'committee': user.committee_id})
def test_chair(self):
'''It should have the correct fields for chairs.'''
user = TestUsers.new_user(user_type=User.TYPE_CHAIR,
committee_id=4)
self.client.login(username='testuser', password='<PASSWORD>')
response = self.get_response(user.id)
self.assertEqual(response.data, {
'id': user.id,
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'user_type': user.user_type,
'school': user.school_id,
'committee': user.committee_id})
class UserDetailDeleteTestCase(DestroyAPITestCase):
url_name = 'api:user_detail'
def setUp(self):
self.user = TestUsers.new_user(username='user1', password='<PASSWORD>')
def test_anonymous_user(self):
'''It should reject the request from an anonymous user.'''
response = self.get_response(self.user.id)
self.assertNotAuthenticated(response)
self.assertTrue(User.objects.filter(id=self.user.id).exists())
def test_other_user(self):
'''It should reject the request from another user.'''
TestUsers.new_user(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(self.user.id)
self.assertPermissionDenied(response)
self.assertTrue(User.objects.filter(id=self.user.id).exists())
def test_self(self):
'''It should allow a user to delete themself.'''
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response(self.user.id)
self.assertEqual(response.status_code, 204)
self.assertFalse(User.objects.filter(id=self.user.id).exists())
def test_superuser(self):
'''It should allow a superuser to delete a user.'''
TestUsers.new_superuser(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(self.user.id)
self.assertEqual(response.status_code, 204)
self.assertFalse(User.objects.filter(id=self.user.id).exists())
class UserDetailPatchTestCase(PartialUpdateAPITestCase):
url_name = 'api:user_detail'
params = {'first_name': 'first',
'last_name': 'last'}
def setUp(self):
self.user = TestUsers.new_user(username='user1', password='<PASSWORD>')
def test_anonymous_user(self):
'''An anonymous user should not be able to change information.'''
response = self.get_response(self.user.id, params=self.params)
self.assertNotAuthenticated(response)
user = User.objects.get(id=self.user.id)
self.assertEqual(user.first_name, 'Test')
self.assertEqual(user.last_name, 'User')
def test_other_user(self):
'''Another user should not be able to change information about any other user.'''
TestUsers.new_user(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(self.user.id, params=self.params)
self.assertPermissionDenied(response)
user = User.objects.get(id=self.user.id)
self.assertEqual(user.first_name, 'Test')
self.assertEqual(user.last_name, 'User')
def test_self(self):
'''A User should be allowed to change information about himself.'''
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response(self.user.id, params=self.params)
user = User.objects.get(id=self.user.id)
self.assertEqual(response.data['first_name'], user.first_name)
self.assertEqual(response.data['last_name'], user.last_name)
def test_superuser(self):
'''A superuser should be allowed to change information about a user.'''
TestUsers.new_superuser(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(self.user.id, params=self.params)
user = User.objects.get(id=self.user.id)
self.assertEqual(response.data['first_name'], user.first_name)
self.assertEqual(response.data['last_name'], user.last_name)
class UserListGetTestCase(ListAPITestCase):
url_name = 'api:user_list'
def test_anonymous_user(self):
'''It should reject the request from an anonymous user.'''
TestUsers.new_user(username='user1')
TestUsers.new_user(username='user2')
response = self.get_response()
self.assertNotAuthenticated(response)
def test_user(self):
'''It should reject the request from a regular user.'''
TestUsers.new_user(username='user1', password='<PASSWORD>')
TestUsers.new_user(username='user2')
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response()
self.assertPermissionDenied(response)
def test_superuser(self):
'''It should allow a superuser to list all users.'''
user1 = TestUsers.new_superuser(username='user1', password='<PASSWORD>')
user2 = TestUsers.new_user(username='user2')
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response()
self.assertEqual(response.data, [
{'id': user1.id,
'username': user1.username,
'first_name': user1.first_name,
'last_name': user1.last_name,
'user_type': user1.user_type,
'school': user1.school_id,
'committee': user1.committee_id},
{'id': user2.id,
'username': user2.username,
'first_name': user2.first_name,
'last_name': user2.last_name,
'user_type': user2.user_type,
'school': user2.school_id,
'committee': user2.committee_id}])
class UserListPostTestCase(CreateAPITestCase):
url_name = 'api:user_list'
params = {'username': 'Kunal',
'password': 'password',
'first_name': 'Kunal',
'last_name': 'Mehta'}
def test_valid(self):
params = self.get_params()
response = self.get_response(params)
user_query = User.objects.filter(id=response.data['id'])
self.assertTrue(user_query.exists())
user = User.objects.get(id=response.data['id'])
self.assertEqual(response.data, {
'id': user.id,
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'user_type': User.TYPE_ADVISOR,
'school': user.school_id,
'email': user.email})
def test_empty_username(self):
response = self.get_response(params=self.get_params(username=''))
self.assertEqual(response.data, {
'username': ['This field is required.']})
def test_taken_username(self):
TestUsers.new_user(username='_Kunal', password='<PASSWORD>')
response = self.get_response(params=self.get_params(username='_Kunal'))
self.assertEqual(response.data, {
'username': ['This username is already taken.']})
def test_invalid_username(self):
response = self.get_response(params=self.get_params(username='>Kunal'))
self.assertEqual(response.data, {
'username': ['Usernames may contain alphanumerics, underscores, '
'and/or hyphens only.']})
def test_empty_password(self):
response = self.get_response(params=self.get_params(password=''))
self.assertEqual(response.data, {
'password': ['<PASSWORD>.']})
def test_invalid_password(self):
response = self.get_response(params=self.get_params(password='><PASSWORD>'))
self.assertEqual(response.data, {
'password': ['Password contains invalid characters.']})
def test_empty_first_name(self):
response = self.get_response(params=self.get_params(first_name=''))
self.assertEqual(response.data, {
'first_name': ['This field is required.']})
def test_empty_last_name(self):
response = self.get_response(params=self.get_params(last_name=''))
self.assertEqual(response.data, {
'last_name': ['This field is required.']})
def test_username_length(self):
response = self.get_response(params=self.get_params(username='user'))
self.assertEqual(response.data, {
'username': ['Username must be at least 5 characters.']})
def test_password_length(self):
response = self.get_response(params=self.get_params(password='<PASSWORD>'))
self.assertEqual(response.data, {
'password': ['Password must be at least 6 characters.']})
class CurrentUserTestCase(TestCase):
def setUp(self):
self.client = Client()
self.url = reverse('api:current_user')
def get_data(self, url):
return json.loads(self.client.get(url).content)
def test_login(self):
user = TestUsers.new_user(username='lol', password='<PASSWORD>')
user2 = TestUsers.new_user(username='bunny', password='<PASSWORD>')
credentials = {'username': 'lol', 'password': '<PASSWORD>'}
response = self.client.post(self.url,
data=json.dumps(credentials),
content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(self.client.session['_auth_user_id'], user.id)
credentials = {'username': 'bunny', 'password': '<PASSWORD>'}
response = self.client.post(self.url,
data=json.dumps(credentials),
content_type='application/json')
self.assertEqual(self.client.session['_auth_user_id'], user.id)
data = json.loads(response.content)
self.assertEqual(data['detail'],
'Another user is currently logged in.')
def test_logout(self):
user = TestUsers.new_user(username='lol', password='<PASSWORD>')
self.client.login(username='lol', password='<PASSWORD>')
self.assertEqual(self.client.session['_auth_user_id'], user.id)
response = self.client.delete(self.url)
self.assertEqual(response.status_code, 204)
self.assertTrue('_auth_user_id' not in self.client.session)
def test_get(self):
data = self.get_data(self.url)
self.assertEqual(len(data.keys()), 1)
self.assertEqual(data['detail'], 'Not found')
school = TestSchools.new_school()
user = school.advisor
self.client.login(username=user.username, password='<PASSWORD>')
data = self.get_data(self.url)
self.assertEqual(len(data.keys()), 7)
self.assertEqual(data['id'], user.id)
self.assertEqual(data['username'], user.username)
self.assertEqual(data['first_name'], user.first_name)
self.assertEqual(data['last_name'], user.last_name)
self.assertEqual(data['user_type'], User.TYPE_ADVISOR)
self.assertEqual(data['school'], {
'id': school.id,
'registered': school.registered.isoformat(),
'name': school.name,
'address': school.address,
'city': school.city,
'state': school.state,
'zip_code': school.zip_code,
'country': school.country,
'primary_name': school.primary_name,
'primary_gender': school.primary_gender,
'primary_email': school.primary_email,
'primary_phone': school.primary_phone,
'primary_type': school.primary_type,
'secondary_name': school.secondary_name,
'secondary_gender': school.secondary_gender,
'secondary_email': school.secondary_email,
'secondary_phone': school.secondary_phone,
'secondary_type': school.secondary_type,
'program_type': school.program_type,
'times_attended': school.times_attended,
'international': school.international,
'waitlist': school.waitlist,
'beginner_delegates': school.beginner_delegates,
'intermediate_delegates': school.intermediate_delegates,
'advanced_delegates': school.advanced_delegates,
'spanish_speaking_delegates': school.spanish_speaking_delegates,
'country_preferences': school.country_preference_ids,
'prefers_bilingual': school.prefers_bilingual,
'prefers_specialized_regional': school.prefers_specialized_regional,
'prefers_crisis': school.prefers_crisis,
'prefers_alternative': school.prefers_alternative,
'prefers_press_corps': school.prefers_press_corps,
'registration_comments': school.registration_comments,
'fees_owed': float(school.fees_owed),
'fees_paid': float(school.fees_paid),
})
|
StarcoderdataPython
|
3466747
|
from features.fetch import DataSet
from features.output import ExchangesOutput
from model.exchanges import ExchangesSolver
import numpy as np
def solve_all(dataset, verbose=False, with_redistribution=True, output_file=None):
exchangesOutput = ExchangesOutput()
grids = dataset.list_grids()
for _, g in grids.iterrows():
supplier = g['Supply Site Code']
sku = g['SKU']
grid = dataset.select_grid(supplier=supplier, sku=sku)
x_opt_dist, x_opt_dep, x_opt_hub = grid.get_xopt()
supplier_distances = grid.get_supplier_distances()
destination_distances = grid.get_destination_distances()
location_codes = grid.get_all_location_codes()
x_opt = np.concatenate([x_opt_dist, x_opt_dep, x_opt_hub])
current_stock = grid.get_total_current_stock()
if with_redistribution:
from_supply, exchanges = ExchangesSolver(grid, x_opt_dist, x_opt_dep,
x_opt_hub, supplier_distances,
destination_distances).solve()
else:
exchanges = np.matrix((0, 0))
from_supply = x_opt - current_stock
exchangesOutput.add_data(supplier, sku, location_codes,
from_supply, exchanges)
exchangesOutput.print(output_file)
dataset_with_redistribution = DataSet('output/distribution_output_quadprog.csv')
dataset_without_redistribution = DataSet('output/no_redistribution.csv')
solve_all(dataset_without_redistribution, with_redistribution=False, output_file='output/exchanges_output_no_redistribution.csv')
solve_all(dataset_with_redistribution, with_redistribution=True, output_file='output/exchanges_output.csv')
|
StarcoderdataPython
|
6441349
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
from skimage.feature import blob_dog, blob_log, blob_doh
import imutils
import argparse
import os
import math
from classification import training, getLabel
SIGNS = ["nouvelle",
"STOP",
"TURN LEFT",
"TURN RIGHT",
"DO NOT TURN LEFT",
"DO NOT TURN RIGHT",
"ONE WAY",
"SPEED LIMIT 30",
"OTHER",
"PLACA JEDIDA",
"panneaux de danger ",
"Panneaux de fin obligation",
"TURN ",
"SPEED LIMIT 70",
"SPEED LIMIT 90",
"SPEED LIMIT 100",
"SPEED LIMIT 130",
"TRAVEAUX"
]
# Clean up all previously generated .png files
def clean_images():
file_list = os.listdir('./')
for file_name in file_list:
if '.png' in file_name:
os.remove(file_name)
### Preprocess image
def constrastLimit(image):
img_hist_equalized = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
channels = cv2.split(img_hist_equalized)
channels[0] = cv2.equalizeHist(channels[0])
img_hist_equalized = cv2.merge(channels)
img_hist_equalized = cv2.cvtColor(img_hist_equalized, cv2.COLOR_YCrCb2BGR)
return img_hist_equalized
def LaplacianOfGaussian(image):
    LoG_image = cv2.GaussianBlur(image, (3,3), 0) # parameter (Gaussian kernel size)
gray = cv2.cvtColor( LoG_image, cv2.COLOR_BGR2GRAY)
    LoG_image = cv2.Laplacian(gray, cv2.CV_8U, ksize=3, scale=3, delta=2) # parameters
LoG_image = cv2.convertScaleAbs(LoG_image)
return LoG_image
def binarization(image):
thresh = cv2.threshold(image,32,255,cv2.THRESH_BINARY)[1]
#thresh = cv2.adaptiveThreshold(image,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)
return thresh
def preprocess_image(image):
image = constrastLimit(image)
image = LaplacianOfGaussian(image)
image = binarization(image)
return image
# Find Signs
def removeSmallComponents(image, threshold):
#find all your connected components (white blobs in your image)
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=8)
sizes = stats[1:, -1]; nb_components = nb_components - 1
img2 = np.zeros((output.shape),dtype = np.uint8)
#for every component in the image, you keep it only if it's above threshold
for i in range(0, nb_components):
if sizes[i] >= threshold:
img2[output == i + 1] = 255
return img2
def findContour(image):
#find contours in the thresholded image
cnts = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE )
    cnts = imutils.grab_contours(cnts)  # handles the differing return signatures of OpenCV 2/3/4
return cnts
def contourIsSign(perimeter, centroid, threshold):
# perimeter, centroid, threshold
# # Compute signature of contour
result=[]
for p in perimeter:
p = p[0]
distance = sqrt((p[0] - centroid[0])**2 + (p[1] - centroid[1])**2)
result.append(distance)
max_value = max(result)
signature = [float(dist) / max_value for dist in result ]
# Check signature of contour.
temp = sum((1 - s) for s in signature)
temp = temp / len(signature)
if temp < threshold: # is the sign
return True, max_value + 2
else: # is not the sign
return False, max_value + 2
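# A minimal illustration (not part of the original pipeline) of what contourIsSign measures:
# for a near-circular contour every point lies roughly max_value away from the centroid, so
# the normalized signature is close to 1 everywhere and the mean deviation stays small.
#   circle = [[[int(50 + 40 * math.cos(t / 10.0)), int(50 + 40 * math.sin(t / 10.0))]]
#             for t in range(63)]                    # synthetic contour points
#   contourIsSign(circle, [50, 50], 0.35)            # -> (True, ~42), i.e. accepted as a sign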
#crop sign
def cropContour(image, center, max_distance):
width = image.shape[1]
height = image.shape[0]
top = max([int(center[0] - max_distance), 0])
bottom = min([int(center[0] + max_distance + 1), height-1])
left = max([int(center[1] - max_distance), 0])
right = min([int(center[1] + max_distance+1), width-1])
print(left, right, top, bottom)
return image[left:right, top:bottom]
def cropSign(image, coordinate):
width = image.shape[1]
height = image.shape[0]
top = max([int(coordinate[0][1]), 0])
bottom = min([int(coordinate[1][1]), height-1])
left = max([int(coordinate[0][0]), 0])
right = min([int(coordinate[1][0]), width-1])
#print(top,left,bottom,right)
return image[top:bottom,left:right]
def findLargestSign(image, contours, threshold, distance_theshold):
max_distance = 0
coordinate = None
sign = None
for c in contours:
M = cv2.moments(c)
if M["m00"] == 0:
continue
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
is_sign, distance = contourIsSign(c, [cX, cY], 1-threshold)
if is_sign and distance > max_distance and distance > distance_theshold:
max_distance = distance
coordinate = np.reshape(c, [-1,2])
left, top = np.amin(coordinate, axis=0)
right, bottom = np.amax(coordinate, axis = 0)
coordinate = [(left-2,top-2),(right+3,bottom+1)]
sign = cropSign(image,coordinate)
return sign, coordinate
def findSigns(image, contours, threshold, distance_theshold):
signs = []
coordinates = []
for c in contours:
# compute the center of the contour
M = cv2.moments(c)
if M["m00"] == 0:
continue
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
is_sign, max_distance = contourIsSign(c, [cX, cY], 1-threshold)
if is_sign and max_distance > distance_theshold:
sign = cropContour(image, [cX, cY], max_distance)
signs.append(sign)
coordinate = np.reshape(c, [-1,2])
top, left = np.amin(coordinate, axis=0)
right, bottom = np.amax(coordinate, axis = 0)
coordinates.append([(top-2,left-2),(right+1,bottom+1)])
return signs, coordinates
def localization(image, min_size_components, similitary_contour_with_circle, model, count, current_sign_type):
original_image = image.copy()
binary_image = preprocess_image(image)
binary_image = removeSmallComponents(binary_image, min_size_components)
binary_image = cv2.bitwise_and(binary_image,binary_image, mask=remove_other_color(image))
#binary_image = remove_line(binary_image)
cv2.imshow('BINARY IMAGE', binary_image)
contours = findContour(binary_image)
#signs, coordinates = findSigns(image, contours, similitary_contour_with_circle, 15)
sign, coordinate = findLargestSign(original_image, contours, similitary_contour_with_circle, 15)
text = ""
sign_type = -1
i = 0
if sign is not None:
sign_type = getLabel(model, sign)
sign_type = sign_type if sign_type <= 17 else 8
text = SIGNS[sign_type]
cv2.imwrite(str(count)+'_'+text+'.png', sign)
if sign_type > 0 and sign_type != current_sign_type:
cv2.rectangle(original_image, coordinate[0],coordinate[1], (0, 255, 0), 1)
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(original_image,text,(coordinate[0][0], coordinate[0][1] -15), font, 1,(0,0,255),2,cv2.LINE_4)
return coordinate, original_image, sign_type, text
def remove_line(img):
gray = img.copy()
edges = cv2.Canny(gray,50,150,apertureSize = 3)
minLineLength = 5
maxLineGap = 3
lines = cv2.HoughLinesP(edges,1,np.pi/180,15,minLineLength,maxLineGap)
mask = np.ones(img.shape[:2], dtype="uint8") * 255
if lines is not None:
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(mask,(x1,y1),(x2,y2),(0,0,0),2)
return cv2.bitwise_and(img, img, mask=mask)
def remove_other_color(img):
frame = cv2.GaussianBlur(img, (3,3), 0)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of blue color in HSV
lower_blue = np.array([100,128,0])
upper_blue = np.array([215,255,255])
# Threshold the HSV image to get only blue colors
mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)
lower_white = np.array([0,0,128], dtype=np.uint8)
upper_white = np.array([255,255,255], dtype=np.uint8)
    # Threshold the HSV image to get only white colors
mask_white = cv2.inRange(hsv, lower_white, upper_white)
lower_black = np.array([0,0,0], dtype=np.uint8)
upper_black = np.array([170,150,50], dtype=np.uint8)
mask_black = cv2.inRange(hsv, lower_black, upper_black)
mask_1 = cv2.bitwise_or(mask_blue, mask_white)
mask = cv2.bitwise_or(mask_1, mask_black)
# Bitwise-AND mask and original image
#res = cv2.bitwise_and(frame,frame, mask= mask)
return mask
def main(args):
#Clean previous image
clean_images()
#Training phase
model = training()
vidcap = cv2.VideoCapture(args.file_name)
fps = vidcap.get(cv2.CAP_PROP_FPS)
width = vidcap.get(3) # float
height = vidcap.get(4) # float
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('output.avi',fourcc, fps , (640,480))
# initialize the termination criteria for cam shift, indicating
    # a maximum of ten iterations or movement by at least one pixel
# along with the bounding box of the ROI
termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
roiBox = None
roiHist = None
success = True
similitary_contour_with_circle = 0.65 # parameter
count = 0
current_sign = None
current_text = ""
current_size = 0
sign_count = 0
coordinates = []
position = []
file = open("Output.txt", "w")
while True:
success,frame = vidcap.read()
if not success:
print("la video est terminée")
break
width = frame.shape[1]
height = frame.shape[0]
#frame = cv2.resize(frame, (640,int(height/(width/640))))
frame = cv2.resize(frame, (640,480))
print("Frame:{}".format(count))
# image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
coordinate, image, sign_type, text = localization(frame, args.min_size_components, args.similitary_contour_with_circle, model, count, current_sign)
if coordinate is not None:
cv2.rectangle(image, coordinate[0],coordinate[1], (255, 255, 255), 1)
print("Sign:{}".format(sign_type))
if sign_type > 0 and (not current_sign or sign_type != current_sign):
current_sign = sign_type
current_text = text
top = int(coordinate[0][1]*1.05)
left = int(coordinate[0][0]*1.05)
bottom = int(coordinate[1][1]*0.95)
right = int(coordinate[1][0]*0.95)
position = [count, sign_type if sign_type <= 17 else 8, coordinate[0][0], coordinate[0][1], coordinate[1][0], coordinate[1][1]]
cv2.rectangle(image, coordinate[0],coordinate[1], (0, 255, 0), 1)
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(image,text,(coordinate[0][0], coordinate[0][1] -15), font, 1,(0,0,255),2,cv2.LINE_4)
tl = [left, top]
br = [right,bottom]
print(tl, br)
current_size = math.sqrt(math.pow((tl[0]-br[0]),2) + math.pow((tl[1]-br[1]),2))
# grab the ROI for the bounding box and convert it
# to the HSV color space
roi = frame[tl[1]:br[1], tl[0]:br[0]]
roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
#roi = cv2.cvtColor(roi, cv2.COLOR_BGR2LAB)
# compute a HSV histogram for the ROI and store the
# bounding box
roiHist = cv2.calcHist([roi], [0], None, [17], [0, 180])
roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
roiBox = (tl[0], tl[1], br[0], br[1])
elif current_sign:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)
# apply cam shift to the back projection, convert the
# points to a bounding box, and then draw them
(r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
pts = np.int0(cv2.boxPoints(r))
s = pts.sum(axis = 1)
tl = pts[np.argmin(s)]
br = pts[np.argmax(s)]
size = math.sqrt(pow((tl[0]-br[0]),2) +pow((tl[1]-br[1]),2))
print(size)
if current_size < 1 or size < 1 or size / current_size > 30 or math.fabs((tl[0]-br[0])/(tl[1]-br[1])) > 2 or math.fabs((tl[0]-br[0])/(tl[1]-br[1])) < 0.5:
current_sign = None
print("Stop tracking")
else:
current_size = size
if sign_type > 0:
top = int(coordinate[0][1])
left = int(coordinate[0][0])
bottom = int(coordinate[1][1])
right = int(coordinate[1][0])
position = [count, sign_type if sign_type <= 17 else 8, left, top, right, bottom]
cv2.rectangle(image, coordinate[0],coordinate[1], (0, 255, 0), 1)
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(image,text,(coordinate[0][0], coordinate[0][1] -15), font, 1,(0,0,255),2,cv2.LINE_4)
elif current_sign:
position = [count, sign_type if sign_type <= 17 else 8, tl[0], tl[1], br[0], br[1]]
cv2.rectangle(image, (tl[0], tl[1]),(br[0], br[1]), (0, 255, 0), 1)
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(image,current_text,(tl[0], tl[1] -15), font, 1,(0,0,255),2,cv2.LINE_4)
if current_sign:
sign_count += 1
coordinates.append(position)
cv2.imshow('Result', image)
count = count + 1
#Write to video
if cv2.waitKey(1) & 0xFF == ord('q'):
break
file.write("{}".format(sign_count))
for pos in coordinates:
file.write("\n{} {} {} {} {} {}".format(pos[0],pos[1],pos[2],pos[3],pos[4], pos[5]))
print("Finish {} frames".format(count))
file.close()
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Command Line")
parser.add_argument(
'--file_name',
default= "S1.mp4",
help= "Video to be analyzed"
)
parser.add_argument(
'--min_size_components',
type = int,
default= 300,
help= "Min size component to be reserved"
)
parser.add_argument(
'--similitary_contour_with_circle',
type= float,
default= 0.65,
help= "Similitary to a circle"
)
args = parser.parse_args()
main(args)
|
StarcoderdataPython
|
215326
|
# Information tracked for game statistics
import json
class GameStats():
def __init__(self, set):
self.set = set
self.reset_stats()
self.game_active = False
with open("high_score.json", "r") as h_s:
self.high_score = json.load(h_s)
"""让初始化可以在创建对象后随时发生"""
"""初始化一次放上面,重复初始化放下面 """
def reset_stats(self):
self.ship_life = self.set.ship_life
self.score = 0
self.level = 1
"""在这里初始化,在set里方便修改,开挂."""
|
StarcoderdataPython
|
6503064
|
#
# \file generator.py
#
# \brief Generates the CUTLASS Library's instances
#
import enum
import os.path
import shutil
import argparse
import platform
from library import *
from manifest import *
###################################################################################################
#
def CudaToolkitVersionSatisfies(semantic_ver_string, major, minor, patch = 0):
# by default, use the latest CUDA Toolkit version
cuda_version = [11, 0, 132]
# Update cuda_version based on parsed string
if semantic_ver_string != '':
for i, x in enumerate([int(x) for x in semantic_ver_string.split('.')]):
if i < len(cuda_version):
cuda_version[i] = x
else:
cuda_version.append(x)
return cuda_version >= [major, minor, patch]
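# Illustrative checks (version numbers here are examples only):
#   CudaToolkitVersionSatisfies('10.2', 10, 1)   # True:  [10, 2, 132] >= [10, 1, 0]
#   CudaToolkitVersionSatisfies('10.2', 11, 0)   # False: [10, 2, 132] <  [11, 0, 0]
#   CudaToolkitVersionSatisfies('', 11, 0)       # True:  defaults to [11, 0, 132]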
###################################################################################################
###################################################################################################
#
def CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.args.kernels == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = GemmOperation(GemmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
###########################################################################################################
# ConvolutionOperator support variations
# ____________________________________________________________________
# ConvolutionalOperator | Analytic | Optimized
# ____________________________________________________________________
# | Fprop | (strided) | (strided)
# | Dgrad | (strided, unity*) | (unity)
# | Wgrad | (strided) | (strided)
# ____________________________________________________________________
#
# Note : Operators marked (*) are supported but not generated, to keep the instantiated kernel count low
###########################################################################################################
# Convolution for 2D operations
def CreateConv2dOperator(manifest, layout, tile_descriptions, data_type, alignment, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], epilogue_functor = EpilogueFunctor.LinearCombination):
element_a, element_b, element_c, element_epilogue = data_type
# one exceptional case
alignment_c = min(8, alignment)
# iterator algorithm (analytic and optimized)
iterator_algorithms = [IteratorAlgorithm.Analytic, IteratorAlgorithm.Optimized]
# by default, only generate the largest tile size
if manifest.args.kernels == '':
tile_descriptions = [tile_descriptions[0],]
operations = []
for tile in tile_descriptions:
for conv_kind in conv_kinds:
for iterator_algorithm in iterator_algorithms:
A = TensorDescription(element_a, layout[0], alignment)
B = TensorDescription(element_b, layout[1], alignment)
C = TensorDescription(element_c, layout[2], alignment_c)
# unity stride only for Optimized Dgrad
if (iterator_algorithm == IteratorAlgorithm.Optimized) and (conv_kind == ConvKind.Dgrad):
new_operation = Conv2dOperation(conv_kind, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
# strided dgrad is not supported by Optimized Dgrad
if (iterator_algorithm == IteratorAlgorithm.Optimized) and (conv_kind == ConvKind.Dgrad):
continue
# strided support for Fprop (Analytic/Optimized), Dgrad (Analytic), and Wgrad (Analytic)
new_operation = Conv2dOperation(conv_kind, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
###################################################################################################
###################################################################################################
def GenerateConv2d_Simt(args):
operations = []
layouts = [
(LayoutType.TensorNC4HW4, LayoutType.TensorC4RSK4),
]
math_instructions = [
MathInstruction( \
[1, 1, 4], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
dst_layouts = [
LayoutType.TensorNC4HW4,
LayoutType.TensorNC32HW32,
LayoutType.TensorNHWC,
LayoutType.TensorNHWC,
LayoutType.TensorNCHW
]
dst_types = [
DataType.s8,
DataType.s8,
DataType.u4,
DataType.s4,
DataType.f32,
]
max_cc = 1024
for math_inst in math_instructions:
for layout in layouts:
for dst_type, dst_layout in zip(dst_types, dst_layouts):
if dst_type == DataType.s4 or dst_type == DataType.u4:
min_cc = 75
use_special_optimization = SpecialOptimizeDesc.NoneSpecialOpt
else:
min_cc = 61
use_special_optimization = SpecialOptimizeDesc.ConvFilterUnity
tile_descriptions = [
TileDescription([128, 128, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 32], 2, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 64, 32], 2, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 32, 32], 2, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 16, 128, 16], 1, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 16, 64, 8], 2, [1, 1, 1], math_inst, min_cc, max_cc),
]
for tile in tile_descriptions:
if dst_layout == LayoutType.TensorNC32HW32 and tile.threadblock_shape[0] > 32:
continue
if (dst_layout == LayoutType.TensorNCHW or dst_layout == LayoutType.TensorNHWC) \
and tile.threadblock_shape[0] > 16:
continue
operations += GenerateConv2d(ConvKind.Fprop, [tile], layout[0], layout[1],
dst_layout, dst_type, min_cc, 32, 32, 32,
use_special_optimization)
return operations
def GenerateConv2d_TensorOp_8816(args):
operations = []
layouts = [
(LayoutType.TensorNC32HW32, LayoutType.TensorC32RSK32),
]
math_instructions = [
MathInstruction( \
[8, 8, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
dst_layouts = [
LayoutType.TensorNC32HW32,
LayoutType.TensorNC4HW4,
]
dst_types = [
DataType.s8,
DataType.s8,
]
use_special_optimization = SpecialOptimizeDesc.ConvFilterUnity
min_cc = 75
max_cc = 1024
cuda_major = 10
cuda_minor = 2
for math_inst in math_instructions:
for layout in layouts:
for dst_type, dst_layout in zip(dst_types, dst_layouts):
if dst_layout == LayoutType.TensorNC32HW32:
tile_descriptions = [
TileDescription([128, 256, 64], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 64], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 1, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 32], 1, [2, 1, 1], math_inst, min_cc, max_cc),
]
operations += GenerateConv2d(ConvKind.Fprop, tile_descriptions, layout[0], layout[1],
dst_layout, dst_type, min_cc, 128, 128, 64, use_special_optimization,
ImplicitGemmMode.GemmTN, True, cuda_major, cuda_minor)
else:
assert dst_layout == LayoutType.TensorNC4HW4
tile_descriptions = [
TileDescription([ 64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 32], 1, [1, 2, 1], math_inst, min_cc, max_cc),
]
operations += GenerateConv2d(ConvKind.Fprop, tile_descriptions, layout[0], layout[1],
dst_layout, dst_type, min_cc, 128, 128, 64, use_special_optimization,
ImplicitGemmMode.GemmNT, False, cuda_major, cuda_minor)
layouts_nhwc = [
(LayoutType.TensorNHWC, LayoutType.TensorNC4HW4, 32),
(LayoutType.TensorNHWC, LayoutType.TensorNC8HW8, 64),
(LayoutType.TensorNHWC, LayoutType.TensorNC16HW16, 128),
]
dst_layouts_nhwc = [
LayoutType.TensorNHWC,
]
for math_inst in math_instructions:
for layout in layouts_nhwc:
for dst_layout in dst_layouts_nhwc:
dst_type = math_inst.element_b
tile_descriptions = [
TileDescription([128, 32, 32], 1, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([64, 16, 32], 2, [1, 1, 1], math_inst, min_cc, max_cc),
]
for tile in tile_descriptions:
dst_align = 32 if tile.threadblock_shape[1] == 16 else 64
operations += GenerateConv2d(ConvKind.Fprop, [tile], layout[0], layout[1], dst_layout,
dst_type, min_cc, layout[2], layout[2], dst_align, use_special_optimization,
ImplicitGemmMode.GemmTN, False, cuda_major, cuda_minor)
if tile.threadblock_shape[1] == 16 or tile.threadblock_shape[1] == 32:
operations += GenerateConv2d(ConvKind.Fprop, [tile], layout[0], layout[1], dst_layout,
dst_type, min_cc, layout[2], layout[2], dst_align, use_special_optimization,
ImplicitGemmMode.GemmTN, True, cuda_major, cuda_minor)
out_dtypes = [DataType.s4, DataType.u4, DataType.f32]
#INT8x8x4 and INT8x8x32
for math_inst in math_instructions:
for layout in layouts_nhwc:
for dst_layout in dst_layouts_nhwc:
for out_dtype in out_dtypes:
tile_descriptions = [
TileDescription([128, 32, 32], 1, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([64, 16, 32], 2, [1, 1, 1], math_inst, min_cc, max_cc),
]
for tile in tile_descriptions:
dst_align = 4 * DataTypeSize[out_dtype] if tile.threadblock_shape[1] == 16 or out_dtype == DataType.f32 \
else 8 * DataTypeSize[out_dtype]
operations += GenerateConv2d(ConvKind.Fprop, [tile], layout[0], layout[1], dst_layout,
out_dtype, min_cc, layout[2], layout[2], dst_align, use_special_optimization,
ImplicitGemmMode.GemmTN, False, cuda_major, cuda_minor)
if tile.threadblock_shape[1] == 16 or (tile.threadblock_shape[1] == 32 and out_dtype != DataType.f32):
operations += GenerateConv2d(ConvKind.Fprop, [tile], layout[0], layout[1], dst_layout,
out_dtype, min_cc, layout[2], layout[2], dst_align, use_special_optimization,
ImplicitGemmMode.GemmTN, True, cuda_major, cuda_minor)
return operations
def GenerateConv2d_TensorOp_8832(args):
operations = []
layouts = [
(LayoutType.TensorNC64HW64, LayoutType.TensorC64RSK64),
]
math_instructions = [
MathInstruction( \
[8, 8, 32], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate), \
MathInstruction( \
[8, 8, 32], \
DataType.s4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate)
]
dst_layouts = [
LayoutType.TensorNC64HW64,
]
use_special_optimization = SpecialOptimizeDesc.ConvFilterUnity
min_cc = 75
max_cc = 1024
cuda_major = 10
cuda_minor = 2
for math_inst in math_instructions:
for layout in layouts:
for dst_layout in dst_layouts:
dst_type = math_inst.element_b
tile_descriptions = [
TileDescription([128, 256, 128], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 1, [2, 1, 1], math_inst, min_cc, max_cc),
]
operations += GenerateConv2d(ConvKind.Fprop, tile_descriptions, layout[0], layout[1],
dst_layout, dst_type, min_cc, 128, 128, 64, use_special_optimization,
ImplicitGemmMode.GemmTN, True, cuda_major, cuda_minor)
layouts_nhwc = [
(LayoutType.TensorNHWC, LayoutType.TensorNC8HW8, 32),
(LayoutType.TensorNHWC, LayoutType.TensorNC16HW16, 64),
(LayoutType.TensorNHWC, LayoutType.TensorNC32HW32, 128),
]
dst_layouts_nhwc = [
LayoutType.TensorNHWC,
]
for math_inst in math_instructions:
for layout in layouts_nhwc:
for dst_layout in dst_layouts_nhwc:
dst_type = math_inst.element_b
tile_descriptions = [
TileDescription([128, 16, 64], 2, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 64], 1, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 1, [2, 1, 1], math_inst, min_cc, max_cc),
]
for tile in tile_descriptions:
dst_align = 16 if tile.threadblock_shape[1] == 16 else 32
operations += GenerateConv2d(ConvKind.Fprop, [tile], layout[0], layout[1], dst_layout,
dst_type, min_cc, layout[2], layout[2], dst_align, use_special_optimization,
ImplicitGemmMode.GemmTN, False, cuda_major, cuda_minor)
if tile.threadblock_shape[1] == 32 or tile.threadblock_shape[1] == 64:
dst_align = 32 if tile.threadblock_shape[1] == 32 else 64
operations += GenerateConv2d(ConvKind.Fprop, [tile], layout[0], layout[1], dst_layout,
dst_type, min_cc, layout[2], layout[2], dst_align, use_special_optimization,
ImplicitGemmMode.GemmTN, True, cuda_major, cuda_minor)
# INT4x4x8
for math_inst in math_instructions:
for layout in layouts_nhwc:
for dst_layout in dst_layouts_nhwc:
tile_descriptions = [
TileDescription([128, 16, 64], 2, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 64], 1, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 1, [2, 1, 1], math_inst, min_cc, max_cc),
]
for tile in tile_descriptions:
dst_align = 32 if tile.threadblock_shape[1] == 16 else 64
operations += GenerateConv2d(ConvKind.Fprop, [tile], layout[0], layout[1], dst_layout,
DataType.s8, min_cc, layout[2], layout[2], dst_align, use_special_optimization,
ImplicitGemmMode.GemmTN, False, cuda_major, cuda_minor)
if tile.threadblock_shape[1] == 32 or tile.threadblock_shape[1] == 64:
dst_align = 64 if tile.threadblock_shape[1] == 32 else 128
operations += GenerateConv2d(ConvKind.Fprop, [tile], layout[0], layout[1], dst_layout,
DataType.s8, min_cc, layout[2], layout[2], dst_align, use_special_optimization,
ImplicitGemmMode.GemmTN, True, cuda_major, cuda_minor)
return operations
def GenerateDeconv_Simt(args):
operations = []
layouts = [
(LayoutType.TensorNC4HW4, LayoutType.TensorK4RSC4),
]
math_instructions = [
MathInstruction( \
[1, 1, 4], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
dst_layouts = [
LayoutType.TensorNC4HW4,
]
dst_types = [
DataType.s8,
]
use_special_optimization = SpecialOptimizeDesc.DeconvDoubleUpsampling
min_cc = 61
max_cc = 1024
for math_inst in math_instructions:
for layout in layouts:
for dst_type, dst_layout in zip(dst_types, dst_layouts):
tile_descriptions = [
TileDescription([32, 128, 32], 2, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 128, 16], 2, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 128, 16], 1, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([16, 64, 8], 2, [1, 1, 1], math_inst, min_cc, max_cc),
]
operations += GenerateConv2d(ConvKind.Dgrad, tile_descriptions, layout[0], layout[1],
dst_layout, dst_type, min_cc, 32, 32, 32,
use_special_optimization)
return operations
def GenerateDeconv_TensorOp_8816(args):
operations = []
layouts = [
(LayoutType.TensorNHWC, LayoutType.TensorCK4RS4, 32),
(LayoutType.TensorNHWC, LayoutType.TensorCK8RS8, 64),
(LayoutType.TensorNHWC, LayoutType.TensorCK16RS16, 128),
]
math_instructions = [
MathInstruction( \
[8, 8, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
dst_layouts = [
LayoutType.TensorNHWC,
]
dst_types = [
DataType.s8,
]
use_special_optimization = SpecialOptimizeDesc.DeconvDoubleUpsampling
min_cc = 75
max_cc = 1024
cuda_major = 10
cuda_minor = 2
for math_inst in math_instructions:
for layout in layouts:
for dst_type, dst_layout in zip(dst_types, dst_layouts):
tile_descriptions = [
TileDescription([128, 32, 32], 1, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([64, 16, 32], 2, [1, 1, 1], math_inst, min_cc, max_cc),
]
for tile in tile_descriptions:
dst_align = 32 if tile.threadblock_shape[1] == 16 else 64
operations += GenerateConv2d(ConvKind.Dgrad, [tile], layout[0], layout[1], dst_layout, dst_type,
min_cc, layout[2], layout[2], dst_align, use_special_optimization,
ImplicitGemmMode.GemmTN, False, cuda_major, cuda_minor)
return operations
################################################################################
# parameters
# Edge - for tiles, the edges represent the length of one side
# Ratio - the maximum ratio between 2 edges, limits the skinniness of tiles
# MaxEdge - maximum length of each edge
# Min/Max - minimum/maximum of the product of edge lengths
################################################################################
warpsPerThreadblockEdge = [1, 2, 4, 8, 16]
warpsPerThreadblockRatio = 2
warpsPerThreadblockMax = 16
# NOTE 1x32 and 2x16 warp tile shapes fail validation for ~10% of cases
warpShapeEdges = [8, 16, 32, 64, 128, 256]
warpShapeRatio = 4
warpShapeMax = 64*64
warpShapeMin = 8*8
threadblockEdgeMax = 256
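# A minimal sketch of how the Ratio/Max constraints prune candidate shapes; it mirrors the
# enumeration loops in GenerateGemm_Simt below and uses the module-level defaults above.
#   allowed = [(a, b) for a in warpsPerThreadblockEdge for b in warpsPerThreadblockEdge
#              if a / b <= warpsPerThreadblockRatio
#              and b / a <= warpsPerThreadblockRatio
#              and a * b <= warpsPerThreadblockMax]
#   # e.g. (1, 4) is rejected (4/1 > 2) while (2, 4) is kept (ratio 2, product 8 <= 16).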
# char, type bits/elem, max tile, L0 threadblock tiles
precisions = {
"c" : [ "cutlass::complex<float>", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ],
"d" : [ "double", 64, 64*64, [ [ 64, 64], [ 32, 32] ] ],
"h" : [ "cutlass::half_t", 16, 128*256, [ [256, 128], [ 64, 128], [ 64, 32] ] ],
"i" : [ "int", 32, 128*128, [ [128, 64], [ 16, 32] ] ],
"s" : [ "float", 32, 128*128, [ [128, 256], [128, 128], [ 64, 64] ] ],
"z" : [ "cutlass::complex<double>", 128, 64*64, [ [ 32, 64], [ 16, 32] ] ],
}
# L1 will have a single kernel for every unique shape
# L2 will have everything else
def GenerateGemm_Simt(args):
################################################################################
# warps per threadblock
################################################################################
warpsPerThreadblocks = []
for warpsPerThreadblock0 in warpsPerThreadblockEdge:
for warpsPerThreadblock1 in warpsPerThreadblockEdge:
if warpsPerThreadblock0 / warpsPerThreadblock1 <= warpsPerThreadblockRatio \
and warpsPerThreadblock1 / warpsPerThreadblock0 <= warpsPerThreadblockRatio \
and warpsPerThreadblock0 * warpsPerThreadblock1 <= warpsPerThreadblockMax:
warpsPerThreadblocks.append([warpsPerThreadblock0,
warpsPerThreadblock1])
################################################################################
# warp shapes
################################################################################
warpNumThreads = 32
warpShapes = []
for warp0 in warpShapeEdges:
for warp1 in warpShapeEdges:
if warp0 / warp1 <= warpShapeRatio \
and warp1 / warp0 <= warpShapeRatio \
and warp0 * warp1 <= warpShapeMax \
and warp0*warp1 > warpShapeMin:
warpShapes.append([warp0, warp1])
# sgemm
precisionType, precisionBits, threadblockMaxElements, threadblockTilesL0 = precisions["s"]
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), # nn
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor), # nt
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), # tn
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor), # tt
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 50
max_cc = 1024
operations = []
for math_inst in math_instructions:
for layout in layouts:
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
tile_descriptions = [
TileDescription([64, 256, 8], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 256, 8], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 64, 8], 2, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 32, 8], 2, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 32, 8], 2, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 8, 32, 8], 2, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 16, 32, 8], 2, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 16, 64, 8], 2, [1, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 16, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
]
for warpsPerThreadblock in warpsPerThreadblocks:
for warpShape in warpShapes:
warpThreadsM = 0
if warpShape[0] > warpShape[1]:
warpThreadsM = 8
else:
warpThreadsM = 4
                    warpThreadsN = warpNumThreads // warpThreadsM
# skip shapes with conflicting rectangularity
# they are unlikely to be fastest
blockG = warpsPerThreadblock[0] > warpsPerThreadblock[1]
blockL = warpsPerThreadblock[0] < warpsPerThreadblock[1]
warpG = warpShape[0] > warpShape[1]
warpL = warpShape[0] < warpShape[1]
blockG2 = warpsPerThreadblock[0] > warpsPerThreadblock[1]*2
blockL2 = warpsPerThreadblock[0]*2 < warpsPerThreadblock[1]
warpG2 = warpShape[0] > warpShape[1]*2
warpL2 = warpShape[0]*2 < warpShape[1]
if blockG2 and warpL: continue
if blockL2 and warpG: continue
if warpG2 and blockL: continue
if warpL2 and blockG: continue
# check threadblock ratios and max
threadblockTile = [warpShape[0]*warpsPerThreadblock[0],
warpShape[1]*warpsPerThreadblock[1]]
if threadblockTile[0] * threadblockTile[1] > threadblockMaxElements: continue
if threadblockTile[0] > threadblockEdgeMax: continue
if threadblockTile[1] > threadblockEdgeMax: continue
totalThreads = warpNumThreads*warpsPerThreadblock[0]*warpsPerThreadblock[1]
# calculate unroll
                    # ensure that every iteration does at least one full load of A and B
unrollMin = 8
unrollMin0 = totalThreads // threadblockTile[0]
unrollMin1 = totalThreads // threadblockTile[1]
unroll = max(unrollMin, unrollMin0, unrollMin1)
threadTileM = warpShape[0] // warpThreadsM
threadTileN = warpShape[1] // warpThreadsN
if threadTileM < 2 or threadTileN < 2: continue
if threadTileM*threadTileN*precisionBits > 8*8*32: continue
# epilogue currently only supports N < WarpNumThreads
if threadblockTile[1] < warpNumThreads: continue
# limit smem
smemBitsA = threadblockTile[0]*unroll*2*precisionBits
smemBitsB = threadblockTile[1]*unroll*2*precisionBits
smemKBytes = (smemBitsA+smemBitsB)/8/1024
if (smemKBytes > 48): continue
tile = TileDescription([threadblockTile[0], threadblockTile[1], unroll], \
2, \
[threadblockTile[0]//warpShape[0], threadblockTile[1]//warpShape[1], 1], \
math_inst, min_cc, max_cc)
def filter(t: TileDescription) -> bool:
nonlocal tile
return t.threadblock_shape[0] == tile.threadblock_shape[0] and \
t.threadblock_shape[1] == tile.threadblock_shape[1] and \
t.threadblock_shape[2] == tile.threadblock_shape[2] and \
t.warp_count[0] == tile.warp_count[0] and \
t.warp_count[1] == tile.warp_count[1] and \
t.warp_count[2] == tile.warp_count[2] and \
t.stages == tile.stages
if not any(t for t in tile_descriptions if filter(t)): continue
operations += GeneratesGemm(tile, data_type, layout[0], layout[1], layout[2], min_cc)
return operations
#
def GenerateGemv_Simt(args):
threadBlockShape_N = [128, 64, 32]
ldgBits_A = [128, 64, 32]
ldgBits_B = [128, 64, 32]
layouts = [
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 50
operations = []
for math_inst in math_instructions:
for layout in layouts:
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
for threadblock_shape_n in threadBlockShape_N:
for align_a in ldgBits_A:
for align_b in ldgBits_B:
ldg_elements_a = align_a // DataTypeSize[math_inst.element_a]
ldg_elements_b = align_b // DataTypeSize[math_inst.element_b]
threadblock_shape_k = (256 * ldg_elements_a) // (threadblock_shape_n // ldg_elements_b)
threadblock_shape = [1, threadblock_shape_n, threadblock_shape_k]
thread_shape = [1, ldg_elements_b, ldg_elements_a]
operations.append(GeneratesGemv(math_inst, \
threadblock_shape, \
thread_shape, \
data_type, \
layout[0], \
layout[1], \
layout[2], \
min_cc, \
align_a, \
align_b))
return operations
#
def GeneratesGemm_TensorOp_1688(args):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), # nn
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor), # nt
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), # tn
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor), # tt
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [8, 4, 2,
#1
]
cuda_major = 10
cuda_minor = 2
operations = []
for math_inst in math_instructions:
for layout in layouts:
for align in alignment_constraints:
tile_descriptions = [
TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
## comment some configuration to reduce compilation time and binary size
# TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
# TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
# TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
for tile in tile_descriptions:
operations += GeneratesGemm(tile, \
data_type, \
layout[0], \
layout[1], \
layout[2], \
min_cc, \
align * 16, \
align * 16, \
align * 16, \
cuda_major, \
cuda_minor)
return operations
#
def GeneratesGemm_TensorOp_884(args):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), # nn
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor), # nt
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), # tn
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor), # tt
]
math_instructions = [
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 70
max_cc = 75
alignment_constraints = [8, 4, 2,
# 1
]
cuda_major = 10
cuda_minor = 2
operations = []
for math_inst in math_instructions:
for layout in layouts:
for align in alignment_constraints:
tile_descriptions = [
TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
## comment some configuration to reduce compilation time and binary size
# TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
# TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
# TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
for tile in tile_descriptions:
operations += GeneratesGemm(tile, \
data_type, \
layout[0], \
layout[1], \
layout[2], \
min_cc, \
align * 16, \
align * 16, \
align * 16, \
cuda_major, \
cuda_minor)
return operations
#
def GenerateConv2dOperations(args):
if args.type == "simt":
return GenerateConv2d_Simt(args)
elif args.type == "tensorop8816":
return GenerateConv2d_TensorOp_8816(args)
else:
assert args.type == "tensorop8832", "operation conv2d only support" \
"simt, tensorop8816 and tensorop8832. (got:{})".format(args.type)
return GenerateConv2d_TensorOp_8832(args)
def GenerateDeconvOperations(args):
if args.type == "simt":
return GenerateDeconv_Simt(args)
else:
assert args.type == "tensorop8816", "operation deconv only support" \
"simt and tensorop8816. (got:{})".format(args.type)
return GenerateDeconv_TensorOp_8816(args)
def GenerateGemmOperations(args):
if args.type == "tensorop884":
return GeneratesGemm_TensorOp_884(args)
elif args.type == "tensorop1688":
return GeneratesGemm_TensorOp_1688(args)
else:
assert args.type == "simt", "operation gemm only support" \
"simt. (got:{})".format(args.type)
return GenerateGemm_Simt(args)
def GenerateGemvOperations(args):
assert args.type == "simt", "operation gemv only support" \
"simt. (got:{})".format(args.type)
return GenerateGemv_Simt(args)
###################################################################################################
###################################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generates device kernel registration code for CUTLASS Kernels")
parser.add_argument("--operations", type=str, choices=['gemm', 'gemv', 'conv2d', 'deconv'],
required=True, help="Specifies the operation to generate (gemm, gemv, conv2d, deconv)")
parser.add_argument("output", type=str, help="output directory for CUTLASS kernel files")
parser.add_argument("--type", type=str, choices=['simt', 'tensorop8816', 'tensorop8832', 'tensorop884', 'tensorop1688'],
default='simt', help="kernel type of CUTLASS kernel generator")
gemv_wrapper_path = "src/cuda/matrix_mul/cutlass_matrix_mul_wrapper_batched_gemv_strided.cuinl"
short_path = (platform.system() == "Windows" or platform.system().find('NT') >= 0) and ('true'!= os.getenv("CUTLASS_WITH_LONG_PATH", default='False').lower())
args = parser.parse_args()
if args.operations == "gemm":
operations = GenerateGemmOperations(args)
elif args.operations == "gemv":
operations = GenerateGemvOperations(args)
elif args.operations == "conv2d":
operations = GenerateConv2dOperations(args)
elif args.operations == "deconv":
operations = GenerateDeconvOperations(args)
if args.operations == "conv2d" or args.operations == "deconv":
for operation in operations:
with EmitConvSingleKernelWrapper(args.output, operation, short_path) as emitter:
emitter.emit()
elif args.operations == "gemm":
for operation in operations:
with EmitGemmSingleKernelWrapper(args.output, operation, short_path) as emitter:
emitter.emit()
elif args.operations == "gemv":
for operation in operations:
with EmitGemvSingleKernelWrapper(args.output, operation, gemv_wrapper_path, short_path) as emitter:
emitter.emit()
if args.operations != "gemv":
GenerateManifest(args, operations, args.output)
#
###################################################################################################
|
StarcoderdataPython
|
6596871
|
# Find common tracks.
def run(plists, outfile):
print(f"Finding common tracks...")
track_name_sets = []
# Collect data.
for plist in plists:
names = set()
tracks = plist['Tracks']
        for track in tracks.values():
            try:
                names.add(track['Name'])
            except KeyError:
                pass
track_name_sets.append(names)
# Find common tracks.
common_tracks = set.intersection(*track_name_sets)
# Analyse data.
if len(common_tracks) > 0:
print(f"Found {len(common_tracks)} common tracks.")
else:
print("No common tracks found!")
return
    with open(outfile, "w") as f:
        for name in common_tracks:
            f.write(f"{name}\n")
print(f"Common track names saved to {outfile}")
|
StarcoderdataPython
|
8092787
|
<filename>statsmodels/regression/tests/results/results_grunfeld_ols_robust_cluster.py
import numpy as np
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
for i,att in enumerate(['params', 'bse', 'tvalues', 'pvalues']):
self[att] = self.params_table[:,i]
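# Minimal illustration (made-up numbers, unrelated to the Stata results below): a Bunch built
# from a params_table exposes its first four columns as params/bse/tvalues/pvalues.
#   b = Bunch(params_table=np.array([[1.5, 0.2, 7.5, 0.001]]))
#   b.params   # array([1.5])
#   b.bse      # array([0.2])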
est = dict(
N_clust = 10,
N = 200,
df_m = 2,
df_r = 9,
F = 51.59060716590177,
r2 = .8124080178314147,
rmse = 94.40840193979599,
mss = 7604093.484267689,
rss = 1755850.432294737,
r2_a = .8105035307027997,
ll = -1191.80235741801,
ll_0 = -1359.150955647688,
rank = 3,
cmdline = "regress invest mvalue kstock, vce(cluster company)",
title = "Linear regression",
marginsok = "XB default",
vce = "cluster",
depvar = "invest",
cmd = "regress",
properties = "b V",
predict = "regres_p",
model = "ols",
estat_cmd = "regress_estat",
vcetype = "Robust",
clustvar = "company",
)
params_table = np.array([
.11556215606596, .01589433647768, 7.2706499090564, .00004710548549,
.07960666895505, .15151764317688, 9, 2.2621571627982,
0, .23067848754982, .08496711097464, 2.7149150406994,
.02380515903536, .03846952885627, .42288744624337, 9,
2.2621571627982, 0, -42.714369016733, 20.425202580078,
-2.0912580352272, .06604843284516, -88.919387334862, 3.4906493013959,
9, 2.2621571627982, 0]).reshape(3,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.00025262993207, -.00065043385106, .20961897960949, -.00065043385106,
.00721940994738, -1.2171040967615, .20961897960949, -1.2171040967615,
417.18890043724]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_cluster = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
est = dict(
N = 200,
inexog_ct = 2,
exexog_ct = 0,
endog_ct = 0,
partial_ct = 0,
N_clust = 10,
df_m = 2,
sdofminus = 0,
dofminus = 0,
r2 = .8124080178314146,
rmse = 93.69766358599176,
rss = 1755850.432294737,
mss = 7604093.484267682,
r2_a = .8105035307027995,
F = 51.59060716590192,
Fp = .0000117341240941,
Fdf1 = 2,
Fdf2 = 9,
yy = 13620706.07273678,
yyc = 9359943.916562419,
partialcons = 0,
cons = 1,
jdf = 0,
j = 0,
ll = -1191.802357418011,
rankV = 3,
rankS = 3,
rankxx = 3,
rankzz = 3,
r2c = .8124080178314146,
r2u = .8710896173136538,
clustvar = "company",
hacsubtitleV = "Statistics robust to heteroskedasticity and clustering on company",
hacsubtitleB = "Estimates efficient for homoskedasticity only",
title = "OLS estimation",
predict = "ivreg2_p",
version = "03.1.07",
cmdline = "ivreg2 invest mvalue kstock, cluster(company)",
cmd = "ivreg2",
model = "ols",
depvar = "invest",
vcetype = "Robust",
vce = "robust cluster",
partialsmall = "small",
inexog = "mvalue kstock",
insts = "mvalue kstock",
properties = "b V",
)
params_table = np.array([
.11556215606596, .01500272788516, 7.7027429245215, 1.331761148e-14,
.08615734974119, .14496696239074, np.nan, 1.9599639845401,
0, .23067848754982, .08020079648691, 2.8762618035529,
.00402415789383, .07348781490405, .38786916019559, np.nan,
1.9599639845401, 0, -42.714369016733, 19.27943055305,
-2.2155410088072, .02672295281194, -80.501358543152, -4.9273794903145,
np.nan, 1.9599639845401, 0]).reshape(3,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.000225081844, -.00057950714469, .1867610305767, -.00057950714469,
.00643216775713, -1.0843847053056, .1867610305767, -1.0843847053056,
371.69644244987]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_cluster_large = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
est = dict(
N = 200,
N_g = 10,
df_m = 2,
df_r = 9,
F = 97.97910905239282,
r2 = .8124080178314147,
rmse = 94.40840193979599,
lag = 4,
cmd = "xtscc",
predict = "xtscc_p",
method = "Pooled OLS",
depvar = "invest",
vcetype = "Drisc/Kraay",
title = "Regression with Driscoll-Kraay standard errors",
groupvar = "company",
properties = "b V",
)
params_table = np.array([
.11556215606596, .0134360177573, 8.6009231420662, .00001235433261,
.08516777225681, .14595653987512, 9, 2.2621571627982,
0, .23067848754982, .04930800664089, 4.678317037431,
.00115494570515, .11913602714384, .3422209479558, 9,
2.2621571627982, 0, -42.714369016733, 12.190347184209,
-3.5039501641153, .0066818746948, -70.290850216489, -15.137887816977,
9, 2.2621571627982, 0]).reshape(3,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.00018052657317, -.00035661054613, -.06728261073866, -.00035661054613,
.0024312795189, -.32394785247278, -.06728261073866, -.32394785247278,
148.60456447156]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_nw_groupsum4 = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
est = dict(
df_m = 2,
df_r = 197,
F = 73.07593045506036,
N = 200,
lag = 4,
rank = 3,
title = "Regression with Newey-West standard errors",
cmd = "newey",
cmdline = "newey invest mvalue kstock, lag(4) force",
estat_cmd = "newey_estat",
predict = "newey_p",
vcetype = "Newey-West",
depvar = "invest",
properties = "b V",
)
params_table = np.array([
.11556215606596, .01142785251475, 10.112324771147, 1.251631065e-19,
.0930255277205, .13809878441142, 197, 1.9720790337785,
0, .23067848754982, .06842168281423, 3.3714237660029,
.00089998163666, .09574552141602, .36561145368361, 197,
1.9720790337785, 0, -42.714369016733, 16.179042041128,
-2.6401049523298, .00895205094219, -74.620718612662, -10.808019420804,
197, 1.9720790337785, 0]).reshape(3,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.0001305958131, -.00022910455176, .00889686530849, -.00022910455176,
.00468152667913, -.88403667445531, .00889686530849, -.88403667445531,
261.76140136858]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_nw_panel4 = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
est = dict(
N = 200,
inexog_ct = 2,
exexog_ct = 0,
endog_ct = 0,
partial_ct = 0,
df_r = 9,
N_clust = 10,
N_clust1 = 10,
N_clust2 = 20,
df_m = 2,
sdofminus = 0,
dofminus = 0,
r2 = .8124080178314146,
rmse = 94.40840193979601,
rss = 1755850.432294737,
mss = 7604093.484267682,
r2_a = .8105035307027995,
F = 57.99124535923564,
Fp = 7.21555935862e-06,
Fdf1 = 2,
partialcons = 0,
cons = 1,
jdf = 0,
j = 0,
ll = -1191.802357418011,
rankV = 3,
rankS = 3,
rankxx = 3,
rankzz = 3,
r2c = .8124080178314146,
r2u = .8710896173136538,
yyc = 9359943.916562419,
yy = 13620706.07273678,
Fdf2 = 9,
clustvar = "company time",
hacsubtitleV = "Statistics robust to heteroskedasticity and clustering on company and time",
hacsubtitleB = "Estimates efficient for homoskedasticity only",
title = "OLS estimation",
predict = "ivreg2_p",
version = "03.1.07",
cmdline = "ivreg2 invest mvalue kstock, cluster(company time) small",
cmd = "ivreg2",
model = "ols",
depvar = "invest",
vcetype = "Robust",
clustvar2 = "time",
clustvar1 = "company",
vce = "robust two-way cluster",
partialsmall = "small",
small = "small",
inexog = "mvalue kstock",
insts = "mvalue kstock",
properties = "b V",
)
params_table = np.array([
.11556215606596, .01635175387097, 7.0672636695645, .00005873628221,
.07857191892244, .15255239320949, 9, 2.2621571627982,
0, .23067848754982, .07847391274682, 2.9395563375824,
.01649863150032, .05315816373679, .40819881136285, 9,
2.2621571627982, 0, -42.714369016733, 19.505607409785,
-2.189850750062, .05626393734425, -86.839118533508, 1.4103805000422,
9, 2.2621571627982, 0]).reshape(3,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.00026737985466, -.00070163493529, .19641438763743, -.00070163493529,
.0061581549818, -.99627581152391, .19641438763743, -.99627581152391,
380.46872042467]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_cluster_2groups_small = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
est = dict(
N = 200,
inexog_ct = 2,
exexog_ct = 0,
endog_ct = 0,
partial_ct = 0,
N_clust = 10,
N_clust1 = 10,
N_clust2 = 20,
df_m = 2,
sdofminus = 0,
dofminus = 0,
r2 = .8124080178314146,
rmse = 93.69766358599176,
rss = 1755850.432294737,
mss = 7604093.484267682,
r2_a = .8105035307027995,
F = 57.99124535923565,
Fp = 7.21555935862e-06,
Fdf1 = 2,
Fdf2 = 9,
partialcons = 0,
cons = 1,
jdf = 0,
j = 0,
ll = -1191.802357418011,
rankV = 3,
rankS = 3,
rankxx = 3,
rankzz = 3,
r2c = .8124080178314146,
r2u = .8710896173136538,
yyc = 9359943.916562419,
yy = 13620706.07273678,
clustvar = "company time",
hacsubtitleV = "Statistics robust to heteroskedasticity and clustering on company and time",
hacsubtitleB = "Estimates efficient for homoskedasticity only",
title = "OLS estimation",
predict = "ivreg2_p",
version = "03.1.07",
cmdline = "ivreg2 invest mvalue kstock, cluster(company time)",
cmd = "ivreg2",
model = "ols",
depvar = "invest",
vcetype = "Robust",
clustvar2 = "time",
clustvar1 = "company",
vce = "robust two-way cluster",
partialsmall = "small",
inexog = "mvalue kstock",
insts = "mvalue kstock",
properties = "b V",
)
params_table = np.array([
.11556215606596, .01543448599542, 7.487269488613, 7.032121917e-14,
.08531111939505, .14581319273688, np.nan, 1.9599639845401,
0, .23067848754982, .07407184066336, 3.1142534799181,
.00184410987255, .08550034758104, .3758566275186, np.nan,
1.9599639845401, 0, -42.714369016733, 18.411420987265,
-2.319993065515, .02034125246974, -78.800091055978, -6.6286469774879,
np.nan, 1.9599639845401, 0]).reshape(3,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.00023822335794, -.00062512499511, .17499633632219, -.00062512499511,
.00548663757926, -.88763669036779, .17499633632219, -.88763669036779,
338.98042277032]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_cluster_2groups_large = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
est = dict(
N = 200,
inexog_ct = 2,
exexog_ct = 0,
endog_ct = 0,
partial_ct = 0,
bw = 5,
N_clust = 20,
df_m = 2,
sdofminus = 0,
dofminus = 0,
r2 = .8124080178314146,
rmse = 93.69766358599176,
rss = 1755850.432294737,
mss = 7604093.484267682,
r2_a = .8105035307027995,
F = 92.14467466912147,
Fp = 1.66368179227e-10,
Fdf1 = 2,
Fdf2 = 19,
yy = 13620706.07273678,
partialcons = 0,
cons = 1,
jdf = 0,
j = 0,
ll = -1191.802357418011,
rankV = 3,
rankS = 3,
rankxx = 3,
rankzz = 3,
r2c = .8124080178314146,
r2u = .8710896173136538,
yyc = 9359943.916562419,
clustvar = "year",
hacsubtitleV2 = "and kernel-robust to common correlated disturbances (Driscoll-Kraay)",
hacsubtitleV = "Statistics robust to heteroskedasticity and clustering on year",
hacsubtitleB = "Estimates efficient for homoskedasticity only",
title = "OLS estimation",
predict = "ivreg2_p",
version = "03.1.07",
cmdline = "ivreg2 invest mvalue kstock, dkraay(5)",
cmd = "ivreg2",
model = "ols",
depvar = "invest",
vcetype = "Robust",
vce = "cluster ac bartlett bw=5",
partialsmall = "small",
ivar = "company",
tvar = "year",
kernel = "Bartlett",
inexog = "mvalue kstock",
insts = "mvalue kstock",
properties = "b V",
)
params_table = np.array([
.11556215606596, .0134360177573, 8.6009231420662, 7.907743030e-18,
.08922804516602, .14189626696591, np.nan, 1.9599639845401,
0, .23067848754982, .04930800664089, 4.678317037431,
2.892390940e-06, .13403657038422, .32732040471542, np.nan,
1.9599639845401, 0, -42.714369016733, 12.190347184209,
-3.5039501641153, .00045841113727, -66.607010456823, -18.821727576643,
np.nan, 1.9599639845401, 0]).reshape(3,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.00018052657317, -.00035661054613, -.06728261073866, -.00035661054613,
.0024312795189, -.32394785247278, -.06728261073866, -.32394785247278,
148.60456447156]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_nw_groupsum4_ivreg_large = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
est = dict(
N = 200,
inexog_ct = 2,
exexog_ct = 0,
endog_ct = 0,
partial_ct = 0,
bw = 5,
df_r = 19,
N_clust = 20,
df_m = 2,
sdofminus = 0,
dofminus = 0,
r2 = .8124080178314146,
rmse = 94.40840193979601,
rss = 1755850.432294737,
mss = 7604093.484267682,
r2_a = .8105035307027995,
F = 92.14467466912149,
Fp = 1.66368179227e-10,
Fdf1 = 2,
Fdf2 = 19,
partialcons = 0,
cons = 1,
jdf = 0,
j = 0,
ll = -1191.802357418011,
rankV = 3,
rankS = 3,
rankxx = 3,
rankzz = 3,
r2c = .8124080178314146,
r2u = .8710896173136538,
yyc = 9359943.916562419,
yy = 13620706.07273678,
clustvar = "year",
hacsubtitleV2 = "and kernel-robust to common correlated disturbances (Driscoll-Kraay)",
hacsubtitleV = "Statistics robust to heteroskedasticity and clustering on year",
hacsubtitleB = "Estimates efficient for homoskedasticity only",
title = "OLS estimation",
predict = "ivreg2_p",
version = "03.1.07",
cmdline = "ivreg2 invest mvalue kstock, dkraay(5) small",
cmd = "ivreg2",
model = "ols",
depvar = "invest",
vcetype = "Robust",
vce = "cluster ac bartlett bw=5",
partialsmall = "small",
small = "small",
ivar = "company",
tvar = "year",
kernel = "Bartlett",
inexog = "mvalue kstock",
insts = "mvalue kstock",
properties = "b V",
)
params_table = np.array([
.11556215606596, .0138548615926, 8.3409101775303, 8.967911239e-08,
.08656359748216, .14456071464977, 19, 2.0930240544083,
0, .23067848754982, .0508450956047, 4.5368876743442,
.00022550505646, .12425847940049, .33709849569915, 19,
2.0930240544083, 0, -42.714369016733, 12.570359466158,
-3.3980228752988, .00301793225123, -69.02443375196, -16.404304281506,
19, 2.0930240544083, 0]).reshape(3,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.00019195718975, -.00037919048186, -.07154282413568, -.00037919048186,
.00258522374705, -.34445964542925, -.07154282413568, -.34445964542925,
158.01393710842]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_nw_groupsum4_ivreg_small = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
######################### WLS
est = dict(
N = 200,
df_m = 2,
df_r = 197,
F = 158.2726503915062,
r2 = .7728224625923459,
rmse = 35.1783035325949,
mss = 829335.6968772264,
rss = 243790.0687679817,
r2_a = .7705160916541971,
ll = -994.3622459900876,
ll_0 = -1142.564592396746,
rank = 3,
cmdline = "regress invest mvalue kstock [aw=1/mvalue], robust",
title = "Linear regression",
marginsok = "XB default",
vce = "robust",
depvar = "invest",
cmd = "regress",
properties = "b V",
predict = "regres_p",
model = "ols",
estat_cmd = "regress_estat",
wexp = "= 1/mvalue",
wtype = "aweight",
vcetype = "Robust",
)
params_table = np.array([
.11694307068216, .00768545583365, 15.2161528494, 4.371656843e-35,
.10178674436759, .13209939699674, 197, 1.9720790337785,
0, .10410756769914, .00986959606725, 10.548310892334,
6.565731752e-21, .08464394422305, .12357119117523, 197,
1.9720790337785, 0, -9.2723336171089, 2.3458404391932,
-3.9526702081656, .00010767530575, -13.898516363832, -4.6461508703863,
197, 1.9720790337785, 0]).reshape(3,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.00005906623137, 6.805470065e-06, -.01210153268743, 6.805470065e-06,
.00009740892653, -.01511046663892, -.01210153268743, -.01511046663892,
5.502967366154]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_hc1_wls_small = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
est = dict(
N_clust = 10,
N = 200,
df_m = 2,
df_r = 9,
F = 22.90591346432732,
r2 = .7728224625923459,
rmse = 35.1783035325949,
mss = 829335.6968772264,
rss = 243790.0687679817,
r2_a = .7705160916541971,
ll = -994.3622459900876,
ll_0 = -1142.564592396746,
rank = 3,
cmdline = "regress invest mvalue kstock[aw=1/mvalue], vce(cluster company)",
title = "Linear regression",
marginsok = "XB default",
vce = "cluster",
depvar = "invest",
cmd = "regress",
properties = "b V",
predict = "regres_p",
model = "ols",
estat_cmd = "regress_estat",
wexp = "= 1/mvalue",
wtype = "aweight",
vcetype = "Robust",
clustvar = "company",
)
params_table = np.array([
.11694307068216, .02609630113434, 4.4812124936848, .00152974827456,
.05790913614858, .17597700521575, 9, 2.2621571627982,
0, .10410756769914, .02285882773869, 4.5543703679489,
.00137730504553, .05239730679689, .15581782860139, 9,
2.2621571627982, 0, -9.2723336171089, 5.7204731422962,
-1.6209032690934, .13948922172294, -22.212942910549, 3.6682756763312,
9, 2.2621571627982, 0]).reshape(3,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.00068101693289, -.00006496077364, -.08926939086077, -.00006496077364,
.00052252600559, -.0697116307149, -.08926939086077, -.0697116307149,
32.723812971732]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_cluster_wls_small = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
est = dict(
N = 200,
inexog_ct = 2,
exexog_ct = 0,
endog_ct = 0,
partial_ct = 0,
N_clust = 10,
df_m = 2,
sdofminus = 0,
dofminus = 0,
r2 = .772822462592346,
rmse = 34.91346937558495,
rss = 243790.0687679817,
mss = 829335.6968772268,
r2_a = .7705160916541972,
F = 22.9059134643273,
Fp = .000294548654088,
Fdf1 = 2,
Fdf2 = 9,
yy = 1401938.856802022,
yyc = 1073125.765645209,
partialcons = 0,
cons = 1,
jdf = 0,
j = 0,
ll = -994.3622459900874,
rankV = 3,
rankS = 3,
rankxx = 3,
rankzz = 3,
r2c = .772822462592346,
r2u = .8261050632949187,
clustvar = "company",
hacsubtitleV = "Statistics robust to heteroskedasticity and clustering on company",
hacsubtitleB = "Estimates efficient for homoskedasticity only",
title = "OLS estimation",
predict = "ivreg2_p",
version = "03.1.07",
cmdline = "ivreg2 invest mvalue kstock [aw=1/mvalue], cluster(company)",
cmd = "ivreg2",
wtype = "aweight",
wexp = "=1/mvalue",
model = "ols",
depvar = "invest",
vcetype = "Robust",
vce = "robust cluster",
partialsmall = "small",
inexog = "mvalue kstock",
insts = "mvalue kstock",
properties = "b V",
)
params_table = np.array([
.11694307068216, .02463240320082, 4.7475298990826, 2.059159576e-06,
.06866444755588, .16522169380844, np.nan, 1.9599639845401,
0, .10410756769914, .02157653909108, 4.8250355286218,
1.399783125e-06, .06181832816961, .14639680722867, np.nan,
1.9599639845401, 0, -9.2723336171089, 5.3995775192484,
-1.7172331694572, .08593657730569, -19.855311086568, 1.31064385235,
np.nan, 1.9599639845401, 0]).reshape(3,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'mvalue kstock _cons'.split()
cov = np.array([
.00060675528745, -.00005787711139, -.07953498994782, -.00005787711139,
.00046554703915, -.06210991017966, -.07953498994782, -.06210991017966,
29.155437386372]).reshape(3,3)
cov_colnames = 'mvalue kstock _cons'.split()
cov_rownames = 'mvalue kstock _cons'.split()
results_cluster_wls_large = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
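# The Bunch objects above hold Stata reference output (coefficients, standard
# errors, covariance matrices and summary statistics).  The helper below is a
# minimal, hypothetical sketch of how such a reference is typically compared
# against a statsmodels fit; the names y, X and groups are illustrative, the
# small-sample options (use_t, use_correction) may need adjusting to match a
# given Stata run, and nothing here is executed by this module.
def _example_check_against_statsmodels(y, X, groups, expected=results_cluster_large):
    import statsmodels.api as sm
    res = sm.OLS(y, X).fit(cov_type='cluster', cov_kwds={'groups': groups})
    # column 0 of params_table is the coefficient ('b'), column 1 the standard error ('se')
    np.testing.assert_allclose(res.params, expected.params_table[:, 0], rtol=1e-5)
    np.testing.assert_allclose(res.bse, expected.params_table[:, 1], rtol=1e-5)
    np.testing.assert_allclose(res.cov_params(), expected.cov, rtol=1e-5)
    return res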
|
StarcoderdataPython
|
4858389
|
<gh_stars>1-10
num1 = 11
num2 = 22
num3 = 33333333
num3 = 333
num4 = 4444
|
StarcoderdataPython
|
11243112
|
<gh_stars>10-100
import matplotlib.pyplot as plt
import geneview as gv
df = gv.utils.load_dataset("gwas")
gv.qqplot(df.loc[:, "P"])
plt.tight_layout()
plt.show()
|
StarcoderdataPython
|
4828363
|
from fastapi import Response
from fastapi.routing import APIRouter
import hashlib
import os
from bootstrap import db, CONFIG
import time
from util import *
from pydantic import BaseModel
from starlette.status import *
from tinydb import Query, where
import secrets
router = APIRouter(prefix="/login")
@router.get("/init")
async def init_login():
srv_bits = hashlib.sha256(os.urandom(16)).hexdigest()
uuid = hashlib.sha256(os.urandom(8)).hexdigest()
db.connections.insert(
{
"uuid": uuid,
"srv_bits": srv_bits,
"timestamp": time.time(),
"type": "instantiating",
}
)
db.connections.remove(where("timestamp") < (time.time() - 10))
return {"uuid": uuid, "bits": srv_bits}
class LoginModel(BaseModel):
username: str
password_hash: str
cli_bits: str
uuid: str
@router.post("/")
async def login(model: LoginModel, response: Response):
    if model.username not in CONFIG["server"]["users"]:
response.status_code = HTTP_404_NOT_FOUND
return error(404, f"Error: user {model.username} not in userbank.")
documents: list[dict] = db.connections.search(
Query().fragment({"type": "instantiating", "uuid": model.uuid})
)
if len(documents) == 0:
response.status_code = HTTP_404_NOT_FOUND
return error(404, f"Error: uuid {model.uuid} has not yet been instantiated.")
doc = documents[0]
if doc["timestamp"] + 5 < time.time():
response.status_code = HTTP_403_FORBIDDEN
return error(
403, f"Error: uuid {model.uuid} has past the instantiation deadline."
)
if (
model.password_hash
== hashlib.sha256(
(doc["srv_bits"]
+ CONFIG["server"]["users"][model.username]["password"]
+ model.cli_bits).encode("utf-8")
).hexdigest()
):
uuid = hashlib.sha256(os.urandom(32)).hexdigest()
db.connections.remove(where("uuid") == model.uuid)
db.connections.remove(where("username") == model.username)
db.connections.insert({
"username": model.username,
"uuid": uuid,
"type": "connection"
})
return {
"uuid": uuid
}
else:
response.status_code = HTTP_403_FORBIDDEN
return error(
403, f"Incorrect password."
)
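# A hypothetical client-side sketch of the handshake implemented above: fetch
# server bits from /login/init, mix them with the password and freshly chosen
# client bits, and post the resulting digest to /login/.  The base URL and the
# `requests` dependency are illustrative assumptions; this helper is not used
# by the server itself.
def _example_client_login(base_url: str, username: str, password: str):
    import requests  # assumed to be available in the client environment
    init = requests.get(f"{base_url}/login/init").json()
    cli_bits = hashlib.sha256(os.urandom(16)).hexdigest()
    digest = hashlib.sha256(
        (init["bits"] + password + cli_bits).encode("utf-8")
    ).hexdigest()
    resp = requests.post(f"{base_url}/login/", json={
        "username": username,
        "password_hash": digest,
        "cli_bits": cli_bits,
        "uuid": init["uuid"],
    })
    return resp.json()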
|
StarcoderdataPython
|
3349582
|
<gh_stars>0
from aocd import submit
#-------------------------------Run once!---------------------
# get data from aocd
#import os
#os.system('del day7.txt')
#os.system('aocd 7 2020 >> day7.txt')
#--------------------------------------------------------------
def unique_colors(target_bag, d, lines, seen):
count = 0
for line in lines:
this_bag, other_bags = line.split(" contain ")
for bag in other_bags.split(", "):
if target_bag in bag:
seen.add(this_bag)
seen |= unique_colors(this_bag[:-1], d, lines, seen)
count += 1
return seen
# count how many bags are (recursively) contained inside the starting bag
def number_of(stack, d, lines, count):
total = 0
while len(stack) > 0:
find = stack.pop()
if "shiny gold bag" not in find:
print(find, total, int(find.split()[0]))
total += int(find.split()[0])
for line in lines:
this_bag, other_bags = line.split(" contain ")
if ' '.join(find.strip('.').strip('s').strip().split()[1:]) in this_bag:
n = 1
if "shiny gold bag" not in find:
n = int(find.split()[0])
for bag in other_bags.split(", "):
if "no other" in bag:
continue
#print(bag)
count += int(bag.split()[0]) * n
stack.append(f"{int(bag.split()[0]) * n} {' '.join(bag.split()[1:])}")
print(stack)
return total
# ---- parse the bag rules and solve part 2 (part 1 variant left commented out below) ----
def solve(lines):
    d = dict()
    for line in lines:
        this_bag, other_bags = line.split(" contain ")
        d[this_bag] = [bag for bag in other_bags.split(", ")]
    return number_of(["shiny gold bag"], d, lines, 0)
    # part 1: return len(unique_colors("shiny gold bag", d, lines, set()))
if __name__ == '__main__':
# read in input (get_data() returns string)
lines = [line.strip() for line in open('day7.txt')]
#submit(sol1(i))
#submit(sol1(i), part="a", day=2, year=2020)
print(solve(lines))
#submit(solve(lines))
|
StarcoderdataPython
|
6663123
|
<reponame>Comcast/Fred-Framework
from ovsdb_client import OVSDBConnection
import variables as names
import sys
import argparse
def main(argv):
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--table_name", type=str, required=False, help="FSM table name to be updated")
ap.add_argument("-q", "--update_query", type=str, required=True, help="Update query to run on the FSM table")
args = ap.parse_args()
ovsdb_client = OVSDBConnection(names.__OVSDB_IP__, names.__OVSDB_PORT__)
ovsdb_client.update_fsm_table(args.update_query)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
StarcoderdataPython
|
3545660
|
<filename>teddix-common/teddix/TeddixHPUX.py
#!/usr/bin/env python
#
import os
import re
import sys
import time
import psutil
import platform
import subprocess
# Syslog handler
import TeddixLogger
# Config parser
import TeddixConfigFile
class TeddixHPUX:
    # NOTE: the getters below are placeholder stubs that return empty values
    # so the class can be instantiated and exercised without NameErrors; a
    # full implementation would query the HP-UX tooling for each item.
    # Get installed packages
    def getpkgs(self):
        return []
    # Get partitions
    def getpartitions(self):
        return []
    # Get network interfaces
    def getnics(self):
        return []
    # Get mac address
    def getmac(self, nic):
        return ''
    # Get ipv4 addresses
    def getip(self, nic):
        return []
    # Get dns servers
    def getdns(self):
        return []
    # Get routes
    def getroutes(self):
        return []
    # Get groups
    def getgroups(self):
        return []
    # Get users
    def getusers(self):
        return []
    # Get processes
    def getprocs(self):
        return []
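# A hypothetical sketch of how one of the stubs above could be filled in:
# getdns() by parsing /etc/resolv.conf.  The path and parsing logic are
# illustrative assumptions, not part of the original agent.
def _example_getdns(resolv_conf='/etc/resolv.conf'):
    dns = []
    try:
        with open(resolv_conf) as f:
            for line in f:
                parts = line.split()
                if len(parts) >= 2 and parts[0] == 'nameserver':
                    dns.append(parts[1])
    except IOError:
        pass
    return dns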
|
StarcoderdataPython
|
6529519
|
<reponame>sqoor/SeqGenSQL-ui
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import DataRequired
from markupsafe import Markup
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
host = StringField('Host/Server', validators=[DataRequired()])
db = StringField('Database', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class ConnectForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
server = StringField('Host/Server', validators=[DataRequired()])
db = StringField('Database', validators=[DataRequired()])
submit = SubmitField('Connect')
class ConvertForm(FlaskForm):
text = TextAreaField('Text', render_kw={"rows": 20, "cols": 60})
view_sql = SubmitField('View SQL')
execute_sql = SubmitField('Execute SQL')
|
StarcoderdataPython
|
8037504
|
from django.db import models
class HashSha(models.Model):
frag1 = models.CharField(max_length=254)
frag2 = models.CharField(max_length=254)
frag3 = models.CharField(max_length=130)
frag4 = models.CharField(max_length=4)
frag5 = models.CharField(max_length=2)
resource = models.OneToOneField(
"Resource",
on_delete=models.CASCADE
)
|
StarcoderdataPython
|
6610940
|
import os
import socket
import zmq
import constants
class RTPPacketUDPSender:
def __init__(self, ip, port=9529):
self.ip = ip
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("video RTP packet UDP sender created, pid = %d, target = %s:%d" % (os.getpid(), self.ip, self.port))
def emit(self, rtp_pkt): # rtp_pkt should be a bytes-like object
self.sock.sendto(rtp_pkt, (self.ip, self.port))
def release(self):
self.sock.close()
print("video RTP packet UDP sender released, pid = %d" % os.getpid())
class RTPPacketTCPSender:
def __init__(self, ip, port=9530):
self.ip = ip
self.port = port
self.zmq_context = zmq.Context.instance()
self._connect_socket()
print("video RTP packet TCP sender created, pid = %d, target = %s:%d" % (os.getpid(), self.ip, self.port))
def _connect_socket(self):
self.zmq_socket = self.zmq_context.socket(zmq.REQ)
self.zmq_socket.setsockopt(zmq.RCVTIMEO, constants.get_request_rely_timeout()) # milliseconds
# set option before connect
self.zmq_socket.connect("tcp://%s:%d" % (self.ip, self.port))
def _reset_my_socket(self):
self.zmq_socket.close()
self._connect_socket()
def emit(self, rtp_pkt):
self.zmq_socket.send(rtp_pkt, copy=False)
try:
reply = self.zmq_socket.recv() # receive the reply message
return reply
except zmq.Again: # timeout
print("timeout, did not receive RTP packet sending reply")
self._reset_my_socket()
return None
def release(self):
self.zmq_socket.close()
print("video RTP packet TCP sender released, pid = %d, target = %s:%d" % (os.getpid(), self.ip, self.port))
|
StarcoderdataPython
|
1993308
|
<filename>desktop/core/src/desktop/lib/test_runners.py
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import threading
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.signals import template_rendered
from django_nose.runner import NoseTestSuiteRunner, translate_option
from mako import runtime
from mako.template import Template
__all__ = ['HueTestRunner']
# Capturing the mako context is not thread safe, so we wrap rendering in a mutex.
_MAKO_LOCK = threading.RLock()
def _instrumented_test_render(self, *args, **data):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
with _MAKO_LOCK:
def mako_callable_(context, *args, **kwargs):
template_rendered.send(sender=self, template=self, context=context)
return self.original_callable_[-1](context, *args, **kwargs)
if hasattr(self, 'original_callable_'):
self.original_callable_.append(self.callable_)
else:
self.original_callable_ = [self.callable_]
self.callable_ = mako_callable_
try:
response = runtime._render(self, self.original_callable_[-1], args, data)
finally:
self.callable_ = self.original_callable_.pop()
return response
class HueTestRunner(NoseTestSuiteRunner):
__test__ = False
def setup_test_environment(self, **kwargs):
super(HueTestRunner, self).setup_test_environment(**kwargs)
Template.original_render = Template.render
Template.render = _instrumented_test_render
def teardown_test_environment(self, **kwargs):
super(HueTestRunner, self).teardown_test_environment(**kwargs)
Template.render = Template.original_render
del Template.original_render
def run_tests(self, test_labels, *args):
nose_argv = (['nosetests'] + list(test_labels))
if args:
nose_argv.extend(args)
if hasattr(settings, 'NOSE_ARGS'):
extended_nose_args = settings.NOSE_ARGS
# Remove coverage packages option from settings.NOSE_ARGS if explicitly mentioned as test command-line argument.
# This will help as an option to report coverage for specific packages only if required.
for nose_arg in nose_argv:
if nose_arg.startswith('--cover-package'):
extended_nose_args = []
for arg in settings.NOSE_ARGS:
if not arg.startswith('--cover-package'):
extended_nose_args.append(arg)
nose_argv.extend(extended_nose_args)
# Skip over 'manage.py test' and any arguments handled by django.
django_opts = ['--noinput', '--liveserver', '-p', '--pattern']
#for opt in BaseCommand.option_list:
# django_opts.extend(opt._long_opts)
# django_opts.extend(opt._short_opts)
nose_argv.extend(translate_option(opt) for opt in sys.argv[1:]
if opt.startswith('-') and not any(opt.startswith(d) for d in django_opts))
# if --nose-verbosity was omitted, pass Django verbosity to nose
if ('--verbosity' not in nose_argv and not any(opt.startswith('--verbosity=') for opt in nose_argv)):
nose_argv.append('--verbosity=%s' % str(self.verbosity))
if self.verbosity >= 1:
print(' '.join(nose_argv))
result = self.run_suite(nose_argv)
# suite_result expects the suite as the first argument. Fake it.
return self.suite_result({}, result)
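# A hypothetical settings snippet showing how a runner like this is typically
# wired into a Django project; the dotted path and NOSE_ARGS values below are
# illustrative assumptions, not Hue's actual configuration.
#
#   TEST_RUNNER = 'desktop.lib.test_runners.HueTestRunner'
#   NOSE_ARGS = ['--logging-clear-handlers', '--cover-package=desktop']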
|
StarcoderdataPython
|
8015718
|
import logging
from qolsys.exceptions import QolsysGwConfigIncomplete
from qolsys.exceptions import QolsysGwConfigError
LOGGER = logging.getLogger(__name__)
class QolsysGatewayConfig(object):
_SENTINEL = object()
_DEFAULT_CONFIG = {
'panel_host': _SENTINEL,
'panel_port': None,
'panel_mac': None,
        'panel_token': _SENTINEL,
'panel_user_code': None,
'panel_unique_id': 'qolsys_panel',
'panel_device_name': 'Qolsys Panel',
'arm_away_exit_delay': None,
'mqtt_namespace': 'mqtt',
'mqtt_retain': True,
'discovery_topic': 'homeassistant',
'control_topic': '{discovery_topic}/alarm_control_panel/{panel_unique_id}/set',
'event_topic': 'qolsys/{panel_unique_id}/event',
'user_control_token': None,
'ha_check_user_code': True,
'ha_user_code': None,
'code_arm_required': False,
'code_disarm_required': False,
'code_trigger_required': False,
'default_trigger_command': None,
'default_sensor_device_class': 'safety',
}
def __init__(self, args=None, check=True):
self._override_config = {}
if args:
self.load(args)
if check:
self.check()
def load(self, args):
for k, v in args.items():
if k in self._DEFAULT_CONFIG:
self._override_config[k] = v
def check(self):
errors = 0
for k in self._DEFAULT_CONFIG.keys():
if self.get(k) is self._SENTINEL:
LOGGER.error(f"Missing mandatory configuration key '{k}'")
errors += 1
if errors > 0:
raise QolsysGwConfigIncomplete
if self.get('panel_user_code') is None:
if self.get('ha_user_code'):
raise QolsysGwConfigError(
"Cannot use 'ha_user_code' if "\
"'panel_user_code' is not set")
for k in ['code_arm_required', 'code_trigger_required']:
if self.get(k):
raise QolsysGwConfigError(
f"Cannot use '{k}' if no disarm code is set, as the "\
"Qolsys Panel does not offer a built-in way to check "\
"for the code on ARM or TRIGGER actions.")
# Without a configured disarm code, we cannot have home assistant
# checking it for us
self._override_config['ha_check_user_code'] = False
# Without a configured disarm code, we will use the one provided
# in home assistant to try and disarm the alarm
self._override_config['code_disarm_required'] = True
trig_cmd = self.get('default_trigger_command')
if trig_cmd:
trig_cmd = trig_cmd.upper()
valid_trigger = [
'TRIGGER',
'TRIGGER_AUXILIARY',
'TRIGGER_FIRE',
'TRIGGER_POLICE',
]
            if trig_cmd not in valid_trigger:
raise QolsysGwConfigError(
f"Invalid trigger command '{trig_cmd}'; must be one of "\
f"{', '.join(valid_trigger)}")
self._override_config['default_trigger_command'] = trig_cmd
# Apply a template to the control and event topics if the unique id
# is part of the requested topics
for k in ('control_topic', 'event_topic'):
v = self.get(k)
if v:
self._override_config[k] = v.format(
panel_unique_id=self.get('panel_unique_id') or 'qolsys',
discovery_topic=self.get('discovery_topic'))
def get(self, name):
value = self._override_config.get(name, self._SENTINEL)
if value is self._SENTINEL:
value = self._DEFAULT_CONFIG.get(name, self._SENTINEL)
return value
def __getattr__(self, name):
value = self.get(name)
if value is self._SENTINEL:
raise AttributeError
return value
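# A minimal, hypothetical usage sketch for the configuration class above; the
# values are illustrative only.  Note how check() expands the topic templates
# using panel_unique_id and discovery_topic.
def _example_config():
    cfg = QolsysGatewayConfig(args={
        'panel_host': '192.168.1.50',
        'panel_token': 'not-a-real-token',
        'panel_user_code': '1234',
    })
    # cfg.control_topic -> 'homeassistant/alarm_control_panel/qolsys_panel/set'
    # cfg.event_topic   -> 'qolsys/qolsys_panel/event'
    return cfg.control_topic, cfg.event_topic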
|
StarcoderdataPython
|
6659023
|
############################################################
### Forked from https://github.com/cmhcbb/attackbox
############################################################
from __future__ import absolute_import, division, print_function
import numpy as np
class HSJA(object):
def __init__(self,model,constraint=2,num_iterations=40,gamma=1.0,stepsize_search='geometric_progression',max_num_evals=1e4,init_num_evals=100, verbose=True):
self.model = model
self.constraint = constraint
self.num_iterations = num_iterations
self.gamma = gamma
self.stepsize_search = stepsize_search
self.max_num_evals = max_num_evals
self.init_num_evals = init_num_evals
self.verbose = verbose
def hsja(self,input_xi,label_or_target,initial_xi,TARGETED):
if (self.model.predict_label(input_xi) != label_or_target):
print("Fail to classify the image. No need to attack.")
return (False, None)
# Set parameters
# original_label = np.argmax(self.model.predict_label(input_xi))
d = int(np.prod(input_xi.shape))
# Set binary search threshold.
if self.constraint == 2:
theta = self.gamma / (np.sqrt(d) * d)
else:
theta = self.gamma / (d ** 2)
# Initialize.
perturbed = self.initialize(input_xi, label_or_target, initial_xi, TARGETED)
if perturbed is None:
print("Fail to find initial adversarial image.")
return (False, None)
# Project the initialization to the boundary.
perturbed, dist_post_update = self.binary_search_batch(input_xi, np.expand_dims(perturbed, 0), label_or_target, theta, TARGETED)
dist = self.compute_distance(perturbed, input_xi)
for j in np.arange(self.num_iterations):
#params['cur_iter'] = j + 1
# Choose delta.
if j==1:
delta = 0.1 * (self.model.bounds[1] - self.model.bounds[0])
else:
if self.constraint == 2:
delta = np.sqrt(d) * theta * dist_post_update
elif self.constraint == np.inf:
delta = d * theta * dist_post_update
# Choose number of evaluations.
num_evals = int(self.init_num_evals * np.sqrt(j+1))
num_evals = int(min([num_evals, self.max_num_evals]))
# approximate gradient.
gradf = self.approximate_gradient(perturbed, label_or_target, num_evals,
delta, TARGETED)
if self.constraint == np.inf:
update = np.sign(gradf)
else:
update = gradf
# search step size.
if self.stepsize_search == 'geometric_progression':
# find step size.
epsilon = self.geometric_progression_for_stepsize(perturbed, label_or_target,
update, dist, j+1, TARGETED)
# Update the sample.
perturbed = self.clip_image(perturbed + epsilon * update,
self.model.bounds[0], self.model.bounds[1])
# Binary search to return to the boundary.
perturbed, dist_post_update = self.binary_search_batch(input_xi,
perturbed[None], label_or_target, theta, TARGETED)
            elif self.stepsize_search == 'grid_search':
# Grid search for stepsize.
epsilons = np.logspace(-4, 0, num=20, endpoint = True) * dist
epsilons_shape = [20] + len(input_xi.shape) * [1]
perturbeds = perturbed + epsilons.reshape(epsilons_shape) * update
perturbeds = self.clip_image(perturbeds, self.model.bounds[0], self.model.bounds[1])
idx_perturbed = self.decision_function(perturbeds, label_or_target, TARGETED)
if np.sum(idx_perturbed) > 0:
# Select the perturbation that yields the minimum distance # after binary search.
perturbed, dist_post_update = self.binary_search_batch(input_xi,
perturbeds[idx_perturbed], label_or_target, theta, TARGETED)
# compute new distance.
dist = self.compute_distance(perturbed, input_xi)
if self.verbose:
print('iteration: {:d}, distance {:.4E}'.format(j+1, dist))
return (True, perturbed)
def decision_function(self, images, label, TARGETED):
"""
Decision function output 1 on the desired side of the boundary,
0 otherwise.
"""
# images = torch.from_numpy(images).float().cuda()
assert images is not None
la = self.model.predict_label(images)
#print(la,label)
# la = la.cpu().numpy()
if TARGETED:
return (la==label)
else:
return (la!=label)
def clip_image(self, image, clip_min, clip_max):
# Clip an image, or an image batch, with upper and lower threshold.
return np.minimum(np.maximum(clip_min, image), clip_max)
def compute_distance(self, x_ori, x_pert):
# Compute the distance between two images.
if self.constraint == 2:
return np.linalg.norm(x_ori - x_pert)
elif self.constraint == np.inf:
return np.max(abs(x_ori - x_pert))
def approximate_gradient(self, sample, label_or_target, num_evals, delta, TARGETED):
# Generate random vectors.
noise_shape = [num_evals] + list(sample.shape)
if self.constraint == 2:
rv = np.random.randn(*noise_shape)
elif self.constraint == np.inf:
rv = np.random.uniform(low = -1, high = 1, size = noise_shape)
rv = rv / np.sqrt(np.sum(rv ** 2, axis = (1), keepdims = True))
perturbed = sample + delta * rv
perturbed = self.clip_image(perturbed, self.model.bounds[0], self.model.bounds[1])
rv = (perturbed - sample) / delta
# query the model.
decisions = self.decision_function(perturbed, label_or_target, TARGETED)
decision_shape = [len(decisions)] + [1] * len(sample.shape)
fval = 2 * decisions.astype(float).reshape(decision_shape) - 1.0
# Baseline subtraction (when fval differs)
if np.mean(fval) == 1.0: # label changes.
gradf = np.mean(rv, axis = 0)
elif np.mean(fval) == -1.0: # label not change.
gradf = - np.mean(rv, axis = 0)
else:
fval -= np.mean(fval)
gradf = np.mean(fval * rv, axis = 0)
# Get the gradient direction.
gradf = gradf / np.linalg.norm(gradf)
return gradf
def project(self, original_image, perturbed_images, alphas):
# alphas_shape = [1] * len(original_image.shape)
# alphas = alphas.reshape(alphas_shape)
assert len(original_image.shape) == 1
assert len(perturbed_images.shape) == 2
if self.constraint == 2:
#print(alphas.shape,original_image.shape, perturbed_images.shape)
return (1-alphas) * original_image + alphas * perturbed_images
elif self.constraint == np.inf:
out_images = self.clip_image(
perturbed_images,
original_image - alphas,
original_image + alphas
)
return out_images
else:
raise Exception(f"Unsupported constraint {self.constraint}")
def binary_search_batch(self, original_image, perturbed_images, label_or_target, theta, TARGETED):
""" Binary search to approach the boundar. """
assert len(original_image.shape) == 1
assert len(perturbed_images.shape) == 2
# Compute distance between each of perturbed image and original image.
dists_post_update = np.array([
self.compute_distance(
original_image,
perturbed_image
)
for perturbed_image in perturbed_images])
#print(dists_post_update)
# Choose upper thresholds in binary searchs based on constraint.
if self.constraint == np.inf:
highs = dists_post_update
# Stopping criteria.
thresholds = np.minimum(dists_post_update * theta, theta)
else:
highs = np.ones(len(perturbed_images))
thresholds = theta
lows = np.zeros(len(perturbed_images))
# Call recursive function.
while np.max((highs - lows) / thresholds) > 1:
# projection to mids.
mids = (highs + lows) / 2.0
mid_images = self.project(original_image, perturbed_images, mids)
# print(mid_images.shape)
# Update highs and lows based on model decisions.
decisions = self.decision_function(mid_images, label_or_target, TARGETED)
lows = np.where(decisions == 0, mids, lows)
highs = np.where(decisions == 1, mids, highs)
out_images = self.project(original_image, perturbed_images, highs)
# Compute distance of the output image to select the best choice.
# (only used when stepsize_search is grid_search.)
dists = np.array([
self.compute_distance(
original_image,
out_image
)
for out_image in out_images])
idx = np.argmin(dists)
dist = dists_post_update[idx]
out_image = out_images[idx]
return out_image, dist
def initialize(self, input_xi, label_or_target, initial_xi, TARGETED):
"""
Efficient Implementation of BlendedUniformNoiseAttack in Foolbox.
"""
success = 0
num_evals = 0
if initial_xi is None:
# Find a misclassified random noise.
while num_evals < 1e4:
random_noise = np.random.uniform(*self.model.bounds, size = input_xi.shape)
#print(random_noise[None].shape)
success = self.decision_function(random_noise, label_or_target, TARGETED)
                num_evals += 1
                self.model.num_queries += 1
if success:
break
if not success:
print("Initialization failed! ")
return None
# Binary search to minimize l2 distance to original image.
low = 0.0
high = 1.0
while high - low > 0.001:
mid = (high + low) / 2.0
blended = (1 - mid) * input_xi + mid * random_noise
success = self.decision_function(blended, label_or_target, TARGETED)
if success:
high = mid
else:
low = mid
initialization = (1 - high) * input_xi + high * random_noise
else:
initialization = initial_xi
return initialization
def geometric_progression_for_stepsize(self, x, label_or_target, update, dist, j, TARGETED):
"""
Geometric progression to search for stepsize.
Keep decreasing stepsize by half until reaching
the desired side of the boundary,
"""
epsilon = dist / np.sqrt(j)
def phi(epsilon):
new = x + epsilon * update
success = self.decision_function(new, label_or_target, TARGETED)
return success
while not phi(epsilon):
epsilon /= 2.0
return epsilon
def __call__(self, input_xi, label_or_target, initial_xi=None, target=None, TARGETED=False):
# input_xi = input_xi.cpu().numpy()
# label_or_target = label_or_target.cpu().numpy()
adv = self.hsja(input_xi, label_or_target, initial_xi, TARGETED)
# adv = torch.from_numpy(adv).float().cuda()
return adv
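# A hypothetical end-to-end sketch: a toy linear two-class model exposing the
# interface hsja() relies on (predict_label, bounds, num_queries), attacked in
# the untargeted setting.  Everything below is illustrative only and is not
# executed when this module is imported.
class _ExampleLinearModel:
    def __init__(self, w, b=0.0, bounds=(0.0, 1.0)):
        self.w = np.asarray(w, dtype=float)
        self.b = b
        self.bounds = bounds
        self.num_queries = 0
    def predict_label(self, x):
        # works for a single flat sample (d,) as well as a batch (n, d)
        scores = np.asarray(x, dtype=float).dot(self.w) + self.b
        return (scores > 0).astype(int)
def _example_hsja_run():
    rng = np.random.RandomState(0)
    model = _ExampleLinearModel(w=rng.randn(20))
    x = rng.uniform(0.0, 1.0, size=20)   # hsja expects a flat, 1-D input
    label = model.predict_label(x)
    attack = HSJA(model, num_iterations=5, init_num_evals=20,
                  max_num_evals=200, verbose=False)
    return attack(x, label)              # (success_flag, adversarial_example)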
|
StarcoderdataPython
|
6591050
|
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import argparse
from app.db import Document
import os
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--database', type=str, required=True, help="Database.")
parser.add_argument('-i', '--document-id', type=str, required=True, help="Id of document.")
args = parser.parse_args()
return args
def main():
args = parseargs()
database_url = 'sqlite:///' + args.database
engine = create_engine(database_url, convert_unicode=True, connect_args={'check_same_thread': False})
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
document = db_session.query(Document).filter(Document.id == args.document_id).first()
for image in document.images:
extension = os.path.splitext(image.filename)[1]
print(image.filename, "{}{}".format(image.id, extension))
if __name__ == '__main__':
sys.exit(main())
|
StarcoderdataPython
|
1781334
|
<gh_stars>1-10
from __future__ import annotations
from numpy.typing import ArrayLike
from sklearn.linear_model import LinearRegression
from .regression_model import RegressionModel
class LinearModel(RegressionModel):
def __init__(self):
self._lin_regressor: LinearRegression | None = None
def learn(self, x_train: ArrayLike, y_train: ArrayLike):
lin_regressor = LinearRegression()
lin_regressor.fit(x_train, y_train)
self._lin_regressor = lin_regressor
def predict(self, x_test: ArrayLike) -> ArrayLike:
lin_regressor = self._lin_regressor
return lin_regressor.predict(x_test)
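# A short, hypothetical usage sketch for the wrapper above; the data is
# synthetic and purely illustrative.
def _example_linear_model():
    import numpy as np
    x_train = np.array([[0.0], [1.0], [2.0], [3.0]])
    y_train = np.array([1.0, 3.0, 5.0, 7.0])   # y = 2x + 1
    model = LinearModel()
    model.learn(x_train, y_train)
    return model.predict(np.array([[4.0]]))    # approximately [9.0]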
|
StarcoderdataPython
|
5131894
|
import tensorflow as tf
from tf_transformers.core import LegacyLayer, LegacyModel
from tf_transformers.utils import tf_utils
class Similarity_Model_Pretraining(LegacyLayer):
def __init__(
self,
encoder,
projection_dimension,
decoder=None,
is_training=True,
use_dropout=False,
initializer="glorot_uniform",
siamese=True,
**kwargs,
):
super(Similarity_Model_Pretraining, self).__init__(
is_training=is_training, use_dropout=use_dropout, name=encoder.name, **kwargs
)
self.is_training = is_training
if siamese:
self.encoder = encoder
self.decoder = encoder
else:
if decoder is None:
raise ValueError("When siamese = False, decoder has to be provided. Provided decoder = None.")
self.encoder = encoder
self.decoder = decoder
self.linear_projection = tf.keras.layers.Dense(
units=projection_dimension,
activation=None,
kernel_initializer=initializer,
name="linear_projection",
)
# As per CLIP paper
self.logits_scale = tf.Variable(tf.math.log(1 / 0.07), name='logits_scale')
def call(self, inputs):
"""Call"""
if self.is_training:
original_inputs = {k.replace("original_", ""): v for k, v in inputs.items() if k.startswith("original_")}
corrupted_inputs = {k.replace("corrupted_", ""): v for k, v in inputs.items() if k.startswith("corrupted_")}
original_outputs = self.encoder(original_inputs)
corrupted_outputs = self.decoder(corrupted_inputs)
if 'cls_output' not in original_outputs:
original_outputs['cls_output'] = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(
original_outputs['token_embeddings']
)
if 'cls_output' not in corrupted_outputs:
corrupted_outputs['cls_output'] = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(
corrupted_outputs['token_embeddings']
)
original_sentence_embedding = self.linear_projection(original_outputs['cls_output'])
corrupted_sentence_embedding = self.linear_projection(corrupted_outputs['cls_output'])
original_sentence_embedding_normalized = tf.keras.layers.Lambda(lambda x: tf.nn.l2_normalize(x, axis=1))(
original_sentence_embedding
)
corrupted_sentence_embedding_normalized = tf.keras.layers.Lambda(lambda x: tf.nn.l2_normalize(x, axis=1))(
corrupted_sentence_embedding
)
logits = tf.matmul(
original_sentence_embedding_normalized, corrupted_sentence_embedding_normalized, transpose_b=True
)
# Clamp logits to a max of tf.math.log(100) = 4.6051702 as per CLIP model
logits_scale = tf.math.exp(self.logits_scale)
logits_scale = tf.clip_by_value(
                logits_scale, clip_value_min=tf.math.log(1 / 0.07), clip_value_max=4.6051702
)
logits = tf.cast(logits_scale, dtype=tf_utils.get_dtype()) * logits
corrupted_outputs['logits'] = logits
corrupted_outputs['original_sentence_embedding_normalized'] = original_sentence_embedding_normalized
corrupted_outputs['corrupted_sentence_embedding_normalized'] = corrupted_sentence_embedding_normalized
return corrupted_outputs
else:
first_inputs = {k.replace("first_", ""): v for k, v in inputs.items() if k.startswith("first_")}
second_inputs = {k.replace("second_", ""): v for k, v in inputs.items() if k.startswith("second_")}
first_outputs = self.encoder(first_inputs)
second_outputs = self.decoder(second_inputs)
if 'cls_output' not in first_outputs:
first_outputs['cls_output'] = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(
first_outputs['token_embeddings']
)
if 'cls_output' not in second_outputs:
second_outputs['cls_output'] = tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(
second_outputs['token_embeddings']
)
first_sentence_embedding = self.linear_projection(first_outputs['cls_output'])
second_sentence_embedding = self.linear_projection(second_outputs['cls_output'])
first_sentence_embedding_normalized = tf.keras.layers.Lambda(lambda x: tf.nn.l2_normalize(x, axis=1))(
first_sentence_embedding
)
second_sentence_embedding_normalized = tf.keras.layers.Lambda(lambda x: tf.nn.l2_normalize(x, axis=1))(
second_sentence_embedding
)
logits = tf.matmul(
first_sentence_embedding_normalized, second_sentence_embedding_normalized, transpose_b=True
)
# Clamp logits to a max of tf.math.log(100) = 4.6051702 as per CLIP model
logits_scale = tf.math.exp(self.logits_scale)
# no need to clamp at testing
# self.logits_scale = tf.clip_by_value(self.logits_scale, clip_value_min=tf.math.log(1/0.07), clip_value_max=4.6051752)
logits = tf.cast(logits_scale, dtype=tf_utils.get_dtype()) * logits
outputs = {}
outputs['first_sentence_embedding_normalized'] = first_sentence_embedding_normalized
            outputs['second_sentence_embedding_normalized'] = second_sentence_embedding_normalized
outputs['logits'] = logits
return outputs
def get_model(self):
if self.is_training:
inputs = {}
# Assume encoder and decoder have same input types
for k, v in self.encoder.input.items():
inputs["original_" + k] = v
for k, v in self.encoder.input.items():
inputs["corrupted_" + k] = tf.keras.layers.Input(
shape=v.shape[1:], batch_size=v.shape[0], dtype=v.dtype, name=v.name.split(":")[0] + "_2"
)
else:
inputs = {}
# Assume encoder and decoder have same input types
for k, v in self.encoder.input.items():
inputs["first_" + k] = v
for k, v in self.encoder.input.items():
inputs["second_" + k] = tf.keras.layers.Input(
shape=v.shape[1:], batch_size=v.shape[0], dtype=v.dtype, name=v.name.split(":")[0] + "_3"
)
layer_output = self(inputs)
model = LegacyModel(inputs=inputs, outputs=layer_output, name="similarity_model")
try:
model.model_config = self.encoder._config_dict
        except AttributeError:
model.model_config = self.encoder.model_config
return model
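# A hypothetical sketch of the symmetric in-batch contrastive loss that is
# typically paired with the scaled similarity `logits` produced above (CLIP
# style): row i of the original batch should match column i of the corrupted
# batch.  This helper is illustrative only and is not used by the layer.
def _example_clip_style_loss(logits):
    batch_size = tf.shape(logits)[0]
    labels = tf.range(batch_size)
    loss_rows = tf.keras.losses.sparse_categorical_crossentropy(
        labels, logits, from_logits=True)
    loss_cols = tf.keras.losses.sparse_categorical_crossentropy(
        labels, tf.transpose(logits), from_logits=True)
    return tf.reduce_mean((loss_rows + loss_cols) / 2.0)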
|
StarcoderdataPython
|
6486984
|
<reponame>mariajmolina/hysplit_applications<filename>alaska_storms/run_hysplit_ens.py
import pandas as pd
import datetime
import numpy as np
import argparse
import math
from pysplit.trajectory_generator import generate_bulktraj
############################################################################
############################################################################
parser = argparse.ArgumentParser(description='Calculating hysplit trajs.')
parser.add_argument("--climate", choices=["hist", "pgw"], required=True, type=str, help="This is the hist or pgw choice.")
parser.add_argument("--month", required=True, type=int, help="Storm month for trajectory calculation.")
parser.add_argument("--ens", required=True, type=int, help="Ensemble number.")
args=parser.parse_args()
which_climate=args.climate
which_month=args.month
ens_number=args.ens
############################################################################
############################################################################
work_help1=np.hstack([np.array([0 for i in range(36)]),
np.array([1 for i in range(36)]),
np.array([2 for i in range(36)]),
np.array([3 for i in range(36)]),
np.array([4 for i in range(36)]),
np.array([5 for i in range(36)]),
np.array([6 for i in range(36)]),
np.array([7 for i in range(36)]),
np.array([8 for i in range(36)]),
np.array([9 for i in range(36)])
])
work_help2=np.hstack([np.array([i+1 for i in range(36)]),
np.array([i+1 for i in range(36)]),
np.array([i+1 for i in range(36)]),
np.array([i+1 for i in range(36)]),
np.array([i+1 for i in range(36)]),
np.array([i+1 for i in range(36)]),
np.array([i+1 for i in range(36)]),
np.array([i+1 for i in range(36)]),
np.array([i+1 for i in range(36)]),
np.array([i+1 for i in range(36)])
])
which_working=f"work{work_help2[ens_number]}_{work_help1[ens_number]}"
############################################################################
############################################################################
csv_file=pd.read_csv(f'/glade/work/bpoujol/Moisture_tracking/trajectory_information_{which_climate}.csv')
dates=[]
for datetime_string in csv_file['TIME (UTC)'].values:
dates.append(datetime.datetime.strptime(datetime_string, '%Y-%m-%d_%H:%M:%S'))
ready_dates=pd.to_datetime(np.array(dates))
csv_file['YEAR']=ready_dates.year
csv_file['MONTH']=ready_dates.month
csv_file['DAY']=ready_dates.day
csv_file['HOUR']=ready_dates.hour
csv_file=csv_file[csv_file['MONTH']==which_month]
############################################################################
############################################################################
def ens_create(ens_num, lat, lon):
"""Extract the ensemble member's lat and lon coordinates.
"""
ens_help=np.hstack([np.array([i for i in range(49)]),
np.array([i for i in range(49)]),
np.array([i for i in range(49)]),
np.array([i for i in range(49)]),
np.array([i for i in range(49)]),
np.array([i for i in range(49)]),
np.array([i for i in range(49)])
])
ens_num=ens_help[ens_num]
if ens_num==0:
return compute_displacement(lat, lon)
if ens_num==1:
return compute_displacement(lat, lon, dist=1, bear=90)
if ens_num==2:
return compute_displacement(lat, lon, dist=2, bear=90)
if ens_num==3:
return compute_displacement(lat, lon, dist=3, bear=90)
if ens_num==4:
return compute_displacement(lat, lon, dist=1, bear=270)
if ens_num==5:
return compute_displacement(lat, lon, dist=2, bear=270)
if ens_num==6:
return compute_displacement(lat, lon, dist=3, bear=270)
if ens_num==7:
return compute_displacement(lat, lon, dist=1, bear=180)
if ens_num==8:
return compute_displacement(lat, lon, dist=2, bear=180)
if ens_num==9:
return compute_displacement(lat, lon, dist=3, bear=180)
if ens_num==10:
return compute_displacement(lat, lon, dist=1, bear=0)
if ens_num==11:
return compute_displacement(lat, lon, dist=2, bear=0)
if ens_num==12:
return compute_displacement(lat, lon, dist=3, bear=0)
if ens_num==13:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=0)
return compute_displacement(newlat, newlon, dist=1, bear=90)
if ens_num==14:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=0)
return compute_displacement(newlat, newlon, dist=2, bear=90)
if ens_num==15:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=0)
return compute_displacement(newlat, newlon, dist=3, bear=90)
if ens_num==16:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=0)
return compute_displacement(newlat, newlon, dist=1, bear=90)
if ens_num==17:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=0)
return compute_displacement(newlat, newlon, dist=2, bear=90)
if ens_num==18:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=0)
return compute_displacement(newlat, newlon, dist=3, bear=90)
if ens_num==19:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=0)
return compute_displacement(newlat, newlon, dist=1, bear=90)
if ens_num==20:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=0)
return compute_displacement(newlat, newlon, dist=2, bear=90)
if ens_num==21:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=0)
return compute_displacement(newlat, newlon, dist=3, bear=90)
if ens_num==22:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=0)
return compute_displacement(newlat, newlon, dist=1, bear=270)
if ens_num==23:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=0)
return compute_displacement(newlat, newlon, dist=2, bear=270)
if ens_num==24:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=0)
return compute_displacement(newlat, newlon, dist=3, bear=270)
if ens_num==25:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=0)
return compute_displacement(newlat, newlon, dist=1, bear=270)
if ens_num==26:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=0)
return compute_displacement(newlat, newlon, dist=2, bear=270)
if ens_num==27:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=0)
return compute_displacement(newlat, newlon, dist=3, bear=270)
if ens_num==28:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=0)
return compute_displacement(newlat, newlon, dist=1, bear=270)
if ens_num==29:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=0)
return compute_displacement(newlat, newlon, dist=2, bear=270)
if ens_num==30:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=0)
return compute_displacement(newlat, newlon, dist=3, bear=270)
if ens_num==31:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=180)
return compute_displacement(newlat, newlon, dist=1, bear=90)
if ens_num==32:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=180)
return compute_displacement(newlat, newlon, dist=2, bear=90)
if ens_num==33:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=180)
return compute_displacement(newlat, newlon, dist=3, bear=90)
if ens_num==34:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=180)
return compute_displacement(newlat, newlon, dist=1, bear=90)
if ens_num==35:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=180)
return compute_displacement(newlat, newlon, dist=2, bear=90)
if ens_num==36:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=180)
return compute_displacement(newlat, newlon, dist=3, bear=90)
if ens_num==37:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=180)
return compute_displacement(newlat, newlon, dist=1, bear=90)
if ens_num==38:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=180)
return compute_displacement(newlat, newlon, dist=2, bear=90)
if ens_num==39:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=180)
return compute_displacement(newlat, newlon, dist=3, bear=90)
if ens_num==40:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=180)
return compute_displacement(newlat, newlon, dist=1, bear=270)
if ens_num==41:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=180)
return compute_displacement(newlat, newlon, dist=2, bear=270)
if ens_num==42:
newlat, newlon=compute_displacement(lat, lon, dist=1, bear=180)
return compute_displacement(newlat, newlon, dist=3, bear=270)
if ens_num==43:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=180)
return compute_displacement(newlat, newlon, dist=1, bear=270)
if ens_num==44:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=180)
return compute_displacement(newlat, newlon, dist=2, bear=270)
if ens_num==45:
newlat, newlon=compute_displacement(lat, lon, dist=2, bear=180)
return compute_displacement(newlat, newlon, dist=3, bear=270)
if ens_num==46:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=180)
return compute_displacement(newlat, newlon, dist=1, bear=270)
if ens_num==47:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=180)
return compute_displacement(newlat, newlon, dist=2, bear=270)
if ens_num==48:
newlat, newlon=compute_displacement(lat, lon, dist=3, bear=180)
return compute_displacement(newlat, newlon, dist=3, bear=270)
############################################################################
############################################################################
def compute_displacement(lat, lon, dist=None, bear=None):
"""Compute the latitude and longitude for the respective ensemble member.
"""
if not dist:
return lat, lon
if dist:
R = 6378.1 #Radius of the Earth (km)
        brng = math.radians(bear) #Bearing converted to radians.
d = dist #Distance in km
lat1 = math.radians(lat) #Current lat point converted to radians
lon1 = math.radians(lon) #Current long point converted to radians
lat2 = math.asin( math.sin(lat1)*math.cos(d/R) +
math.cos(lat1)*math.sin(d/R)*math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng)*math.sin(d/R)*math.cos(lat1),
math.cos(d/R)-math.sin(lat1)*math.sin(lat2))
lat2 = math.degrees(lat2)
lon2 = math.degrees(lon2)
return lat2, lon2
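# A minimal sanity check for compute_displacement (hand-computed here, not part
# of the original script): moving 1 km due north from the equator should raise
# the latitude by d/R radians, i.e. roughly 0.00898 degrees, and leave the
# longitude unchanged.
#   compute_displacement(0.0, 0.0, dist=1, bear=0)  -> (~0.00898, 0.0)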
############################################################################
############################################################################
def height_generator(ens_num, altitude):
"""Generate the height for the respective ensemble member.
"""
fraction=np.hstack([np.array([1 for i in range(49)]),
np.array([0.95 for i in range(49)]),
np.array([0.9 for i in range(49)]),
np.array([0.85 for i in range(49)]),
np.array([0.8 for i in range(49)]),
np.array([0.75 for i in range(49)]),
np.array([0.7 for i in range(49)])
])
return altitude*fraction[ens_num]
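# Illustrative call (values chosen here, not from the original script): the
# fraction table assigns 1.0 to ens_num 0-48, 0.95 to 49-97, 0.9 to 98-146,
# and so on down to 0.7, so:
#   height_generator(ens_num=60, altitude=1000.0)  -> 950.0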
############################################################################
############################################################################
#where is hysplit working folder?
working_dir = f'/glade/scratch/molina/hysplit/trunk/{which_working}'
#where is arl format meteo data?
meteo_dir = f'/glade/scratch/molina/basile/{which_climate}'
#where is hysplit model executable?
hysplit_dir=r'/glade/scratch/molina/hysplit/trunk/exec/hyts_std'
#where to put trajs?
output_dir=f'/glade/scratch/molina/basile/{which_climate}_traj/'
############################################################################
############################################################################
runtime = -240
basename = []
years = []
months = []
hours = []
location = []
altitudes = []
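# Column layout assumed by the positional indexing below: 0=trajectory id,
# 2=subregion, 4=latitude, 5=longitude, 6=altitude, 8=year, 9=month, 10=day,
# 11=hour.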
for i in range(len(csv_file)):
print(i)
basename = 'trajid'+str(csv_file.iloc[i][0])+'_subregion'+str(csv_file.iloc[i][2])+'_'+'ens'+str(ens_number)+'_'
years = [csv_file.iloc[i][8]]
months = [csv_file.iloc[i][9]]
hours = [csv_file.iloc[i][11]]
location = ens_create(ens_num=ens_number, lat=csv_file.iloc[i][4], lon=csv_file.iloc[i][5])
altitudes = [height_generator(ens_num=ens_number, altitude=csv_file.iloc[i][6])]
day1 = (csv_file.iloc[i][10]-1)
day2 = csv_file.iloc[i][10]
generate_bulktraj(basename=basename,
hysplit_working=working_dir,
output_dir=output_dir,
meteo_dir=meteo_dir,
years=years,
months=months,
hours=hours,
altitudes=altitudes,
coordinates=location,
run=runtime,
meteoyr_2digits=False, outputyr_2digits=False,
monthslice=slice(day1, day2, 1),
meteo_bookends=([1] , [1]),
get_reverse=False, get_clipped=False, hysplit=hysplit_dir)
############################################################################
############################################################################
############################################################################
|
StarcoderdataPython
|
11215006
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import gzip
import json
from dataclasses import dataclass, Field, MISSING
from typing import Any, cast, Dict, IO, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from pytorch3d.common.datatypes import get_args, get_origin
_X = TypeVar("_X")
TF3 = Tuple[float, float, float]
@dataclass
class ImageAnnotation:
# path to jpg file, relative w.r.t. dataset_root
path: str
# H x W
size: Tuple[int, int] # TODO: rename size_hw?
@dataclass
class DepthAnnotation:
# path to png file, relative w.r.t. dataset_root, storing `depth / scale_adjustment`
path: str
# a factor to convert png values to actual depth: `depth = png * scale_adjustment`
scale_adjustment: float
# path to png file, relative w.r.t. dataset_root, storing binary `depth` mask
mask_path: Optional[str]
@dataclass
class MaskAnnotation:
# path to png file storing (Prob(fg | pixel) * 255)
path: str
# (soft) number of pixels in the mask; sum(Prob(fg | pixel))
mass: Optional[float] = None
@dataclass
class ViewpointAnnotation:
# In right-multiply (PyTorch3D) format. X_cam = X_world @ R + T
R: Tuple[TF3, TF3, TF3]
T: TF3
focal_length: Tuple[float, float]
principal_point: Tuple[float, float]
intrinsics_format: str = "ndc_norm_image_bounds"
# Defines the co-ordinate system where focal_length and principal_point live.
# Possible values: ndc_isotropic | ndc_norm_image_bounds (default)
# ndc_norm_image_bounds: legacy PyTorch3D NDC format, where image boundaries
# correspond to [-1, 1] x [-1, 1], and the scale along x and y may differ
# ndc_isotropic: PyTorch3D 0.5+ NDC convention where the shorter side has
# the range [-1, 1], and the longer one has the range [-s, s]; s >= 1,
# where s is the aspect ratio. The scale is same along x and y.
@dataclass
class FrameAnnotation:
"""A dataclass used to load annotations from json."""
# can be used to join with `SequenceAnnotation`
sequence_name: str
# 0-based, continuous frame number within sequence
frame_number: int
# timestamp in seconds from the video start
frame_timestamp: float
image: ImageAnnotation
depth: Optional[DepthAnnotation] = None
mask: Optional[MaskAnnotation] = None
viewpoint: Optional[ViewpointAnnotation] = None
meta: Optional[Dict[str, Any]] = None
@dataclass
class PointCloudAnnotation:
# path to ply file with points only, relative w.r.t. dataset_root
path: str
# the bigger the better
quality_score: float
n_points: Optional[int]
@dataclass
class VideoAnnotation:
# path to the original video file, relative w.r.t. dataset_root
path: str
# length of the video in seconds
length: float
@dataclass
class SequenceAnnotation:
sequence_name: str
category: str
video: Optional[VideoAnnotation] = None
point_cloud: Optional[PointCloudAnnotation] = None
# the bigger the better
viewpoint_quality_score: Optional[float] = None
def dump_dataclass(obj: Any, f: IO, binary: bool = False) -> None:
"""
Args:
        f: A file opened for writing (in binary mode if `binary` is True).
        obj: A @dataclass or collection hierarchy including dataclasses.
        binary: Set to True if `f` is opened in binary mode, else False.
"""
if binary:
f.write(json.dumps(_asdict_rec(obj)).encode("utf8"))
else:
json.dump(_asdict_rec(obj), f)
def load_dataclass(f: IO, cls: Type[_X], binary: bool = False) -> _X:
"""
Loads to a @dataclass or collection hierarchy including dataclasses
from a json recursively.
    Call it like load_dataclass(f, typing.List[FrameAnnotation]).
raises KeyError if json has keys not mapping to the dataclass fields.
Args:
        f: A file opened for reading (in binary mode if `binary` is True).
        cls: The class of the loaded dataclass.
        binary: Set to True if `f` is opened in binary mode, else False.
"""
if binary:
asdict = json.loads(f.read().decode("utf8"))
else:
asdict = json.load(f)
if isinstance(asdict, list):
# in the list case, run a faster "vectorized" version
cls = get_args(cls)[0]
res = list(_dataclass_list_from_dict_list(asdict, cls))
else:
res = _dataclass_from_dict(asdict, cls)
return res
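# Example round-trip (a sketch; the file name and field values are illustrative,
# and `List` would come from `typing`):
#   frame = FrameAnnotation(
#       sequence_name="seq0", frame_number=0, frame_timestamp=0.0,
#       image=ImageAnnotation(path="images/frame0000.jpg", size=(800, 600)))
#   with open("frame_annotations.json", "w") as f:
#       dump_dataclass([frame], f)
#   with open("frame_annotations.json", "r") as f:
#       frames = load_dataclass(f, List[FrameAnnotation])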
def _dataclass_list_from_dict_list(dlist, typeannot):
"""
Vectorised version of `_dataclass_from_dict`.
The output should be equivalent to
`[_dataclass_from_dict(d, typeannot) for d in dlist]`.
Args:
dlist: list of objects to convert.
typeannot: type of each of those objects.
Returns:
iterator or list over converted objects of the same length as `dlist`.
Raises:
ValueError: it assumes the objects have None's in consistent places across
objects, otherwise it would ignore some values. This generally holds for
auto-generated annotations, but otherwise use `_dataclass_from_dict`.
"""
cls = get_origin(typeannot) or typeannot
if typeannot is Any:
return dlist
if all(obj is None for obj in dlist): # 1st recursion base: all None nodes
return dlist
if any(obj is None for obj in dlist):
# filter out Nones and recurse on the resulting list
idx_notnone = [(i, obj) for i, obj in enumerate(dlist) if obj is not None]
idx, notnone = zip(*idx_notnone)
converted = _dataclass_list_from_dict_list(notnone, typeannot)
res = [None] * len(dlist)
for i, obj in zip(idx, converted):
res[i] = obj
return res
is_optional, contained_type = _resolve_optional(typeannot)
if is_optional:
return _dataclass_list_from_dict_list(dlist, contained_type)
# otherwise, we dispatch by the type of the provided annotation to convert to
if issubclass(cls, tuple) and hasattr(cls, "_fields"): # namedtuple
# For namedtuple, call the function recursively on the lists of corresponding keys
types = cls._field_types.values()
dlist_T = zip(*dlist)
res_T = [
_dataclass_list_from_dict_list(key_list, tp)
for key_list, tp in zip(dlist_T, types)
]
return [cls(*converted_as_tuple) for converted_as_tuple in zip(*res_T)]
elif issubclass(cls, (list, tuple)):
# For list/tuple, call the function recursively on the lists of corresponding positions
types = get_args(typeannot)
if len(types) == 1: # probably List; replicate for all items
types = types * len(dlist[0])
dlist_T = zip(*dlist)
res_T = (
_dataclass_list_from_dict_list(pos_list, tp)
for pos_list, tp in zip(dlist_T, types)
)
if issubclass(cls, tuple):
return list(zip(*res_T))
else:
return [cls(converted_as_tuple) for converted_as_tuple in zip(*res_T)]
elif issubclass(cls, dict):
        # For a dictionary, call the function recursively on the concatenated keys and values
key_t, val_t = get_args(typeannot)
all_keys_res = _dataclass_list_from_dict_list(
[k for obj in dlist for k in obj.keys()], key_t
)
all_vals_res = _dataclass_list_from_dict_list(
[k for obj in dlist for k in obj.values()], val_t
)
indices = np.cumsum([len(obj) for obj in dlist])
assert indices[-1] == len(all_keys_res)
keys = np.split(list(all_keys_res), indices[:-1])
vals = np.split(list(all_vals_res), indices[:-1])
return [cls(zip(*k, v)) for k, v in zip(keys, vals)]
elif not dataclasses.is_dataclass(typeannot):
return dlist
# dataclass node: 2nd recursion base; call the function recursively on the lists
# of the corresponding fields
assert dataclasses.is_dataclass(cls)
fieldtypes = {
f.name: (_unwrap_type(f.type), _get_dataclass_field_default(f))
for f in dataclasses.fields(typeannot)
}
# NOTE the default object is shared here
key_lists = (
_dataclass_list_from_dict_list([obj.get(k, default) for obj in dlist], type_)
for k, (type_, default) in fieldtypes.items()
)
transposed = zip(*key_lists)
return [cls(*vals_as_tuple) for vals_as_tuple in transposed]
def _dataclass_from_dict(d, typeannot):
if d is None or typeannot is Any:
return d
is_optional, contained_type = _resolve_optional(typeannot)
if is_optional:
# an Optional not set to None, just use the contents of the Optional.
return _dataclass_from_dict(d, contained_type)
cls = get_origin(typeannot) or typeannot
if issubclass(cls, tuple) and hasattr(cls, "_fields"): # namedtuple
types = cls._field_types.values()
return cls(*[_dataclass_from_dict(v, tp) for v, tp in zip(d, types)])
elif issubclass(cls, (list, tuple)):
types = get_args(typeannot)
if len(types) == 1: # probably List; replicate for all items
types = types * len(d)
return cls(_dataclass_from_dict(v, tp) for v, tp in zip(d, types))
elif issubclass(cls, dict):
key_t, val_t = get_args(typeannot)
return cls(
(_dataclass_from_dict(k, key_t), _dataclass_from_dict(v, val_t))
for k, v in d.items()
)
elif not dataclasses.is_dataclass(typeannot):
return d
assert dataclasses.is_dataclass(cls)
fieldtypes = {f.name: _unwrap_type(f.type) for f in dataclasses.fields(typeannot)}
return cls(**{k: _dataclass_from_dict(v, fieldtypes[k]) for k, v in d.items()})
def _unwrap_type(tp):
# strips Optional wrapper, if any
if get_origin(tp) is Union:
args = get_args(tp)
if len(args) == 2 and any(a is type(None) for a in args): # noqa: E721
# this is typing.Optional
return args[0] if args[1] is type(None) else args[1] # noqa: E721
return tp
def _get_dataclass_field_default(field: Field) -> Any:
if field.default_factory is not MISSING:
# pyre-fixme[29]: `Union[dataclasses._MISSING_TYPE,
# dataclasses._DefaultFactory[typing.Any]]` is not a function.
return field.default_factory()
elif field.default is not MISSING:
return field.default
else:
return None
def _asdict_rec(obj):
return dataclasses._asdict_inner(obj, dict)
def dump_dataclass_jgzip(outfile: str, obj: Any) -> None:
"""
Dumps obj to a gzipped json outfile.
Args:
        obj: A @dataclass or collection hierarchy including dataclasses.
outfile: The path to the output file.
"""
with gzip.GzipFile(outfile, "wb") as f:
dump_dataclass(obj, cast(IO, f), binary=True)
def load_dataclass_jgzip(outfile, cls):
"""
Loads a dataclass from a gzipped json outfile.
Args:
outfile: The path to the loaded file.
cls: The type annotation of the loaded dataclass.
Returns:
loaded_dataclass: The loaded dataclass.
"""
with gzip.GzipFile(outfile, "rb") as f:
return load_dataclass(cast(IO, f), cls, binary=True)
def _resolve_optional(type_: Any) -> Tuple[bool, Any]:
"""Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""
if get_origin(type_) is Union:
args = get_args(type_)
if len(args) == 2 and args[1] == type(None): # noqa E721
return True, args[0]
if type_ is Any:
return True, Any
return False, type_
|
StarcoderdataPython
|
4941423
|
import requests
import json
def login(useremail, userpassword):
url = "http://127.0.0.1:8000/api/login"
payload = {'username': useremail,
               'password': userpassword}
files = [
]
headers = {
'Cookie': 'csrftoken=<KEY>; sessionid=5g4v77efjv0r99nziiourrzqocruyasl'
}
response = requests.request(
"POST", url, headers=headers, data=payload, files=files)
return response.json()
# print(response.text)
#login("<EMAIL>", "123")
def getexp_fromqueue(token):
""" """
auth_token = "Token " + token
url = "http://127.0.0.1:8000/api/experiments/queue"
payload = {}
headers = {
'Authorization': auth_token,
# 'Cookie': 'csrftoken=<KEY>; sessionid=5g4v77efjv0r99nziiourrzqocruyasl'
}
response = requests.request("GET", url, headers=headers, data=payload)
# returns KeyError: 'experimentId' if no experiment is in Queue
return response.json()
# print(response.text)
def poststatus_running(token, experimentId):
""" """
auth_token = "Token " + token
url = f"http://127.0.0.1:8000/api/experiments/{experimentId}"
payload = {'status': 'RUNNING'}
files = [
]
headers = {
'Authorization': auth_token,
'Cookie': 'csrftoken=a6IwyqS4I5vjwcNAT5Tm70PuiK7AFjcDVPHbyZy3I189V7eX5iK2m0AwJQoYyVUb; sessionid=5g4v77efjv0r99nziiourrzqocruyasl'
}
response = requests.request(
"PATCH", url, headers=headers, data=payload, files=files)
# print(response.text)
# post_running()
def post_result(token, result):
""" """
auth_token = "Token " + token
url = "http://127.0.0.1:8000/api/results"
payload = result
# payload = json.dumps({
# "totalCounts": "50000",
# "numberOfDetectors": "4",
# "singlePhotonRate": "1500.00",
# "totalTime": "3",
# "experiment": "68c64b96-73ed-4184-9ac1-c2d3bab0e068",
# "experimentData": {
# "countratePerDetector": {
# "d1": "123",
# "d2": "123",
# "d3": "456",
# "d4": "123",
# "d5": "123",
# "d6": "456",
# "d7": "123",
# "d8": "123"
# },
# "encodedQubitMeasurements": {
# "c00": "0.123",
# "c10": "0.123",
# "c01": "0.56",
# "c11": "0.34"
# }
# }
# })
headers = {
'Authorization': auth_token,
'Content-Type': 'application/json',
'Cookie': 'csrftoken=<KEY>3I189V7eX5iK2m0AwJQoYyVUb; sessionid=5g4v77efjv0r99nziiourrzqocruyasl'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
# post_result()
def poststatus_done(token, experimentId):
""" """
auth_token = "Token " + token
url = f"http://127.0.0.1:8000/api/experiments/{experimentId}"
payload = {'status': 'DONE'}
files = [
]
headers = {
'Authorization': auth_token,
'Cookie': 'csrftoken=<KEY>; sessionid=5g4v77efjv0r99nziiourrzqocruyasl'
}
response = requests.request(
"PATCH", url, headers=headers, data=payload, files=files)
print(response.text)
def poststatus_failed(token, experimentId):
""" """
url = f"http://127.0.0.1:8000/api/experiments/{experimentId}"
payload = {'status': 'FAILED'}
files = [
]
headers = {
        'Authorization': "Token " + token,
'Cookie': 'csrftoken=<KEY>; sessionid=5g4v77efjv0r99nziiourrzqocruyasl'
}
response = requests.request(
"PATCH", url, headers=headers, data=payload, files=files)
print(response.text)
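# Hypothetical worker loop tying the helpers above together (a sketch; the
# "token" key in the login response and the shape of `result` are assumptions,
# not confirmed by this module):
#   creds = login("worker@example.com", "secret")
#   token = creds["token"]
#   job = getexp_fromqueue(token)
#   poststatus_running(token, job["experimentId"])
#   result = json.dumps({"experiment": job["experimentId"], ...})
#   post_result(token, result)
#   poststatus_done(token, job["experimentId"])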
|
StarcoderdataPython
|
4873292
|
from ..core import Entity as L10nEntity, Structure
class LOL(Structure):
def add(self, element):
if element is not None:
self.add_element(element)
def add_element(self, element, pos=None):
"""
overwrite silme.core.L10nObject.add_element
"""
        if element is None:
            return 0
t = type(element).__name__[0]
if t == 's' or t == 'u': # s - str, u - unicode
return self.add_string(element, pos)
elif t == 'E': # E - Entity
return self.add_entity(element, pos)
elif t == 'C': # C - Comment
return self.add_comment(element, pos)
else:
self.append(element)
class WS():
def __init__(self, content):
self.content = content
class Group():
def __init__(self):
self.structure = []
def add(self, entry):
self.structure.append(entry)
class Entity(L10nEntity):
def __init__(self, id, value=None):
L10nEntity.__init__(self, id)
self.kvplist = []
if isinstance(value, unicode):
self._value['default'] = String(value)
else:
self._value['default'] = None
def get_value(self, fallback=None):
return self._value[self.default_code]
class Comment():
def __init__(self, content=None):
self.content = content
class Expression():
pass
class Index():
def __init__(self):
self.expression = None
class Hash():
def __init__(self):
self.key_value_pairs = {}
class Expander():
pass
class Macro():
def __init__(self):
self.structure=[]
class Operator(str):
pass
class KeyValuePair():
def __init__(self, key=None, value=None):
self.key = key
self.value = value
self.ws = []
def add(self, value):
if isinstance(self.value, list):
self.value.append(value)
elif self.value is not None:
self.value = [self.value, value]
else:
self.value = value
class OperatorExpression(list):
pass
class ConditionalExpression(OperatorExpression):
pass
class OrExpression(OperatorExpression):
pass
class AndExpression(OperatorExpression):
pass
class EqualityExpression(OperatorExpression):
pass
class RelationalExpression(OperatorExpression):
pass
class AdditiveExpression(OperatorExpression):
pass
class MultiplicativeExpression(OperatorExpression):
pass
class UnaryExpression(OperatorExpression):
pass
class BraceExpression(list):
pass
class MacroCall():
def __init__(self):
self.structure=[]
class Idref(list):
pass
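# Rough usage sketch (assumes the add_string/add_entity/add_comment helpers
# provided by silme.core.Structure, which this module builds on):
#   lol = LOL()
#   lol.add(Entity('brandName'))   # dispatched to add_entity()
#   lol.add(Comment('a comment'))  # dispatched to add_comment()
#   lol.add('\n  ')                # plain string -> add_string()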
|
StarcoderdataPython
|
6438488
|
from setuptools import setup, find_packages
setup(
packages=find_packages(exclude=['tests', 'tests.*']),
)
|
StarcoderdataPython
|
242116
|
# Copyright 2020 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""ESC Impact Simulator.
For evaluation of the impact of ESC protection into CBSD transmit power.
It uses the IAP reference implementation to assess how much power reduction is
required on each CBSD in the network.
Inputs:
- Some JSON FADs file defining the ESC networks.
- A CBSD deployment model. It can be either:
+ a JSON file defining the CBSD deployment model. For example one can use any
NTIA model that was generated for DPA neighborhood studies, found in:
https://github.com/Wireless-Innovation-Forum/Spectrum-Access-System/tree/master/data/research/deployment_models
+ The default NTIA nationwide deployment model (if no file specified): this
one is defined in the 2 CSV files NationWide_CatA, NationWide_CatB found
in the same data directory as above.
Output:
- A figure showing the impact of ESC protection on CBSDs power.
- A CSV file showing every CBSD with requested/obtained power.
Example Usage:
# Run the simulation for a list of grants, on all sensors of 2 ESC FADS.
python run_esc_impact.py --cbsd_file data/West14_reg_grant.json.zip \
--esc_fads esc1_fad.json,esc2_fad.json
# Run the simulation for a list of grants, on selected sensors of 2 ESC FADS:
# + only the sensors whose name contains `W12` and `W13` string.
# + also output one CSV per sensor containing all impacted CBSD.
python run_esc_impact.py --cbsd_file data/West14_reg_grant.json.zip \
--esc_fads esc1_fad.json,esc2_fad.json \
--sensors W12,W13 \
--output_csv
"""
import argparse
import json
import glob
import cartopy.crs as ccrs
import csv
import matplotlib.pyplot as plt
import numpy as np
import shapely.geometry as sgeo
import iap_patch
from deploy_model import deploy
from reference_models.iap import iap
from reference_models.geo import vincenty
from reference_models.tools import entities
from reference_models.tools import sim_utils
#----------------------------------------
# Setup the command line arguments
parser = argparse.ArgumentParser(description='ESC Impact Simulator')
# - Generic config.
parser.add_argument('--cbsd_file', type=str, default='',
help='CBSD deployment file (JSON).')
parser.add_argument('--esc_fads', type=str, default='',
help='The ESC FADs file (JSON) separated by a comma.')
parser.add_argument('--per_sensor', action='store_true',
help='If set, no ESC aggregation.')
parser.add_argument('--output_csv', action='store_true',
help='If set, output CSV per sensor of all impacted CBSD.')
parser.add_argument('--sectorized_catb', action='store_true',
                    help='If set, model CatB CBSDs as multi-sector.')
parser.set_defaults(sectorized_catb=False)
parser.add_argument('--sensors', type=str, default='',
help='Sensors to analyse (prefix).')
options = parser.parse_args()
#--------------------------------------------------
# The simulation
def esc_impact_sim(cbsd_reg_grant, esc_fads,
sensor_filters=None,
per_sensor_mode=False,
do_csv_output=False,
force_catb_omni=True):
"""ESC impact simulation.
Performs simulation on given input (CBSD and ESC), creates resulting plots
and output CSV file.
Args:
cbsd_reg_grant: The CBSD file (JSON) in 'reg_grant' format.
esc_fads: A list of ESC FAD data (JSON).
sensor_filters: List of string for filtering sensor (ex: W13, google, ..).
per_sensor_mode: If set, computes impact stats per sensor. No plots or CSV
output is done.
    do_csv_output: If set, output one CSV per sensor listing the neighbor CBSDs.
    force_catb_omni: If set, model CatB CBSDs as omni-directional antennas.
"""
# Read the grants:
if cbsd_reg_grant:
# .. from 'reg_grant' file
grants, _ = sim_utils.ReadTestHarnessConfigFile(cbsd_reg_grant)
else:
# .. from the default nation wide deployment model
print('Using NTIA NationWide deployment model')
cbsds = deploy.ReadNationWideDeploymentModel(force_omni=force_catb_omni)
grants = entities.ConvertToCbsdGrantInfo(cbsds, 3550, 3560)
# Reads the ESC sensors from all FADs.
sensors = []
for fad in esc_fads:
sensors.extend(json.load(open(fad))['recordData'])
# Filter the ESCs to simulate.
if sensor_filters:
filt_sensors = []
for sensor in sensors:
for token in sensor_filters:
if token in sensor['id']:
filt_sensors.append(sensor)
break
sensors = filt_sensors
if not sensors:
print('Simulation cancelled - No sensor name containing one of %s'
% sensor_filters)
return
print('ESCs included in the simulation:')
print([sensor['id'] for sensor in sensors])
# Run IAP simulation.
if not per_sensor_mode:
for sensor in sensors:
esc_allowed_interference = iap.performIapForEsc(sensor, grants, [])
impacted_grants = [grant for grant in grants
if max(grant.iap_eirp) - min(grant.iap_eirp) > 0]
print('Number of impacted CBSDs: %d' % len(impacted_grants))
else:
# Special mode for getting # impacted grants per sensor independently.
for sensor in sensors:
# Clear iap_eirp before simulation of each ESC
for grant in grants:
grant.iap_eirp.clear()
grant.iap_eirp.add(grant.max_eirp)
esc_allowed_interference = iap.performIapForEsc(sensor, grants, [])
impacted_grants = [grant for grant in grants
if max(grant.iap_eirp) - min(grant.iap_eirp) > 0]
print('Number of CBSDs impacted by %s: %d'
% (sensor['id'], len(impacted_grants)))
return
# Output the CSV.
if do_csv_output:
for sensor in sensors:
sensor_loc = (sensor['installationParam']['latitude'],
sensor['installationParam']['longitude'])
neighbor_grants = []
sensor_name = sensor['id'].split('/')
for idx, grant in enumerate(grants):
dist_km, _, _ = vincenty.GeodesicDistanceBearing(
grant.latitude, grant.longitude, sensor_loc[0], sensor_loc[1])
if ((grant.cbsd_category == 'A' and dist_km <= 40) or
(grant.cbsd_category == 'B' and dist_km <= 80)):
neighbor_grants.append(
[sensor_name[1], sensor_name[2], idx,
grant.cbsd_category, grant.indoor_deployment, grant.height_agl,
dist_km, grant.antenna_gain,
grant.max_eirp + 10,
min(grant.iap_eirp) + 10])
file_name = sensor_name[2] + '_neighbors.csv'
with open(file_name, 'w') as f:
writer = csv.writer(f)
writer.writerow(
['ESC Network', 'ESC Sensor','CBSD ID','CBSD Category','Indoor CBSD',
'CBSD AGL','Distance to ESC (km)','CBSD Antenna Gain (dBi)',
'Max EIRP (dBm/10MHz)','Actual EIRP (dBm/10MHz)'])
writer.writerows(neighbor_grants)
# Retrieve the delta EIRP for plots and stats.
delta_eirp = []
for grant in grants:
delta_eirp.append(max(grant.iap_eirp) - min(grant.iap_eirp))
# Create figure with simple projection.
fig = plt.figure(figsize=(10,10))
subplot = 111
ax = fig.add_subplot(subplot, projection=ccrs.PlateCarree())
# Finds the bounding box (all CBSDs).
box_margin = 0.1 # about 10km
box = sgeo.box(*sgeo.MultiPoint(
[(grant.longitude, grant.latitude) for grant in grants]).bounds)
box = box.buffer(box_margin)
# Plot geometries.
ax.axis([box.bounds[0], box.bounds[2], box.bounds[1], box.bounds[3]])
ax.coastlines()
ax.stock_img()
# class1: no power reduction
class1_grants = [grant for grant in grants
if max(grant.iap_eirp) == min(grant.iap_eirp)]
class1_locations = ([grant.longitude for grant in class1_grants],
[grant.latitude for grant in class1_grants])
ax.scatter(*class1_locations, c='g', marker='1', s=50,
label='0 dB power reduction: %d' % len(class1_grants) )
# class2: less than 10 dB power reduction
class2_grants = [grant for grant in grants
if (max(grant.iap_eirp) > min(grant.iap_eirp)
and max(grant.iap_eirp)-min(grant.iap_eirp) < 10)]
class2_locations = ([grant.longitude for grant in class2_grants],
[grant.latitude for grant in class2_grants])
ax.scatter(*class2_locations, c='b', marker='1', s=50,
label='<10 dB power reduction: %d' % len(class2_grants) )
# class3: 10 dB or more power reduction
class3_grants = [grant for grant in grants
if max(grant.iap_eirp) - min(grant.iap_eirp) >= 10]
class3_locations = ([grant.longitude for grant in class3_grants],
[grant.latitude for grant in class3_grants])
ax.scatter(*class3_locations, c='r', marker='1', s=50,
label='>=10 dB power reduction: %d' % len(class3_grants) )
ax.legend(loc=0)
ax.set_title('ESC Protection')
# Print histogram of power reduction
power_reduction = [max(grant.iap_eirp) - min(grant.iap_eirp)
for grant in grants]
plt.figure()
plt.hist(power_reduction, bins=np.arange(0.1, 50, 1))
plt.xlabel('CBSD power reduction')
plt.ylabel('# of CBSDs')
plt.grid()
#----------------------------------------------------------------
# Script main runner
if __name__ == '__main__':
esc_fads = options.esc_fads.split(',')
sensor_filters = options.sensors.split(',')
esc_impact_sim(options.cbsd_file, esc_fads, sensor_filters,
options.per_sensor, options.output_csv,
force_catb_omni=not options.sectorized_catb)
plt.show(block=True)
|
StarcoderdataPython
|
1657694
|
from src.utils.utils import open_json_file, open_html_file, open_csv_file # noqa
|
StarcoderdataPython
|
3429048
|
import arcade
from constants import *
class MainHand(arcade.Sprite):
def __init__(self, filename, icon_image, player, stats, scale=HELMET_SCALE):
super().__init__(filename, scale)
self.filename = filename
self.icon_image = icon_image
self.player = player
self.stats = stats
def update(self):
if self.player.alive:
self.center_x = self.player.center_x
self.center_y = self.player.center_y
else:
self.kill()
class OffHand(arcade.Sprite):
def __init__(self, filename, icon_image, player, stats, scale=HELMET_SCALE):
super().__init__(filename, scale)
self.filename = filename
self.icon_image = icon_image
self.player = player
self.stats = stats
def update(self):
if self.player.alive:
self.center_x = self.player.center_x
self.center_y = self.player.center_y
else:
self.kill()
|
StarcoderdataPython
|
6583189
|
# repo: vla3089/adventofcode
#!/usr/bin/env python
score = 0
nesting_level = 0
is_garbage_mode = False
garbage_count = 0
skip_next = False
def process_in_garbage_mode(c):
global score
global nesting_level
global is_garbage_mode
global skip_next
global garbage_count
if skip_next:
skip_next = False
else:
if c == '!':
skip_next = True
elif c == '>':
is_garbage_mode = False
else:
garbage_count += 1
def process_in_group_mode(c):
global score
global nesting_level
global is_garbage_mode
global skip_next
if c == '{':
nesting_level += 1
score += nesting_level
elif c == '}':
nesting_level -= 1
elif c == '<':
is_garbage_mode = True
def process_ch(c):
global is_garbage_mode
if is_garbage_mode:
process_in_garbage_mode(c)
else:
process_in_group_mode(c)
with open('input_9.txt', 'r') as f:
while True:
c = f.read(1)
if c:
process_ch(c)
else:
break
print(score)
print(garbage_count)
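# Hand-checked examples of the scoring rules implemented above:
#   "{}"       -> score 1
#   "{{{}}}"   -> score 1 + 2 + 3 = 6
#   "{{},{}}"  -> score 1 + 2 + 2 = 5
# Garbage ("<...>") adds nothing to the score, and "!" skips the following
# character, so "<!!>" is an empty piece of garbage (garbage_count 0).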
|
StarcoderdataPython
|
8176971
|
from setuptools import setup, find_packages
setup(
name='vtkrishn',
version='0.1.0',
description='Simple Project',
url='https://github.com/vtkrishn/pythonHelper',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7'
],
entry_points={
'console_scripts': [
'vtkrishn=vtkrishn:main',
],
},
)
|
StarcoderdataPython
|
1927922
|
# features/steps/check_if_contains_schema_version.py
from behave import then
from pgmigrate import _is_initialized
@then("database contains schema_version")
@then('database contains schema_version in schema "{schema}"')
def step_impl(context, schema='public'):
cur = context.conn.cursor()
assert _is_initialized(schema, cur), 'Non-empty db should be initialized'
|
StarcoderdataPython
|
9678246
|
import nextcord
from .constant import ARBITRE_ID, CIVFR_GUILD_ID
from util.exception import ALEDException
def is_arbitre(member: nextcord.Member, client=None):
    if member.guild is None:
        if client is None:
            raise ALEDException("Client not given for checking if the member is Arbitre")
        # Resolve the member on the CivFR guild so their roles can be inspected.
        member = client.get_guild(CIVFR_GUILD_ID).get_member(member.id)
    return ARBITRE_ID in [role.id for role in member.roles]
def is_civfr_guild_or_mp(channel):
    # DM channels have no guild, so check for them first to avoid an AttributeError.
    return isinstance(channel, nextcord.DMChannel) or (channel.guild is not None and channel.guild.id == CIVFR_GUILD_ID)
|
StarcoderdataPython
|
11275453
|
'''
Author: Jecosine
Date: 2021-01-22 05:43:45
LastEditTime: 2021-01-22 05:44:02
LastEditors: Jecosine
Description: Test
'''
s = """- Creational
- AbstractFactory
- Builder
- FactoryMethod
- Prototype
- Singleton
- Structural
- AdapterBridge
- Composite
- Decorator
- Facade
- Flyweight
- Proxy
- Behavioral
- ResponsibilityChain
- Command
- Interpreter
- Iterator
- Mediator
- Memento
- Observer
- State
- Strategy
- TemplateMethod
- Visitor"""
l = s.split("\n")
template = {"name": None, "children": []}
root = copy.deepcopy(template)
parent = root
current = []
for i in l:
parent["children"] = current
x = copy.deepcopy(template)
x["name"] = i.strip().split("-")[-1].strip()
if i[0] == " ":
current.append(x)
else:
root["children"].append(x)
parent = x
current = []
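# After the loop, `root` holds the nested structure (sketch of the first
# top-level entry):
#   root["children"][0]["name"] == "Creational"
#   root["children"][0]["children"][0]["name"] == "AbstractFactory"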
|
StarcoderdataPython
|
11242529
|
from .reqmethods import Req, Arrange
class Account(Req, Arrange):
slug = 'account'
def __init__(self, token):
self.setToken(token)
def account(self):
data = self.get(self.slug)
if 'id' in data:
return data
return self.arrangeData(data)
|
StarcoderdataPython
|
3344637
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009, <NAME>'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""psutil is a cross-platform library for retrieving information on
running processes and system utilization (CPU, memory, disks, network)
in Python.
"""
from __future__ import division
__author__ = "<NAME>'"
__version__ = "2.1.2"
version_info = tuple([int(num) for num in __version__.split('.')])
__all__ = [
# exceptions
"Error", "NoSuchProcess", "AccessDenied", "TimeoutExpired",
# constants
"version_info", "__version__",
"STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
"STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
"STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
# classes
"Process", "Popen",
# functions
"pid_exists", "pids", "process_iter", "wait_procs", # proc
"virtual_memory", "swap_memory", # memory
"cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
"net_io_counters", "net_connections", # network
"disk_io_counters", "disk_partitions", "disk_usage", # disk
"users", "boot_time", # others
]
import sys
import os
import time
import signal
import warnings
import errno
import subprocess
try:
import pwd
except ImportError:
pwd = None
from psutil._common import memoize
from psutil._compat import property, callable, long, defaultdict
from psutil._compat import (wraps as _wraps,
PY3 as _PY3)
from psutil._common import (deprecated_method as _deprecated_method,
deprecated as _deprecated,
sdiskio as _nt_sys_diskio,
snetio as _nt_sys_netio)
from psutil._common import (STATUS_RUNNING, # NOQA
STATUS_SLEEPING,
STATUS_DISK_SLEEP,
STATUS_STOPPED,
STATUS_TRACING_STOP,
STATUS_ZOMBIE,
STATUS_DEAD,
STATUS_WAKING,
STATUS_LOCKED,
STATUS_IDLE, # bsd
STATUS_WAITING, # bsd
STATUS_LOCKED) # bsd
from psutil._common import (CONN_ESTABLISHED,
CONN_SYN_SENT,
CONN_SYN_RECV,
CONN_FIN_WAIT1,
CONN_FIN_WAIT2,
CONN_TIME_WAIT,
CONN_CLOSE,
CONN_CLOSE_WAIT,
CONN_LAST_ACK,
CONN_LISTEN,
CONN_CLOSING,
CONN_NONE)
if sys.platform.startswith("linux"):
import psutil._pslinux as _psplatform
from psutil._pslinux import (phymem_buffers, # NOQA
cached_phymem)
from psutil._pslinux import (IOPRIO_CLASS_NONE, # NOQA
IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE,
IOPRIO_CLASS_IDLE)
# Linux >= 2.6.36
if _psplatform.HAS_PRLIMIT:
from _psutil_linux import (RLIM_INFINITY, # NOQA
RLIMIT_AS,
RLIMIT_CORE,
RLIMIT_CPU,
RLIMIT_DATA,
RLIMIT_FSIZE,
RLIMIT_LOCKS,
RLIMIT_MEMLOCK,
RLIMIT_NOFILE,
RLIMIT_NPROC,
RLIMIT_RSS,
RLIMIT_STACK)
# Kinda ugly but considerably faster than using hasattr() and
# setattr() against the module object (we are at import time:
# speed matters).
import _psutil_linux
try:
RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE
except AttributeError:
pass
try:
RLIMIT_NICE = _psutil_linux.RLIMIT_NICE
except AttributeError:
pass
try:
RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO
except AttributeError:
pass
try:
RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME
except AttributeError:
pass
try:
RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING
except AttributeError:
pass
del _psutil_linux
elif sys.platform.startswith("win32"):
import psutil._pswindows as _psplatform
from _psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS, # NOQA
BELOW_NORMAL_PRIORITY_CLASS,
HIGH_PRIORITY_CLASS,
IDLE_PRIORITY_CLASS,
NORMAL_PRIORITY_CLASS,
REALTIME_PRIORITY_CLASS)
from psutil._pswindows import CONN_DELETE_TCB # NOQA
elif sys.platform.startswith("darwin"):
import psutil._psosx as _psplatform
elif sys.platform.startswith("freebsd"):
import psutil._psbsd as _psplatform
elif sys.platform.startswith("sunos"):
import psutil._pssunos as _psplatform
from psutil._pssunos import (CONN_IDLE, # NOQA
CONN_BOUND)
else:
raise NotImplementedError('platform %s is not supported' % sys.platform)
__all__.extend(_psplatform.__extra__all__)
_TOTAL_PHYMEM = None
_POSIX = os.name == 'posix'
_WINDOWS = os.name == 'nt'
_timer = getattr(time, 'monotonic', time.time)
# =====================================================================
# --- exceptions
# =====================================================================
class Error(Exception):
"""Base exception class. All other psutil exceptions inherit
from this one.
"""
class NoSuchProcess(Error):
"""Exception raised when a process with a certain PID doesn't
or no longer exists (zombie).
"""
def __init__(self, pid, name=None, msg=None):
Error.__init__(self)
self.pid = pid
self.name = name
self.msg = msg
if msg is None:
if name:
details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
else:
details = "(pid=%s)" % self.pid
self.msg = "process no longer exists " + details
def __str__(self):
return self.msg
class AccessDenied(Error):
"""Exception raised when permission to perform an action is denied."""
def __init__(self, pid=None, name=None, msg=None):
Error.__init__(self)
self.pid = pid
self.name = name
self.msg = msg
if msg is None:
if (pid is not None) and (name is not None):
self.msg = "(pid=%s, name=%s)" % (pid, repr(name))
elif (pid is not None):
self.msg = "(pid=%s)" % self.pid
else:
self.msg = ""
def __str__(self):
return self.msg
class TimeoutExpired(Error):
"""Raised on Process.wait(timeout) if timeout expires and process
is still alive.
"""
def __init__(self, seconds, pid=None, name=None):
Error.__init__(self)
self.seconds = seconds
self.pid = pid
self.name = name
self.msg = "timeout after %s seconds" % seconds
if (pid is not None) and (name is not None):
self.msg += " (pid=%s, name=%s)" % (pid, repr(name))
elif (pid is not None):
self.msg += " (pid=%s)" % self.pid
def __str__(self):
return self.msg
# push exception classes into platform specific module namespace
_psplatform.NoSuchProcess = NoSuchProcess
_psplatform.AccessDenied = AccessDenied
_psplatform.TimeoutExpired = TimeoutExpired
# =====================================================================
# --- Process class
# =====================================================================
def _assert_pid_not_reused(fun):
"""Decorator which raises NoSuchProcess in case a process is no
longer running or its PID has been reused.
"""
@_wraps(fun)
def wrapper(self, *args, **kwargs):
if not self.is_running():
raise NoSuchProcess(self.pid, self._name)
return fun(self, *args, **kwargs)
return wrapper
class Process(object):
"""Represents an OS process with the given PID.
If PID is omitted current process PID (os.getpid()) is used.
Raise NoSuchProcess if PID does not exist.
Note that most of the methods of this class do not make sure
the PID of the process being queried has been reused over time.
That means you might end up retrieving an information referring
to another process in case the original one this instance
refers to is gone in the meantime.
The only exceptions for which process identity is pre-emptively
checked and guaranteed are:
- parent()
- children()
- nice() (set)
- ionice() (set)
- rlimit() (set)
- cpu_affinity (set)
- suspend()
- resume()
- send_signal()
- terminate()
- kill()
To prevent this problem for all other methods you can:
- use is_running() before querying the process
- if you're continuously iterating over a set of Process
instances use process_iter() which pre-emptively checks
process identity for every yielded instance
"""
def __init__(self, pid=None):
self._init(pid)
def _init(self, pid, _ignore_nsp=False):
if pid is None:
pid = os.getpid()
else:
if not _PY3 and not isinstance(pid, (int, long)):
raise TypeError('pid must be an integer (got %r)' % pid)
if pid < 0:
raise ValueError('pid must be a positive integer (got %s)'
% pid)
self._pid = pid
self._name = None
self._exe = None
self._create_time = None
self._gone = False
self._hash = None
# used for caching on Windows only (on POSIX ppid may change)
self._ppid = None
# platform-specific modules define an _psplatform.Process
# implementation class
self._proc = _psplatform.Process(pid)
self._last_sys_cpu_times = None
self._last_proc_cpu_times = None
# cache creation time for later use in is_running() method
try:
self.create_time()
except AccessDenied:
# we should never get here as AFAIK we're able to get
# process creation time on all platforms even as a
# limited user
pass
except NoSuchProcess:
if not _ignore_nsp:
msg = 'no process found with pid %s' % pid
raise NoSuchProcess(pid, None, msg)
else:
self._gone = True
        # This pair is supposed to identify a Process instance
# univocally over time (the PID alone is not enough as
# it might refer to a process whose PID has been reused).
# This will be used later in __eq__() and is_running().
self._ident = (self.pid, self._create_time)
def __str__(self):
try:
pid = self.pid
name = repr(self.name())
except NoSuchProcess:
details = "(pid=%s (terminated))" % self.pid
except AccessDenied:
details = "(pid=%s)" % (self.pid)
else:
details = "(pid=%s, name=%s)" % (pid, name)
return "%s.%s%s" % (self.__class__.__module__,
self.__class__.__name__, details)
def __repr__(self):
return "<%s at %s>" % (self.__str__(), id(self))
def __eq__(self, other):
# Test for equality with another Process object based
# on PID and creation time.
if not isinstance(other, Process):
return NotImplemented
return self._ident == other._ident
def __ne__(self, other):
return not self == other
def __hash__(self):
if self._hash is None:
self._hash = hash(self._ident)
return self._hash
# --- utility methods
def as_dict(self, attrs=[], ad_value=None):
"""Utility method returning process information as a
hashable dictionary.
If 'attrs' is specified it must be a list of strings
reflecting available Process class' attribute names
(e.g. ['cpu_times', 'name']) else all public (read
only) attributes are assumed.
'ad_value' is the value which gets assigned in case
AccessDenied exception is raised when retrieving that
particular process information.
"""
excluded_names = set(
['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
'is_running', 'as_dict', 'parent', 'children', 'rlimit'])
retdict = dict()
ls = set(attrs or [x for x in dir(self) if not x.startswith('get')])
for name in ls:
if name.startswith('_'):
continue
if name.startswith('set_'):
continue
if name.startswith('get_'):
msg = "%s() is deprecated; use %s() instead" % (name, name[4:])
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
name = name[4:]
if name in ls:
continue
if name == 'getcwd':
msg = "getcwd() is deprecated; use cwd() instead"
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
name = 'cwd'
if name in ls:
continue
if name in excluded_names:
continue
try:
attr = getattr(self, name)
if callable(attr):
ret = attr()
else:
ret = attr
except AccessDenied:
ret = ad_value
except NotImplementedError:
# in case of not implemented functionality (may happen
# on old or exotic systems) we want to crash only if
# the user explicitly asked for that particular attr
if attrs:
raise
continue
retdict[name] = ret
return retdict
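        # e.g. (illustrative values):
        #   p.as_dict(attrs=['pid', 'name', 'status'])
        #   -> {'pid': 1234, 'name': 'python', 'status': 'running'}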
def parent(self):
"""Return the parent process as a Process object pre-emptively
checking whether PID has been reused.
If no parent is known return None.
"""
ppid = self.ppid()
if ppid is not None:
try:
parent = Process(ppid)
if parent.create_time() <= self.create_time():
return parent
# ...else ppid has been reused by another process
except NoSuchProcess:
pass
def is_running(self):
"""Return whether this process is running.
It also checks if PID has been reused by another process in
which case return False.
"""
if self._gone:
return False
try:
# Checking if PID is alive is not enough as the PID might
# have been reused by another process: we also want to
# check process identity.
            # Process identity / uniqueness over time is granted by
# (PID + creation time) and that is verified in __eq__.
return self == Process(self.pid)
except NoSuchProcess:
self._gone = True
return False
# --- actual API
@property
def pid(self):
"""The process PID."""
return self._pid
def ppid(self):
"""The process parent PID.
On Windows the return value is cached after first call.
"""
# On POSIX we don't want to cache the ppid as it may unexpectedly
# change to 1 (init) in case this process turns into a zombie:
# https://github.com/giampaolo/psutil/issues/321
# http://stackoverflow.com/questions/356722/
# XXX should we check creation time here rather than in
# Process.parent()?
if _POSIX:
return self._proc.ppid()
else:
if self._ppid is None:
self._ppid = self._proc.ppid()
return self._ppid
def name(self):
"""The process name. The return value is cached after first call."""
if self._name is None:
name = self._proc.name()
if _POSIX and len(name) >= 15:
# On UNIX the name gets truncated to the first 15 characters.
# If it matches the first part of the cmdline we return that
# one instead because it's usually more explicative.
# Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
try:
cmdline = self.cmdline()
except AccessDenied:
pass
else:
if cmdline:
extended_name = os.path.basename(cmdline[0])
if extended_name.startswith(name):
name = extended_name
self._proc._name = name
self._name = name
return self._name
def exe(self):
"""The process executable as an absolute path.
May also be an empty string.
The return value is cached after first call.
"""
def guess_it(fallback):
# try to guess exe from cmdline[0] in absence of a native
# exe representation
cmdline = self.cmdline()
if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
exe = cmdline[0] # the possible exe
# Attempt to guess only in case of an absolute path.
# It is not safe otherwise as the process might have
# changed cwd.
if (os.path.isabs(exe)
and os.path.isfile(exe)
and os.access(exe, os.X_OK)):
return exe
if isinstance(fallback, AccessDenied):
raise fallback
return fallback
if self._exe is None:
try:
exe = self._proc.exe()
except AccessDenied:
err = sys.exc_info()[1]
return guess_it(fallback=err)
else:
if not exe:
# underlying implementation can legitimately return an
# empty string; if that's the case we don't want to
# raise AD while guessing from the cmdline
try:
exe = guess_it(fallback=exe)
except AccessDenied:
pass
self._exe = exe
return self._exe
def cmdline(self):
"""The command line this process has been called with."""
return self._proc.cmdline()
def status(self):
"""The process current status as a STATUS_* constant."""
return self._proc.status()
def username(self):
"""The name of the user that owns the process.
On UNIX this is calculated by using *real* process uid.
"""
if _POSIX:
if pwd is None:
# might happen if python was installed from sources
raise ImportError(
"requires pwd module shipped with standard python")
return pwd.getpwuid(self.uids().real).pw_name
else:
return self._proc.username()
def create_time(self):
"""The process creation time as a floating point number
expressed in seconds since the epoch, in UTC.
The return value is cached after first call.
"""
if self._create_time is None:
self._create_time = self._proc.create_time()
return self._create_time
def cwd(self):
"""Process current working directory as an absolute path."""
return self._proc.cwd()
def nice(self, value=None):
"""Get or set process niceness (priority)."""
if value is None:
return self._proc.nice_get()
else:
if not self.is_running():
raise NoSuchProcess(self.pid, self._name)
self._proc.nice_set(value)
if _POSIX:
def uids(self):
"""Return process UIDs as a (real, effective, saved)
namedtuple.
"""
return self._proc.uids()
def gids(self):
"""Return process GIDs as a (real, effective, saved)
namedtuple.
"""
return self._proc.gids()
def terminal(self):
"""The terminal associated with this process, if any,
else None.
"""
return self._proc.terminal()
def num_fds(self):
"""Return the number of file descriptors opened by this
process (POSIX only).
"""
return self._proc.num_fds()
# Linux, BSD and Windows only
if hasattr(_psplatform.Process, "io_counters"):
def io_counters(self):
"""Return process I/O statistics as a
(read_count, write_count, read_bytes, write_bytes)
namedtuple.
Those are the number of read/write calls performed and the
amount of bytes read and written by the process.
"""
return self._proc.io_counters()
# Linux and Windows >= Vista only
if hasattr(_psplatform.Process, "ionice_get"):
def ionice(self, ioclass=None, value=None):
"""Get or set process I/O niceness (priority).
On Linux 'ioclass' is one of the IOPRIO_CLASS_* constants.
'value' is a number which goes from 0 to 7. The higher the
value, the lower the I/O priority of the process.
On Windows only 'ioclass' is used and it can be set to 2
(normal), 1 (low) or 0 (very low).
Available on Linux and Windows > Vista only.
"""
if ioclass is None:
if value is not None:
raise ValueError("'ioclass' must be specified")
return self._proc.ionice_get()
else:
return self._proc.ionice_set(ioclass, value)
# Linux only
if hasattr(_psplatform.Process, "rlimit"):
def rlimit(self, resource, limits=None):
"""Get or set process resource limits as a (soft, hard)
tuple.
'resource' is one of the RLIMIT_* constants.
'limits' is supposed to be a (soft, hard) tuple.
See "man prlimit" for further info.
Available on Linux only.
"""
if limits is None:
return self._proc.rlimit(resource)
else:
return self._proc.rlimit(resource, limits)
# Windows and Linux only
if hasattr(_psplatform.Process, "cpu_affinity_get"):
def cpu_affinity(self, cpus=None):
"""Get or set process CPU affinity.
If specified 'cpus' must be a list of CPUs for which you
want to set the affinity (e.g. [0, 1]).
"""
if cpus is None:
return self._proc.cpu_affinity_get()
else:
self._proc.cpu_affinity_set(cpus)
if _WINDOWS:
def num_handles(self):
"""Return the number of handles opened by this process
(Windows only).
"""
return self._proc.num_handles()
def num_ctx_switches(self):
"""Return the number of voluntary and involuntary context
switches performed by this process.
"""
return self._proc.num_ctx_switches()
def num_threads(self):
"""Return the number of threads used by this process."""
return self._proc.num_threads()
def threads(self):
"""Return threads opened by process as a list of
(id, user_time, system_time) namedtuples representing
thread id and thread CPU times (user/system).
"""
return self._proc.threads()
@_assert_pid_not_reused
def children(self, recursive=False):
"""Return the children of this process as a list of Process
instances, pre-emptively checking whether PID has been reused.
If recursive is True return all the parent descendants.
Example (A == this process):
A ─┐
│
├─ B (child) ─┐
│ └─ X (grandchild) ─┐
│ └─ Y (great grandchild)
├─ C (child)
└─ D (child)
>>> import psutil
>>> p = psutil.Process()
>>> p.children()
B, C, D
>>> p.children(recursive=True)
B, X, Y, C, D
Note that in the example above if process X disappears
process Y won't be listed as the reference to process A
is lost.
"""
if hasattr(_psplatform, 'ppid_map'):
# Windows only: obtain a {pid:ppid, ...} dict for all running
# processes in one shot (faster).
ppid_map = _psplatform.ppid_map()
else:
ppid_map = None
ret = []
if not recursive:
if ppid_map is None:
# 'slow' version, common to all platforms except Windows
for p in process_iter():
try:
if p.ppid() == self.pid:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= p.create_time():
ret.append(p)
except NoSuchProcess:
pass
else:
# Windows only (faster)
for pid, ppid in ppid_map.items():
if ppid == self.pid:
try:
child = Process(pid)
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= child.create_time():
ret.append(child)
except NoSuchProcess:
pass
else:
# construct a dict where 'values' are all the processes
# having 'key' as their parent
table = defaultdict(list)
if ppid_map is None:
for p in process_iter():
try:
table[p.ppid()].append(p)
except NoSuchProcess:
pass
else:
for pid, ppid in ppid_map.items():
try:
p = Process(pid)
table[ppid].append(p)
except NoSuchProcess:
pass
# At this point we have a mapping table where table[self.pid]
# are the current process' children.
# Below, we look for all descendants recursively, similarly
# to a recursive function call.
checkpids = [self.pid]
for pid in checkpids:
for child in table[pid]:
try:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
intime = self.create_time() <= child.create_time()
except NoSuchProcess:
pass
else:
if intime:
ret.append(child)
if child.pid not in checkpids:
checkpids.append(child.pid)
return ret
def cpu_percent(self, interval=None):
"""Return a float representing the current process CPU
utilization as a percentage.
When interval is 0.0 or None (default) compares process times
to system CPU times elapsed since last call, returning
immediately (non-blocking). That means that the first time
this is called it will return a meaningful 0.0 value.
When interval is > 0.0 compares process times to system CPU
times elapsed before and after the interval (blocking).
        In this case it is recommended for accuracy that this function
be called with at least 0.1 seconds between calls.
Examples:
>>> import psutil
>>> p = psutil.Process(os.getpid())
>>> # blocking
>>> p.cpu_percent(interval=1)
2.0
>>> # non-blocking (percentage since last call)
>>> p.cpu_percent(interval=None)
2.9
>>>
"""
blocking = interval is not None and interval > 0.0
num_cpus = cpu_count()
if _POSIX:
timer = lambda: _timer() * num_cpus
else:
timer = lambda: sum(cpu_times())
if blocking:
st1 = timer()
pt1 = self._proc.cpu_times()
time.sleep(interval)
st2 = timer()
pt2 = self._proc.cpu_times()
else:
st1 = self._last_sys_cpu_times
pt1 = self._last_proc_cpu_times
st2 = timer()
pt2 = self._proc.cpu_times()
if st1 is None or pt1 is None:
self._last_sys_cpu_times = st2
self._last_proc_cpu_times = pt2
return 0.0
delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
delta_time = st2 - st1
# reset values for next call in case of interval == None
self._last_sys_cpu_times = st2
self._last_proc_cpu_times = pt2
try:
# The utilization split between all CPUs.
# Note: a percentage > 100 is legitimate as it can result
# from a process with multiple threads running on different
# CPU cores, see:
# http://stackoverflow.com/questions/1032357
# https://github.com/giampaolo/psutil/issues/474
overall_percent = ((delta_proc / delta_time) * 100) * num_cpus
except ZeroDivisionError:
# interval was too low
return 0.0
else:
return round(overall_percent, 1)
def cpu_times(self):
"""Return a (user, system) namedtuple representing the
accumulated process time, in seconds.
This is the same as os.times() but per-process.
"""
return self._proc.cpu_times()
def memory_info(self):
"""Return a tuple representing RSS (Resident Set Size) and VMS
(Virtual Memory Size) in bytes.
On UNIX RSS and VMS are the same values shown by 'ps'.
On Windows RSS and VMS refer to "Mem Usage" and "VM Size"
columns of taskmgr.exe.
"""
return self._proc.memory_info()
def memory_info_ex(self):
"""Return a namedtuple with variable fields depending on the
platform representing extended memory information about
this process. All numbers are expressed in bytes.
"""
return self._proc.memory_info_ex()
def memory_percent(self):
"""Compare physical system memory to process resident memory
(RSS) and calculate process memory utilization as a percentage.
"""
rss = self._proc.memory_info()[0]
# use cached value if available
total_phymem = _TOTAL_PHYMEM or virtual_memory().total
try:
return (rss / float(total_phymem)) * 100
except ZeroDivisionError:
return 0.0
def memory_maps(self, grouped=True):
"""Return process' mapped memory regions as a list of nameduples
whose fields are variable depending on the platform.
If 'grouped' is True the mapped regions with the same 'path'
are grouped together and the different memory fields are summed.
If 'grouped' is False every mapped region is shown as a single
entity and the namedtuple will also include the mapped region's
address space ('addr') and permission set ('perms').
"""
it = self._proc.memory_maps()
if grouped:
d = {}
for tupl in it:
path = tupl[2]
nums = tupl[3:]
try:
d[path] = map(lambda x, y: x + y, d[path], nums)
except KeyError:
d[path] = nums
nt = _psplatform.pmmap_grouped
return [nt(path, *d[path]) for path in d] # NOQA
else:
nt = _psplatform.pmmap_ext
return [nt(*x) for x in it]
def open_files(self):
"""Return files opened by process as a list of
(path, fd) namedtuples including the absolute file name
and file descriptor number.
"""
return self._proc.open_files()
def connections(self, kind='inet'):
"""Return connections opened by process as a list of
(fd, family, type, laddr, raddr, status) namedtuples.
The 'kind' parameter filters for connections that match the
following criteria:
Kind Value Connections using
inet IPv4 and IPv6
inet4 IPv4
inet6 IPv6
tcp TCP
tcp4 TCP over IPv4
tcp6 TCP over IPv6
udp UDP
udp4 UDP over IPv4
udp6 UDP over IPv6
unix UNIX socket (both UDP and TCP protocols)
all the sum of all the possible families and protocols
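Example (the namedtuple name and values are illustrative):
>>> p = psutil.Process(os.getpid())
>>> p.connections(kind='tcp')
[pconn(fd=115, family=2, type=1, laddr=('10.0.0.1', 48776),
raddr=('93.186.135.91', 80), status='ESTABLISHED')]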
"""
return self._proc.connections(kind)
if _POSIX:
def _send_signal(self, sig):
try:
os.kill(self.pid, sig)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.ESRCH:
self._gone = True
raise NoSuchProcess(self.pid, self._name)
if err.errno == errno.EPERM:
raise AccessDenied(self.pid, self._name)
raise
@_assert_pid_not_reused
def send_signal(self, sig):
"""Send a signal to process pre-emptively checking whether
PID has been reused (see signal module constants) .
On Windows only SIGTERM is valid and is treated as an alias
for kill().
"""
if _POSIX:
self._send_signal(sig)
else:
if sig == signal.SIGTERM:
self._proc.kill()
else:
raise ValueError("only SIGTERM is supported on Windows")
@_assert_pid_not_reused
def suspend(self):
"""Suspend process execution with SIGSTOP pre-emptively checking
whether PID has been reused.
On Windows this has the effect of suspending all process threads.
"""
if _POSIX:
self._send_signal(signal.SIGSTOP)
else:
self._proc.suspend()
@_assert_pid_not_reused
def resume(self):
"""Resume process execution with SIGCONT pre-emptively checking
whether PID has been reused.
On Windows this has the effect of resuming all process threads.
"""
if _POSIX:
self._send_signal(signal.SIGCONT)
else:
self._proc.resume()
@_assert_pid_not_reused
def terminate(self):
"""Terminate the process with SIGTERM pre-emptively checking
whether PID has been reused.
On Windows this is an alias for kill().
"""
if _POSIX:
self._send_signal(signal.SIGTERM)
else:
self._proc.kill()
@_assert_pid_not_reused
def kill(self):
"""Kill the current process with SIGKILL pre-emptively checking
whether PID has been reused.
"""
if _POSIX:
self._send_signal(signal.SIGKILL)
else:
self._proc.kill()
def wait(self, timeout=None):
"""Wait for process to terminate and, if process is a children
of os.getpid(), also return its exit code, else None.
If the process is already terminated immediately return None
instead of raising NoSuchProcess.
If timeout (in seconds) is specified and process is still alive
raise TimeoutExpired.
To wait for multiple Process(es) use psutil.wait_procs().
"""
if timeout is not None and not timeout >= 0:
raise ValueError("timeout must be a positive integer")
return self._proc.wait(timeout)
# --- deprecated APIs
_locals = set(locals())
@_deprecated_method(replacement='children')
def get_children(self):
pass
@_deprecated_method(replacement='connections')
def get_connections(self):
pass
if "cpu_affinity" in _locals:
@_deprecated_method(replacement='cpu_affinity')
def get_cpu_affinity(self):
pass
@_deprecated_method(replacement='cpu_affinity')
def set_cpu_affinity(self, cpus):
pass
@_deprecated_method(replacement='cpu_percent')
def get_cpu_percent(self):
pass
@_deprecated_method(replacement='cpu_times')
def get_cpu_times(self):
pass
@_deprecated_method(replacement='cwd')
def getcwd(self):
pass
@_deprecated_method(replacement='memory_info_ex')
def get_ext_memory_info(self):
pass
if "io_counters" in _locals:
@_deprecated_method(replacement='io_counters')
def get_io_counters(self):
pass
if "ionice" in _locals:
@_deprecated_method(replacement='ionice')
def get_ionice(self):
pass
@_deprecated_method(replacement='ionice')
def set_ionice(self, ioclass, value=None):
pass
@_deprecated_method(replacement='memory_info')
def get_memory_info(self):
pass
@_deprecated_method(replacement='memory_maps')
def get_memory_maps(self):
pass
@_deprecated_method(replacement='memory_percent')
def get_memory_percent(self):
pass
@_deprecated_method(replacement='nice')
def get_nice(self):
pass
@_deprecated_method(replacement='num_ctx_switches')
def get_num_ctx_switches(self):
pass
if 'num_fds' in _locals:
@_deprecated_method(replacement='num_fds')
def get_num_fds(self):
pass
if 'num_handles' in _locals:
@_deprecated_method(replacement='num_handles')
def get_num_handles(self):
pass
@_deprecated_method(replacement='num_threads')
def get_num_threads(self):
pass
@_deprecated_method(replacement='open_files')
def get_open_files(self):
pass
if "rlimit" in _locals:
@_deprecated_method(replacement='rlimit')
def get_rlimit(self):
pass
@_deprecated_method(replacement='rlimit')
def set_rlimit(self, resource, limits):
pass
@_deprecated_method(replacement='threads')
def get_threads(self):
pass
@_deprecated_method(replacement='nice')
def set_nice(self, value):
pass
del _locals
# =====================================================================
# --- Popen class
# =====================================================================
class Popen(Process):
"""A more convenient interface to stdlib subprocess module.
It starts a sub process and deals with it exactly as when using
subprocess.Popen class but in addition also provides all the
properties and methods of psutil.Process class as a unified
interface:
>>> import psutil
>>> from subprocess import PIPE
>>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
>>> p.name()
'python'
>>> p.uids()
user(real=1000, effective=1000, saved=1000)
>>> p.username()
'giampaolo'
>>> p.communicate()
('hi\n', None)
>>> p.terminate()
>>> p.wait(timeout=2)
0
>>>
For method names common to both classes such as kill(), terminate()
and wait(), psutil.Process implementation takes precedence.
Unlike subprocess.Popen this class pre-emptively checks whether PID
has been reused on send_signal(), terminate() and kill() so that
you don't accidentally terminate another process, fixing
http://bugs.python.org/issue6973.
For a complete documentation refer to:
http://docs.python.org/library/subprocess.html
"""
def __init__(self, *args, **kwargs):
# Explicitly avoid to raise NoSuchProcess in case the process
# spawned by subprocess.Popen terminates too quickly, see:
# https://github.com/giampaolo/psutil/issues/193
self.__subproc = subprocess.Popen(*args, **kwargs)
self._init(self.__subproc.pid, _ignore_nsp=True)
def __dir__(self):
return sorted(set(dir(Popen) + dir(subprocess.Popen)))
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
try:
return object.__getattribute__(self.__subproc, name)
except AttributeError:
raise AttributeError("%s instance has no attribute '%s'"
% (self.__class__.__name__, name))
def wait(self, timeout=None):
if self.__subproc.returncode is not None:
return self.__subproc.returncode
ret = super(Popen, self).wait(timeout)
self.__subproc.returncode = ret
return ret
# =====================================================================
# --- system processes related functions
# =====================================================================
def pids():
"""Return a list of current running PIDs."""
return _psplatform.pids()
def pid_exists(pid):
"""Return True if given PID exists in the current process list.
This is faster than doing "pid in psutil.pids()" and
should be preferred.
"""
if pid < 0:
return False
elif pid == 0 and _POSIX:
# On POSIX we use os.kill() to determine PID existence.
# According to "man 2 kill" PID 0 has a special meaning
# though: it refers to <<every process in the process
group of the calling process>> and that is not what we want
# to do here.
return pid in pids()
else:
return _psplatform.pid_exists(pid)
_pmap = {}
def process_iter():
"""Return a generator yielding a Process instance for all
running processes.
Every new Process instance is only created once and then cached
into an internal table which is updated every time this is used.
Cached Process instances are checked for identity so that you're
safe in case a PID has been reused by another process, in which
case the cached instance is updated.
The sorting order in which processes are yielded is based on
their PIDs.
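Example (process names are illustrative):
>>> [p.name() for p in psutil.process_iter()][:3]
['systemd', 'kthreadd', 'ksoftirqd/0']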
"""
def add(pid):
proc = Process(pid)
_pmap[proc.pid] = proc
return proc
def remove(pid):
_pmap.pop(pid, None)
a = set(pids())
b = set(_pmap.keys())
new_pids = a - b
gone_pids = b - a
for pid in gone_pids:
remove(pid)
for pid, proc in sorted(list(_pmap.items()) +
list(dict.fromkeys(new_pids).items())):
try:
if proc is None: # new process
yield add(pid)
else:
# use is_running() to check whether PID has been reused by
# another process in which case yield a new Process instance
if proc.is_running():
yield proc
else:
yield add(pid)
except NoSuchProcess:
remove(pid)
except AccessDenied:
# Process creation time can't be determined hence there's
# no way to tell whether the pid of the cached process
# has been reused. Just return the cached version.
yield proc
def wait_procs(procs, timeout=None, callback=None):
"""Convenience function which waits for a list of processes to
terminate.
Return a (gone, alive) tuple indicating which processes
are gone and which ones are still alive.
The gone ones will have a new 'returncode' attribute indicating
process exit status (may be None).
'callback' is a function which gets called every time a process
terminates (a Process instance is passed as callback argument).
Function will return as soon as all processes terminate or when
timeout occurs.
Typical use case is:
- send SIGTERM to a list of processes
- give them some time to terminate
- send SIGKILL to those ones which are still alive
Example:
>>> def on_terminate(proc):
... print("process {} terminated".format(proc))
...
>>> for p in procs:
... p.terminate()
...
>>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
>>> for p in alive:
... p.kill()
"""
def check_gone(proc, timeout):
try:
returncode = proc.wait(timeout=timeout)
except TimeoutExpired:
pass
else:
if returncode is not None or not proc.is_running():
proc.returncode = returncode
gone.add(proc)
if callback is not None:
callback(proc)
if timeout is not None and not timeout >= 0:
msg = "timeout must be a positive integer, got %s" % timeout
raise ValueError(msg)
gone = set()
alive = set(procs)
if callback is not None and not callable(callback):
raise TypeError("callback %r is not a callable" % callback)
if timeout is not None:
deadline = _timer() + timeout
while alive:
if timeout is not None and timeout <= 0:
break
for proc in alive:
# Make sure that every complete iteration (all processes)
# will last max 1 sec.
# We do this because we don't want to wait too long on a
# single process: in case it terminates too late other
# processes may disappear in the meantime and their PID
# reused.
max_timeout = 1.0 / len(alive)
if timeout is not None:
timeout = min((deadline - _timer()), max_timeout)
if timeout <= 0:
break
check_gone(proc, timeout)
else:
check_gone(proc, max_timeout)
alive = alive - gone
if alive:
# Last attempt over processes survived so far.
# timeout == 0 won't make this function wait any further.
for proc in alive:
check_gone(proc, 0)
alive = alive - gone
return (list(gone), list(alive))
# =====================================================================
# --- CPU related functions
# =====================================================================
@memoize
def cpu_count(logical=True):
"""Return the number of logical CPUs in the system (same as
os.cpu_count() in Python 3.4).
If logical is False return the number of physical cores only
(hyper thread CPUs are excluded).
Return None if undetermined.
The return value is cached after first call.
If desired cache can be cleared like this:
>>> psutil.cpu_count.cache_clear()
"""
if logical:
return _psplatform.cpu_count_logical()
else:
return _psplatform.cpu_count_physical()
def cpu_times(percpu=False):
"""Return system-wide CPU times as a namedtuple.
Every CPU time represents the seconds the CPU has spent in the given mode.
The namedtuple's fields availability varies depending on the platform:
- user
- system
- idle
- nice (UNIX)
- iowait (Linux)
- irq (Linux, FreeBSD)
- softirq (Linux)
- steal (Linux >= 2.6.11)
- guest (Linux >= 2.6.24)
- guest_nice (Linux >= 3.2.0)
When percpu is True return a list of namedtuples for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
"""
if not percpu:
return _psplatform.cpu_times()
else:
return _psplatform.per_cpu_times()
_last_cpu_times = cpu_times()
_last_per_cpu_times = cpu_times(percpu=True)
def cpu_percent(interval=None, percpu=False):
"""Return a float representing the current system-wide CPU
utilization as a percentage.
When interval is > 0.0 compares system CPU times elapsed before
and after the interval (blocking).
When interval is 0.0 or None compares system CPU times elapsed
since last call or module import, returning immediately (non
blocking). That means the first time this is called it will
return a meaningless 0.0 value which you should ignore.
In this case it is recommended, for accuracy, that this function be
called with at least 0.1 seconds between calls.
When percpu is True returns a list of floats representing the
utilization as a percentage for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
Examples:
>>> # blocking, system-wide
>>> psutil.cpu_percent(interval=1)
2.0
>>>
>>> # blocking, per-cpu
>>> psutil.cpu_percent(interval=1, percpu=True)
[2.0, 1.0]
>>>
>>> # non-blocking (percentage since last call)
>>> psutil.cpu_percent(interval=None)
2.9
>>>
"""
global _last_cpu_times
global _last_per_cpu_times
blocking = interval is not None and interval > 0.0
def calculate(t1, t2):
t1_all = sum(t1)
t1_busy = t1_all - t1.idle
t2_all = sum(t2)
t2_busy = t2_all - t2.idle
# this usually indicates a float precision issue
if t2_busy <= t1_busy:
return 0.0
busy_delta = t2_busy - t1_busy
all_delta = t2_all - t1_all
busy_perc = (busy_delta / all_delta) * 100
return round(busy_perc, 1)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times
_last_cpu_times = cpu_times()
return calculate(t1, _last_cpu_times)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times
_last_per_cpu_times = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times):
ret.append(calculate(t1, t2))
return ret
# Use separate global vars for cpu_times_percent() so that it's
# independent from cpu_percent() and they can both be used within
# the same program.
_last_cpu_times_2 = _last_cpu_times
_last_per_cpu_times_2 = _last_per_cpu_times
def cpu_times_percent(interval=None, percpu=False):
"""Same as cpu_percent() but provides utilization percentages
for each specific CPU time as is returned by cpu_times().
For instance, on Linux we'll get:
>>> cpu_times_percent()
cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
>>>
interval and percpu arguments have the same meaning as in
cpu_percent().
"""
global _last_cpu_times_2
global _last_per_cpu_times_2
blocking = interval is not None and interval > 0.0
def calculate(t1, t2):
nums = []
all_delta = sum(t2) - sum(t1)
for field in t1._fields:
field_delta = getattr(t2, field) - getattr(t1, field)
try:
field_perc = (100 * field_delta) / all_delta
except ZeroDivisionError:
field_perc = 0.0
field_perc = round(field_perc, 1)
if _WINDOWS:
# XXX
# Work around:
# https://github.com/giampaolo/psutil/issues/392
# CPU times are always supposed to increase over time
# or at least remain the same and that's because time
# cannot go backwards.
# Surprisingly sometimes this might not be the case on
# Windows where 'system' CPU time can be smaller
# compared to the previous call, resulting in corrupted
# percentages (< 0 or > 100).
# I really don't know what to do about that except
# forcing the value to 0 or 100.
if field_perc > 100.0:
field_perc = 100.0
elif field_perc < 0.0:
field_perc = 0.0
nums.append(field_perc)
return _psplatform.scputimes(*nums)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times_2
_last_cpu_times_2 = cpu_times()
return calculate(t1, _last_cpu_times_2)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times_2
_last_per_cpu_times_2 = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times_2):
ret.append(calculate(t1, t2))
return ret
# =====================================================================
# --- system memory related functions
# =====================================================================
def virtual_memory():
"""Return statistics about system memory usage as a namedtuple
including the following fields, expressed in bytes:
- total:
total physical memory available.
- available:
the actual amount of available memory that can be given
instantly to processes that request more memory in bytes; this
is calculated by summing different memory values depending on
the platform (e.g. free + buffers + cached on Linux) and it is
supposed to be used to monitor actual memory usage in a cross
platform fashion.
- percent:
the percentage usage calculated as (total - available) / total * 100
- used:
memory used, calculated differently depending on the platform and
designed for informational purposes only:
OSX: active + inactive + wired
BSD: active + wired + cached
LINUX: total - free
- free:
memory not being used at all (zeroed) that is readily available;
note that this doesn't reflect the actual memory available
(use 'available' instead)
Platform-specific fields:
- active (UNIX):
memory currently in use or very recently used, and so it is in RAM.
- inactive (UNIX):
memory that is marked as not used.
- buffers (BSD, Linux):
cache for things like file system metadata.
- cached (BSD, OSX):
cache for various things.
- wired (OSX, BSD):
memory that is marked to always stay in RAM. It is never moved to disk.
- shared (BSD):
memory that may be simultaneously accessed by multiple processes.
The sum of 'used' and 'available' does not necessarily equal total.
On Windows 'available' and 'free' are the same.
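Example (the namedtuple name, fields and numbers are illustrative and
vary by platform):
>>> psutil.virtual_memory()
svmem(total=8374149120, available=2081050624, percent=75.1,
used=8246628352, free=127520768, active=3208777728,
inactive=1133408256, buffers=342413312, cached=1611116544)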
"""
global _TOTAL_PHYMEM
ret = _psplatform.virtual_memory()
# cached for later use in Process.memory_percent()
_TOTAL_PHYMEM = ret.total
return ret
def swap_memory():
"""Return system swap memory statistics as a namedtuple including
the following fields:
- total: total swap memory in bytes
- used: used swap memory in bytes
- free: free swap memory in bytes
- percent: the percentage usage
- sin: no. of bytes the system has swapped in from disk (cumulative)
- sout: no. of bytes the system has swapped out from disk (cumulative)
'sin' and 'sout' on Windows are meaningless and always set to 0.
"""
return _psplatform.swap_memory()
# =====================================================================
# --- disks/partitions related functions
# =====================================================================
def disk_usage(path):
"""Return disk usage statistics about the given path as a namedtuple
including total, used and free space expressed in bytes plus the
percentage usage.
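Example (numbers are illustrative):
>>> psutil.disk_usage('/')
sdiskusage(total=21378641920, used=4809781248, free=15482871808, percent=22.5)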
"""
return _psplatform.disk_usage(path)
def disk_partitions(all=False):
"""Return mounted partitions as a list of
(device, mountpoint, fstype, opts) namedtuple.
'opts' field is a raw string separated by commas indicating mount
options which may vary depending on the platform.
If "all" parameter is False return physical devices only and ignore
all others.
"""
return _psplatform.disk_partitions(all)
def disk_io_counters(perdisk=False):
"""Return system disk I/O statistics as a namedtuple including
the following fields:
- read_count: number of reads
- write_count: number of writes
- read_bytes: number of bytes read
- write_bytes: number of bytes written
- read_time: time spent reading from disk (in milliseconds)
- write_time: time spent writing to disk (in milliseconds)
If perdisk is True return the same information for every
physical disk installed on the system as a dictionary
with partition names as the keys and the namedtuple
described above as the values.
On recent Windows versions 'diskperf -y' command may need to be
executed first otherwise this function won't find any disk.
"""
rawdict = _psplatform.disk_io_counters()
if not rawdict:
raise RuntimeError("couldn't find any physical disk")
if perdisk:
for disk, fields in rawdict.items():
rawdict[disk] = _nt_sys_diskio(*fields)
return rawdict
else:
return _nt_sys_diskio(*[sum(x) for x in zip(*rawdict.values())])
# =====================================================================
# --- network related functions
# =====================================================================
def net_io_counters(pernic=False):
"""Return network I/O statistics as a namedtuple including
the following fields:
- bytes_sent: number of bytes sent
- bytes_recv: number of bytes received
- packets_sent: number of packets sent
- packets_recv: number of packets received
- errin: total number of errors while receiving
- errout: total number of errors while sending
- dropin: total number of incoming packets which were dropped
- dropout: total number of outgoing packets which were dropped
(always 0 on OSX and BSD)
If pernic is True return the same information for every
network interface installed on the system as a dictionary
with network interface names as the keys and the namedtuple
described above as the values.
"""
rawdict = _psplatform.net_io_counters()
if not rawdict:
raise RuntimeError("couldn't find any network interface")
if pernic:
for nic, fields in rawdict.items():
rawdict[nic] = _nt_sys_netio(*fields)
return rawdict
else:
return _nt_sys_netio(*[sum(x) for x in zip(*rawdict.values())])
def net_connections(kind='inet'):
"""Return system-wide connections as a list of
(fd, family, type, laddr, raddr, status, pid) namedtuples.
In case of limited privileges 'fd' and 'pid' may be set to -1
and None respectively.
The 'kind' parameter filters for connections that fit the
following criteria:
Kind Value Connections using
inet IPv4 and IPv6
inet4 IPv4
inet6 IPv6
tcp TCP
tcp4 TCP over IPv4
tcp6 TCP over IPv6
udp UDP
udp4 UDP over IPv4
udp6 UDP over IPv6
unix UNIX socket (both UDP and TCP protocols)
all the sum of all the possible families and protocols
"""
return _psplatform.net_connections(kind)
# =====================================================================
# --- other system related functions
# =====================================================================
def boot_time():
"""Return the system boot time expressed in seconds since the epoch.
This is also available as psutil.BOOT_TIME.
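Example (the timestamp is illustrative):
>>> psutil.boot_time()
1389563460.0
>>> import datetime
>>> datetime.datetime.fromtimestamp(psutil.boot_time()).strftime("%Y-%m-%d %H:%M:%S")
'2014-01-12 22:51:00'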
"""
# Note: we are not caching this because it is subject to
# system clock updates.
return _psplatform.boot_time()
def users():
"""Return users currently connected on the system as a list of
namedtuples including the following fields.
- user: the name of the user
- terminal: the tty or pseudo-tty associated with the user, if any.
- host: the host name associated with the entry, if any.
- started: the creation time as a floating point number expressed in
seconds since the epoch.
"""
return _psplatform.users()
# =====================================================================
# --- deprecated functions
# =====================================================================
@_deprecated(replacement="psutil.pids()")
def get_pid_list():
return pids()
@_deprecated(replacement="list(process_iter())")
def get_process_list():
return list(process_iter())
@_deprecated(replacement="psutil.users()")
def get_users():
return users()
@_deprecated(replacement="psutil.virtual_memory()")
def phymem_usage():
"""Return the amount of total, used and free physical memory
on the system in bytes plus the percentage usage.
Deprecated; use psutil.virtual_memory() instead.
"""
return virtual_memory()
@_deprecated(replacement="psutil.swap_memory()")
def virtmem_usage():
return swap_memory()
@_deprecated(replacement="psutil.phymem_usage().free")
def avail_phymem():
return phymem_usage().free
@_deprecated(replacement="psutil.phymem_usage().used")
def used_phymem():
return phymem_usage().used
@_deprecated(replacement="psutil.virtmem_usage().total")
def total_virtmem():
return virtmem_usage().total
@_deprecated(replacement="psutil.virtmem_usage().used")
def used_virtmem():
return virtmem_usage().used
@_deprecated(replacement="psutil.virtmem_usage().free")
def avail_virtmem():
return virtmem_usage().free
@_deprecated(replacement="psutil.net_io_counters()")
def network_io_counters(pernic=False):
return net_io_counters(pernic)
def test():
"""List info of all currently running processes emulating ps aux
output.
"""
import datetime
from psutil._compat import print_
today_day = datetime.date.today()
templ = "%-10s %5s %4s %4s %7s %7s %-13s %5s %7s %s"
attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times',
'create_time', 'memory_info']
if _POSIX:
attrs.append('uids')
attrs.append('terminal')
print_(templ % ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "TTY",
"START", "TIME", "COMMAND"))
for p in process_iter():
try:
pinfo = p.as_dict(attrs, ad_value='')
except NoSuchProcess:
pass
else:
if pinfo['create_time']:
ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])
if ctime.date() == today_day:
ctime = ctime.strftime("%H:%M")
else:
ctime = ctime.strftime("%b%d")
else:
ctime = ''
cputime = time.strftime("%M:%S",
time.localtime(sum(pinfo['cpu_times'])))
try:
user = p.username()
except KeyError:
if _POSIX:
if pinfo['uids']:
user = str(pinfo['uids'].real)
else:
user = ''
else:
raise
except Error:
user = ''
if _WINDOWS and '\\' in user:
user = user.split('\\')[1]
vms = pinfo['memory_info'] and \
int(pinfo['memory_info'].vms / 1024) or '?'
rss = pinfo['memory_info'] and \
int(pinfo['memory_info'].rss / 1024) or '?'
memp = pinfo['memory_percent'] and \
round(pinfo['memory_percent'], 1) or '?'
print_(templ % (user[:10],
pinfo['pid'],
pinfo['cpu_percent'],
memp,
vms,
rss,
pinfo.get('terminal', '') or '?',
ctime,
cputime,
pinfo['name'].strip() or '?'))
def _replace_module():
"""Dirty hack to replace the module object in order to access
deprecated module constants, see:
http://www.dr-josiah.com/2013/12/properties-on-python-modules.html
"""
class ModuleWrapper(object):
def __repr__(self):
return repr(self._module)
__str__ = __repr__
@property
def NUM_CPUS(self):
msg = "NUM_CPUS constant is deprecated; use cpu_count() instead"
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return cpu_count()
@property
def BOOT_TIME(self):
msg = "BOOT_TIME constant is deprecated; use boot_time() instead"
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return boot_time()
@property
def TOTAL_PHYMEM(self):
msg = "TOTAL_PHYMEM constant is deprecated; " \
"use virtual_memory().total instead"
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return virtual_memory().total
mod = ModuleWrapper()
mod.__dict__ = globals()
mod._module = sys.modules[__name__]
sys.modules[__name__] = mod
_replace_module()
del property, memoize, division, _replace_module
if sys.version_info < (3, 0):
del num
if __name__ == "__main__":
test()
|
StarcoderdataPython
|
1795123
|
<reponame>icemac/icemac.ab.importer<filename>src/icemac/ab/importer/browser/wizard/tests/test_reader.py
from icemac.addressbook.interfaces import IPhoneNumber
import zope.component.hooks
def test_reader__ReaderSettings__1(address_book, browser, ImportFileFactory):
"""It has an empty import file readers list on empty import file."""
ImportFileFactory(address_book, u'empty.csv', [''])
browser.login('mgr')
browser.open(browser.IMPORTER_FILE_IMPORT_URL)
# There is no import file reader, as the CSV reader expects a first line
# containing the field names:
assert [] == browser.getControl('Import file format').displayOptions
def test_reader__ReaderSettings__2(address_book, browser, ImportFileFactory):
"""It allows to import multiple addresses and numbers."""
ImportFileFactory(address_book, u'multi.csv', [
'last_name,1st_phone,2nd_phone,3rd_phone',
'One,112,110,108',
'Two,,116116,',
'Three,1111,,2222',
'Four,,,'])
browser.login('mgr')
browser.open(browser.IMPORTER_FILE_IMPORT_URL)
assert '1' == browser.getControl('person sub-entities').value
browser.getControl('person sub-entities').value = '3'
browser.getControl('Next').click()
assert browser.IMPORTER_IMPORT_MAP_URL == browser.url
browser.getControl('last name').displayValue = [
'last_name (One, Two, Three)']
browser.getControl('number', index=0).displayValue = [
'1st_phone (112, 1111)']
browser.getControl('number', index=1).displayValue = [
'2nd_phone (110, 116116)']
browser.getControl('number', index=2).displayValue = [
'3rd_phone (108, 2222)']
browser.getControl('Next').click()
# The review step shows the import result, the first phone number is the
# default one:
assert browser.IMPORTER_IMPORT_REVIEW_URL == browser.url
assert [
'first name',
'last name',
'birth date',
'keywords',
'notes',
'address prefix',
'street',
'city',
'zip',
'country',
'address prefix',
'street',
'city',
'zip',
'country',
'address prefix',
'street',
'city',
'zip',
'country',
'number',
'number',
'number',
'e-mail address',
'e-mail address',
'e-mail address',
'URL',
'URL',
'URL',
'One',
'DE',
'112',
'110',
'108',
'Two',
'DE',
'116116',
'Three',
'DE',
'1111',
'2222',
'Four',
'DE'] == browser.etree.xpath('//th/text() | //td//text()')
# After completing the import the imported data can be inspected:
browser.getControl('Next').click()
assert browser.IMPORTER_IMPORT_COMPLETE_URL == browser.url
browser.getControl('Complete').click()
assert browser.IMPORTER_OVERVIEW_URL == browser.url
zope.component.hooks.setSite(address_book)
# The first phone number is the main one and it is displayed as the main
# phone number in the form below, too:
p1 = address_book['Person']
assert u'One' == p1.get_name()
assert '112' == p1.default_phone_number.number
assert (['112', '110', '108'] ==
[x.number for x in p1.values() if IPhoneNumber.providedBy(x)])
# When there is no first one an empty main number is created:
p2 = address_book['Person-2']
assert u'Two' == p2.get_name()
assert None is p2.default_phone_number.number
assert ([None, '116116'] ==
[x.number for x in p2.values() if IPhoneNumber.providedBy(x)])
# Empty non-main phone numbers are not created, so the missing 2nd number
# is left out and the third one gets the one and only non-main phone
# number:
p3 = address_book['Person-3']
assert u'Three' == p3.get_name()
assert '1111' == p3.default_phone_number.number
assert (['1111', '2222'] ==
[x.number for x in p3.values() if IPhoneNumber.providedBy(x)])
# But there must be a default number which is always created:
p4 = address_book['Person-4']
assert u'Four' == p4.get_name()
assert None is p4.default_phone_number.number
assert ([None] ==
[x.number for x in p4.values() if IPhoneNumber.providedBy(x)])
|
StarcoderdataPython
|
6427710
|
from pprint import pprint
points = [
(10, 41, 23),
(22, 30, 29),
(11, 42, 5),
(20, 32, 4),
(12, 40, 12),
(21, 36, 23)
]
print('points:')
pprint(points)
|
StarcoderdataPython
|
5040953
|
<reponame>BDI-ENIB/hermin-I<gh_stars>1-10
#import python Module
import os
import os.path
import csv
import time
import shutil
#import Dolmen Module
import Windows
import Widgets
import Graph
import Config
import Sensors
import Error
count=0 # current line in CSV
state_communication=False #if True => start decoding
def currentTime(): # return the current time
return str(time.strftime('%H.' '%M.' '%S'))
def currentDate(): # return the current date
return str(time.strftime('%Y_' '%m_' '%d_'))
def decodingCSV(): #open and decoding CSV file
global count
#import csv
filename = open(Config.CSV, 'r', encoding='latin1')
reader = csv.reader(filename, delimiter=';')
lines = [line for line in reader] #import list of CSV lines
filename.close()
# add and find the CSV line to decode
if lines==[] and count==0: #if CSV is empty
return
if (len(lines)>count):
if lines[count]==['stop']:
state_set_communication(Config.start_button,Config.stop_button,False,Config.currentMode,Config.figure.file,False)
#find time
for j in range(0,len(lines[count])-1):
if str(lines[count][j])=='0' and str(lines[count][j+1])=='time (s)': # time sensor detected
if str(lines[count][j+2])!="": # if time data not empty
for sensor in Config.sensors_list_set_time:
sensor.x.append(float(lines[count][j+2]))
#decoding current line for each sensors
for sensor in Config.sensors:
sensor.decoding(lines[count])
#verify that all sensors have been handled
for sensor in Config.sensors:
sensor.verifing()
#update line
count+=1
else:
return
def fileExist(fileToTest): # detect if file in folder exist
try:
with open(fileToTest,'r',encoding='latin1') as filename:
return True #file exist
except: #no file
Config.Log.InfoSaveLog("warning",str(fileToTest + "not exist"))
return False # return error file no found
def updateOffline(i,start_button,stop_button): # decoding csv and updating graph in Offline mode
global state_communication
if(state_communication==True and fileExist(Config.figure.file)==True): #if you click on the start button of fire_interface windows and csv name exist
decodingCSV() # decoding csv
if(state_communication==True and fileExist(Config.figure.file)==True):
for sensor in Config.sensors:
sensor.graph.animate()# update graph
else : #if no CSV file
if fileExist(Config.figure.file)==False :
Windows.messageShowwarning("Open Filename", "Corrupted CSV file or not found")
Config.Log.InfoSaveLog("info",'Corrupted CSV file or not found')
def updateOnline(i,start_button,stop_button): # decoding csv and updating graph in Online mode
global state_communication
if(state_communication==True ): #if you click on the start button of fire_interface windows and csv name exist
decodingCSV() # decoding csv
for sensor in Config.sensors:
sensor.graph.animate()# update graph
else :#if no CSV file
if fileExist(Config.figure.file)==False :
Windows.messageShowwarning("Open Filename", "Corrupted CSV file or not found")
Config.Log.InfoSaveLog("info",'Corrupted CSV file or not found')
def initFigure(): #init and reset graph
global count,state_communication
#disable communication
state_communication = False
#reset current line in CSV
count=0
#reset CSV file
os.remove(Config.CSV)
filename = open(Config.CSV, 'w')
filename.close()
#init each graphe
for sensor in Config.sensors:
sensor.graph.initGraph()
def report_Function(): #report generation
global state_communication,save
if (state_communication == False): # if decoding is stopped
#check if is save folder exist
if not os.path.exists(Config.SAVE_REPORT_FOLDER):
Config.Log.InfoSaveLog("error",str(Config.SAVE_REPORT_FOLDER + " folder not found => creation"))
os.makedirs(Config.SAVE_REPORT_FOLDER)
if not os.path.exists(Config.SAVE_REPORT_FOLDER + '/'+ str(Config.NAME_SAVE_FOLDER)):
Config.Log.InfoSaveLog("error",str(Config.NAME_SAVE_FOLDER + " folder not found => creation"))
os.makedirs(Config.SAVE_REPORT_FOLDER + '/'+ str(Config.NAME_SAVE_FOLDER))
#save report
shutil.copy(Config.figure.file,Config.SAVE_REPORT_FOLDER + '/'+ str(Config.NAME_SAVE_FOLDER) + '/' )
shutil.copy(Config.CSV,Config.SAVE_REPORT_FOLDER + '/'+ str(Config.NAME_SAVE_FOLDER) + '/' ) #copy csv file in save report folder
Config.figure.saveFig(Config.SAVE_REPORT_FOLDER,Config.NAME_SAVE_FOLDER,Config.NAME_SAVE_FIGURE) #save figure in save report folder
Windows.messageShowinfo("Report generation","Report generation successfully created.")
#print and save in log
Config.Log.InfoSaveLog("info",str("report generation in " + Config.SAVE_REPORT_FOLDER + "/" + Config.NAME_SAVE_FOLDER))
print("report generation in " + Config.SAVE_REPORT_FOLDER + "/" + Config.NAME_SAVE_FOLDER)
else : # if decoding is not stopped
#print and save in log
Windows.messageShowwarning("Report generation warning","Warning : please stop the data receive before to generate the report.")
Config.Log.InfoSaveLog("info",str("trying to generate report before stopping decoding"))
def state_set_communication(start_button,stop_button,state,currentMode,frame,askToStop): # askToStop => True: ask for confirmation before stopping; False: start/stop without asking
global state_communication
#if the user click on the start button
if(state==True):
#initFigure()
#enable decoding
state_communication=True
Config.Log.InfoSaveLog("info",'start decoding')
#communication with c++
config=open(Config.CONFIG_TXT, "w")
config.write("true")
config.write("\n")
config.write(str(currentMode))
config.write("\n")
config.write(frame)
config.close()
#set start and stop button state
#disable start button
start_button.disable()
#enable stop button
stop_button.enable()
#if the user click on the stop button
elif(state==False):
#check if ask stop decoding if true ask confirmation
if askToStop== True:
if (Windows.messageAskyesno("End of data receive", "Do you want to stop the data receive ?")):
Config.Log.InfoSaveLog("info",'stop decoding')
#enable stop button
start_button.disable()
#disable stop button
stop_button.disable()
print("stop decoding")
#disable decoding
state_communication=False
#communication with c++
config=open(Config.CONFIG_TXT, "w")
config.write("false")
config.write("\n")
config.write(str(currentMode))
if askToStop == True:
Windows.messageShowinfo("","Don't forget to gererate\nreport if you want")
if askToStop==False :
print("end CSV file")
Config.Log.InfoSaveLog("info","end CSV file")
#disable start button
start_button.disable()
#disable stop button
stop_button.disable()
Windows.messageShowinfo("","End CSV file\nDon't forget to gererate\nreport if you want")
def add_sensor_save_Function(add_sensor_interface,sensor_add_name):
save_condition = True
if(sensor_add_name.getEntry()==""): #if no name given
Config.Log.InfoSaveLog("warning",'add new sensor : no name given')
Windows.messageShowerror("Name error","Please enter the sensor's name")
save_condition=False
#check that the sensor does not already exist
if(os.path.isfile(sensor_add_name.getEntry().lower() + ".hpp") or os.path.isfile(sensor_add_name.getEntry().lower() + ".cpp")):
Config.Log.InfoSaveLog("warning",str('sensor name ' + sensor_add_name.getEntry().lower() +' already exit'))
Windows.messageShowerror("Name error",sensor_add_name.getEntry().lower() + " sensor already exist. Please change the sensor's name")
save_condition=False
#if no error, create the hpp and the cpp
if(save_condition==True):
# Creation of sensor.hpp
hpp = open(sensor_add_name.getEntry().lower() + ".hpp", "x")
hpp.write("#ifndef DOLMEN_"+ sensor_add_name.getEntry().upper() +"_HPP")
hpp.write("\n#define DOLMEN_" + sensor_add_name.getEntry().upper() +"_HPP 1")
hpp.write("\n")
hpp.write("\n#include <string>")
hpp.write('\n#include "sensor.hpp"')
hpp.write("\n")
hpp.write("\nnamespace dolmen")
hpp.write("\n{")
hpp.write("\n class " + sensor_add_name.getEntry().capitalize() + " : public Sensor")
hpp.write("\n {")
hpp.write("\n public :")
hpp.write("\n " + sensor_add_name.getEntry().capitalize() + " (int id, std::string name);")
hpp.write("\n")
hpp.write("\n void decoding(const std::string data) override;")
hpp.write("\n")
hpp.write("\n std::string getColumnIdentifiers() override")
hpp.write("\n {")
hpp.write("\n return " + '"' + sensor_add_name.getEntry().capitalize() + '"'+ ";")
hpp.write("\n }")
hpp.write("\n")
hpp.write("\n int getNbAttr() override")
hpp.write("\n {")
hpp.write("\n return 3;")
hpp.write("\n }")
hpp.write("\n };")
hpp.write("\n}")
hpp.write("\n")
hpp.write("\n#endif")
hpp.close()
# Creation of sensor.cpp
cpp = open(sensor_add_name.getEntry().lower() + ".cpp", "x")
cpp.write('#include "' + sensor_add_name.getEntry().lower() + '.hpp"')
cpp.write("\n")
cpp.write("\nnamespace dolmen")
cpp.write("\n{")
cpp.write("\n " + sensor_add_name.getEntry().capitalize() + "::" + sensor_add_name.getEntry().capitalize() + " (int id, std::string name):Sensor{id,name}{}")
cpp.write("\n")
cpp.write("\n void " + sensor_add_name.getEntry().capitalize() +"::decoding(const std::string data)")
cpp.write("\n {")
cpp.write("\n //insert here the decoding method of your sensor, you can check others sensors to see how we created the previous ones")
cpp.write("\n }")
cpp.write("\n} /* dolmen */")
cpp.close()
#print save in log and return
Config.Log.InfoSaveLog("info",str(' sensor ' + str(sensor_add_name.getEntry()).lower() +" generated"))
Windows.messageShowinfo("Sensor generation",sensor_add_name.getEntry().lower() + """ sensor generation successfully created. Do not forget to complete the decoding function of this class in the c++ code.
\nfor the Python code, add a sensor in Config.py and optionally create a graph to display it""")
#return in sensors management windows
Config.sensors_management_Function(add_sensor_interface)
|
StarcoderdataPython
|
239419
|
<filename>ServidorSenha/ServidorSenha.py<gh_stars>1-10
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
"""
Password server (Servidor de Senha).
Module with the password class.
"""
from pymongo import MongoClient
from hmac import compare_digest as comparador
import crypt
from xmlrpc.server import SimpleXMLRPCServer
import xmlrpc.client
import socket
class ServidorSenha(object):
"""docstring for ServidorSenha."""
def __init__(self):
#IP and port of the database that stores the passwords
self.client= MongoClient('localhost',27017)
#application database
self.db=self.client.zeus
#collection where the passwords are stored
self.colecao=self.db.senhas
self.status="online"
def get_colecao(self):
return self.colecao
def login(self,usuario,senha):
"""Valida nome e usuário para login"""
dados=self.colecao.find_one({"login":usuario})
print("chegou aqui")
if(dados is None):
print("Usuário não encontrado")
return False,False
else:
senha=crypt.crypt(senha,dados['senha'])
if(comparador(senha,dados['senha'])):
print("usuário logado")
return True,dados['root']
else:
#invalid user or password
print("login error")
return False,False
#comparador(dados.senha)
def novo_usuario(self,usuario,senha,root):
"""Cria e insere um usuário no banco"""
if("True" in root):
estado=1
else:
estado=""
dados={"login":usuario,"senha":crypt.crypt(senha),"root":bool(estado)}
try:
self.colecao.insert_one(dados)
#user created successfully.
return True
except Exception as e:
print("erro ao criar o usuário")
print(e)
return False
def on(self):
return "Estou acessível"
#port the server listens on
porta=8000
#instantiate the password class
#SenhaAuth= ServidorSenha()
#remote procedure call (RPC) server
#ip="172.18.1.34"
ip="127.0.0.1"
server= SimpleXMLRPCServer((ip,porta))
print("Servidor de senha executando na porta",porta)
print("O ip definido está como: ",ip)
#register the password server instance with the RPC server
server.register_instance(ServidorSenha())
#keep the password server running in a loop
server.serve_forever()
# #senha.login("magno","<PASSWORD>")
# senha.cria_usuario("zeus","4<PASSWORD>","1")
# #senha.valida_senha("zeus","<PASSWORD>")
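# Client sketch (hypothetical): the code below belongs in a separate client
# script, run while the server above is listening on 127.0.0.1:8000. The
# credentials are illustrative and must first be created with novo_usuario().
import xmlrpc.client

def exemplo_cliente(host="127.0.0.1", porta=8000):
    proxy = xmlrpc.client.ServerProxy("http://%s:%d" % (host, porta))
    print(proxy.novo_usuario("zeus", "secret", "True"))   # True on success
    ok, is_root = proxy.login("zeus", "secret")           # [True, True] for a valid root login
    print(ok, is_root)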
|
StarcoderdataPython
|
1997433
|
<filename>tests/components/rfxtrx/test_init.py
"""The tests for the Rfxtrx component."""
from unittest.mock import call
from homeassistant.components.rfxtrx import DOMAIN
from homeassistant.components.rfxtrx.const import EVENT_RFXTRX_EVENT
from homeassistant.core import callback
from homeassistant.helpers.device_registry import (
DeviceRegistry,
async_get_registry as async_get_device_registry,
)
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
from tests.components.rfxtrx.conftest import create_rfx_test_cfg
async def test_valid_config(hass):
"""Test configuration."""
assert await async_setup_component(
hass,
"rfxtrx",
{
"rfxtrx": {
"device": "/dev/serial/by-id/usb"
+ "-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0",
}
},
)
async def test_valid_config2(hass):
"""Test configuration."""
assert await async_setup_component(
hass,
"rfxtrx",
{
"rfxtrx": {
"device": "/dev/serial/by-id/usb"
+ "-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0",
"debug": True,
}
},
)
async def test_invalid_config(hass):
"""Test configuration."""
assert not await async_setup_component(hass, "rfxtrx", {"rfxtrx": {}})
assert not await async_setup_component(
hass,
"rfxtrx",
{
"rfxtrx": {
"device": "/dev/serial/by-id/usb"
+ "-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0",
"invalid_key": True,
}
},
)
async def test_fire_event(hass, rfxtrx):
"""Test fire event."""
entry_data = create_rfx_test_cfg(
device="/dev/serial/by-id/usb-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0",
automatic_add=True,
devices={
"0b1100cd0213c7f210010f51": {"fire_event": True},
"0716000100900970": {"fire_event": True},
},
)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
device_registry: DeviceRegistry = await async_get_device_registry(hass)
calls = []
@callback
def record_event(event):
"""Add recorded event to set."""
assert event.event_type == "rfxtrx_event"
calls.append(event.data)
hass.bus.async_listen(EVENT_RFXTRX_EVENT, record_event)
await rfxtrx.signal("0b1100cd0213c7f210010f51")
await rfxtrx.signal("0716000100900970")
device_id_1 = device_registry.async_get_device(
identifiers={("rfxtrx", "11", "0", "213c7f2:16")}
)
assert device_id_1
device_id_2 = device_registry.async_get_device(
identifiers={("rfxtrx", "16", "0", "00:90")}
)
assert device_id_2
assert calls == [
{
"packet_type": 17,
"sub_type": 0,
"type_string": "AC",
"id_string": "213c7f2:16",
"data": "0b1100cd0213c7f210010f51",
"values": {"Command": "On", "Rssi numeric": 5},
"device_id": device_id_1.id,
},
{
"packet_type": 22,
"sub_type": 0,
"type_string": "Byron SX",
"id_string": "00:90",
"data": "0716000100900970",
"values": {"Command": "Chime", "Rssi numeric": 7, "Sound": 9},
"device_id": device_id_2.id,
},
]
async def test_send(hass, rfxtrx):
"""Test configuration."""
entry_data = create_rfx_test_cfg(device="/dev/null", devices={})
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.services.async_call(
"rfxtrx", "send", {"event": "0a520802060101ff0f0269"}, blocking=True
)
assert rfxtrx.transport.send.mock_calls == [
call(bytearray(b"\x0a\x52\x08\x02\x06\x01\x01\xff\x0f\x02\x69"))
]
|
StarcoderdataPython
|
6512284
|
# <NAME>'s Solution:
class Solution(object):
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums or len(nums) == 0:
return 0
nums.sort()
sum = 0
for i in range(0,len(nums)-1,2):
sum += min(nums[i],nums[i+1])
return sum
# Xidong's Solution:
class Solution(object):
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
return sum(nums[0::2])
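# Quick check of the solutions above (both return the same value): for
# [1, 4, 3, 2] the optimal pairing is (1, 2) and (3, 4), so the sum of the
# pair minimums is 1 + 3 = 4.
if __name__ == "__main__":
    print(Solution().arrayPairSum([1, 4, 3, 2]))  # 4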
|
StarcoderdataPython
|
11328733
|
<filename>share_file/__init__.py
# -*- coding: utf-8 -*-
import threading
from os import path
from mimetypes import types_map
from urllib import request
from urllib.error import URLError
from urllib.parse import quote
from fman import (
DirectoryPaneCommand,
show_alert,
show_status_message,
clear_status_message,
YES,
NO,
)
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.1"
__maintainer__ = "<NAME>"
template = "ShareFile\n\n{0}\n\n{1}"
class ShareFile(DirectoryPaneCommand):
def __call__(self):
filepath = self.pane.get_file_under_cursor()
if not filepath:
return
filepath = filepath.replace('file://', '')
filename = path.basename(filepath)
if not path.isfile(filepath):
msg = "ShareFile: '{0}' is not a file, please select a file.".format(filename)
show_status_message(msg, timeout_secs=3)
return
msg = template.format("Share file '{0}'?".format(filename), "")
response = show_alert(msg, buttons=NO|YES, default_button=YES)
if response == YES:
# upload in separate thread so we don't block UI, especially when
# uploading large (MB) files.
t = UploadThread(1, filepath, filename)
t.start()
class UploadThread(threading.Thread):
def __init__(self, tid, filepath, filename):
threading.Thread.__init__(self)
self.thread_id = tid
self.filepath = filepath
self.filename = quote(filename)
def run(self):
_, ext = path.splitext(self.filename)
try:
mime_type = types_map[ext]
except KeyError:
mime_type = "application/octet-stream"
with open(self.filepath, 'rb') as f:
data = f.read()
headers = {
"Accept" : "*/*",
"Accept-Encoding" : "gzip,deflate",
"Accept-Language" : "en-US,en;q=0.8",
"Connection" : "keep-alive",
"Content-Length" : len(data),
"Content-Type" : mime_type,
"Host" : "transfer.sh",
"Origin" : "https://transfer.sh",
"Referer" : "https://transfer.sh/",
"User-Agent" : "Mozilla/5.0",
"X_FILENAME" : self.filename
}
url = "https://transfer.sh/" + self.filename
req = request.Request(url, data=data, headers=headers, method="PUT")
try:
show_status_message("ShareFile: uploading file...")
with request.urlopen(req, timeout=10) as resp:
if resp.status == 200:
body = resp.read()
share_link = body.decode('utf-8').strip('\n')
msg = template.format(share_link, "")
else:
msg = template.format("Could not upload file",
str(resp.status) + " " + resp.reason)
clear_status_message()
show_alert(msg)
except URLError as e:
msg = template.format(e.reason, "")
show_alert(msg)
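# A standalone sketch of the same upload flow outside of fman (illustrative
# only: it assumes transfer.sh still accepts anonymous PUT uploads and that
# the given local path exists).
def upload_example(local_path="/tmp/example.txt"):
    with open(local_path, "rb") as f:
        payload = f.read()
    name = quote(path.basename(local_path))
    req = request.Request("https://transfer.sh/" + name, data=payload,
                          headers={"Content-Type": "application/octet-stream"},
                          method="PUT")
    with request.urlopen(req, timeout=10) as resp:
        return resp.read().decode("utf-8").strip()  # the shareable link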
|
StarcoderdataPython
|
11293852
|
"""
Framework-specific classes/functions/objects that rely on the `ply` framework.
"""
from . import lex, yacc
import traceback
__all__ = [
"lex",
"yacc",
"start_console",
]
def _check_lexer_interface(x):
if not hasattr(x, 'token'):
return False
if not hasattr(x.token, '__call__'):
return False
if not hasattr(x, 'input'):
return False
if not hasattr(x.input, '__call__'):
return False
return True
def _check_parser_interface(x):
return hasattr(x, 'parse') and hasattr(x.parse, '__call__')
def _lexer_process(lexer, text):
ret = []
lexer.input(text)
while True:
t = lexer.token()
if t is None:
return ret
# *MUST* print and append the tokens in a loop,
# so that output is in sync with the processing of input.
# This is a requirement for console responsiveness
print t
ret.append(t)
def _parser_process(parser, *args, **kwargs):
# NOTE
# Interestingly, if the parser is a plier LRParser already bound to a
# lexer, calling the parse method with another lexer would quietly
# override the existing binding.
ret = parser.parse(*args, **kwargs)
print ret
return ret
def start_console(lp, prompt=None, is_lexer=None):
"""
Interactive console for tokenizing/parsing.
Given a lexer/parser, start an interactive console to print debug
information while tokenizing/parsing the input. Return the last
tokenized/parsed result when the console is exited by EOF (<Ctrl-d> on a
blank line for UNIX-like systems, and <Ctrl-z> for Windows).
If the 'is_lexer' argument is True, the first argument is treated as a
lexer; if it is False, then the first argument is treated as a parser. In
both cases, the object is checked for the minimal interface to satisfy the
use as lexer/parser.
For an object to be a parser, it must have a callable attribute 'parse';
for it to be a lexer, it must have a callable attribute 'input', and a
callable attribute 'token'. This is based on the minimal interface used
by `ply.yacc`, as documented in:
http://www.dabeaz.com/ply/ply.html
Section '4.20 Miscellaneous Issues'
If 'is_lexer' is None, then the use of the first argument is automatically
determined. It is first checked for the minimal parser interface. If it
does not satisfy the minimal parser interface, it is then checked for the
minimal lexer interface. If both checks failed, an exception is raised.
"""
if is_lexer is None:
if _check_parser_interface(lp):
is_lexer = False
elif _check_lexer_interface(lp):
is_lexer = True
else:
# neither minimal interfaces are satisfied
raise Exception(
"Neither of the minimal interfaces for parser nor lexer "
"could be satisfied by the first argument.")
elif is_lexer is False:
if not _check_parser_interface(lp):
raise Exception(
"The first argument does not satisfy "
"the minimal interface for parser.")
elif is_lexer is True:
if not _check_lexer_interface(lp):
raise Exception(
"The first argument does not satisfy "
"the minimal interface for lexer.")
else:
raise Exception(
"The value of 'is_lexer', {}, is not recognized. "
"Use True, False, or None to treat the object as "
"lexer, parser, or automatically determined.".format(is_lexer))
process = _lexer_process if is_lexer else _parser_process
prompt = prompt or ('LEX > ' if is_lexer else 'PARSE > ')
ret = None
while True:
try:
s = raw_input(prompt)
except EOFError as eof:
return ret
try:
ret = process(lp, s)
except Exception as e:
traceback.print_exc()
print e
ret = None
continue
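# Hypothetical usage sketch (assumes the standalone `ply` package is
# installed): build a tiny lexer and hand it to start_console(), which
# auto-detects the lexer interface and tokenizes each input line until EOF.
def _demo_console():
    import ply.lex as plylex

    tokens = ('NUMBER', 'PLUS')
    t_PLUS = r'\+'
    t_ignore = ' \t'

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        t.lexer.skip(1)

    # ply builds the lexer from the rule definitions in the enclosing frame.
    return start_console(plylex.lex(), prompt='CALC > ')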
|
StarcoderdataPython
|
3434431
|
<reponame>litxio/ptghci-engine
from pygments import highlight
from pygments.lexers.haskell import HaskellLexer
from pygments.formatters import Terminal256Formatter
from pygments.styles import get_style_by_name
def hl(s, config, style_name=None):
if style_name:
style = get_style_by_name(style_name)
else:
style = get_style_by_name(config.style)
# Don't try to highlight if the string already has escape sequences
if '\033[' in s:
return s
else:
# Sometimes highlight adds an extra newline, so we remove it
return highlight(s, HaskellLexer(),
Terminal256Formatter(style=style)).strip()
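# Usage sketch (illustrative): passing an explicit style_name means the
# config argument is never consulted, so None is enough here.
if __name__ == "__main__":
    print(hl('main = putStrLn "hello"', config=None, style_name='monokai'))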
|
StarcoderdataPython
|
8162030
|
import difflib
import filecmp
from dataclasses import dataclass
from pathlib import Path
from typing import List
@dataclass
class ComparisonResult:
diffs: List[str]
class FileComparator:
def compare(self, file1: Path, file2: Path) -> ComparisonResult:
equal = filecmp.cmp(str(file1), str(file2))
if equal:
return ComparisonResult([])
content1 = file1.read_text(encoding='utf-8').split("\n")
content2 = file2.read_text(encoding='utf-8').split("\n")
diffs = difflib.unified_diff(content1, content2)
return ComparisonResult([*diffs])
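# Usage sketch (illustrative): compare two temporary files; diffs is an empty
# list when the files are identical, otherwise it holds unified-diff lines.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        a, b = Path(tmp) / "a.txt", Path(tmp) / "b.txt"
        a.write_text("hello\nworld\n", encoding="utf-8")
        b.write_text("hello\nthere\n", encoding="utf-8")
        print("\n".join(FileComparator().compare(a, b).diffs))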
|
StarcoderdataPython
|
346403
|
<reponame>lim0606/pytorch-ardae-vae
import torch
import torch.autograd
from torch.autograd import Function
import torch.nn.functional as F
'''
https://pytorch.org/docs/stable/notes/extending.html
'''
class AuxLossForGradFunction(Function):
# Note that both forward and backward are @staticmethods
@staticmethod
def forward(ctx, input, grad):
ctx.save_for_backward(input, grad.detach())
return torch.sum(torch.zeros_like(input))
# This function has only a single output, so it gets only one gradient
@staticmethod
def backward(ctx, grad_output):
# This is a pattern that is very convenient - at the top of backward
# unpack saved_tensors and initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
input, grad = ctx.saved_tensors
grad_input = grad_grad = None
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
if ctx.needs_input_grad[0]:
grad_input = grad
return grad_input, grad_grad
aux_loss_for_grad = AuxLossForGradFunction.apply
''' test '''
#import ipdb
if __name__ == '__main__':
batch_size = 10
input_dim = 20
input = torch.randn(batch_size, input_dim, dtype=torch.double, requires_grad=True)
grad = torch.randn(batch_size, input_dim, dtype=torch.double, requires_grad=False)
target = torch.randn(batch_size, input_dim, dtype=torch.double)
#loss = F.mse_loss(input, target)
#loss.backward()
#ipdb.set_trace()
loss = aux_loss_for_grad(input, grad)
loss.backward()
#ipdb.set_trace()
print(input.grad)
print(grad)
print(torch.allclose(input.grad, grad))
|
StarcoderdataPython
|
178688
|
<gh_stars>0
from django.conf.urls import patterns, include, url
from account import views
urlpatterns = patterns('',
url(r'^register/$', views.register, name='register'),
url(r'^login/$', views.login, name='login'),
url(r'^logout/$', views.user_logout, name='user_logout'),
)
|
StarcoderdataPython
|
11363307
|
"""
Test on images split into directories. This assumes we've split
our videos into frames and moved them to their respective folders
and trained our model.
Based on:
https://keras.io/preprocessing/image/
and
https://keras.io/applications/
"""
from train_custom_cnn import get_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Flatten, concatenate, Lambda, Input, Dropout, Dense, MaxPooling2D, Conv2D
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping, CSVLogger
from tensorflow.keras.utils import plot_model
import tensorflow as tf
from data import DataSet
import time
import os.path
data = DataSet()
# Helper: Save the model.
checkpointer = ModelCheckpoint(
filepath=os.path.join('data', 'checkpoints', 'inception.{epoch:03d}-{accuracy:.2f}.hdf5'),
verbose=1,
save_best_only=True)
# Helper: Stop when we stop learning.
early_stopper = EarlyStopping(patience=10)
# Helper: TensorBoard
tensorboard = TensorBoard(log_dir=os.path.join('data', 'logs'))
# Helper: Save results.
timestamp = time.time()
csv_logger = CSVLogger(os.path.join('data', 'logs', 'InceptionV3' + '-' + 'training-' + \
str(timestamp) + '.log'))
def crop_center(img):
cropx = 89
cropy = 89
y, x, z = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy, startx:startx+cropx]-96
def subtract_mean(img):
return img-96
def get_test_generator():
context_datagen = ImageDataGenerator()
fovea_datagen = ImageDataGenerator(
preprocessing_function=crop_center)
context_generator = context_datagen.flow_from_directory(
os.path.join('data', 'test'),
target_size=(89, 89),
batch_size=32,
classes=data.classes,
class_mode='categorical',
interpolation='bilinear')
fovea_generator = fovea_datagen.flow_from_directory(
os.path.join('data', 'test'),
target_size=(89, 89),
batch_size=32,
classes=data.classes,
class_mode='categorical')
while True:
f = fovea_generator.next()
c = context_generator.next()
yield [f[0], c[0]], f[1]
def test_model(model, test_generator, nb_steps, callbacks=[]):
# train_generator, validation_generator = generators
model.evaluate(
test_generator,
verbose=1,
steps=nb_steps,
callbacks=callbacks)
return model
def main(weights_file):
model = get_model()
print(model.summary())
test_generator = get_test_generator()
# if weights_file is None:
# print("Loading network from ImageNet weights.")
# # Get and train the top layers.
# model = freeze_all_but_top(model)
# model = train_model(model, 10, generators)
# else:
print("Loading saved model: %s." % weights_file)
model.load_weights(weights_file)
callbacks = [checkpointer, tensorboard, csv_logger]
model = test_model(model, test_generator, 100)
if __name__ == '__main__':
weights_file = 'data/checkpoints/SF_MultiRes.1233-0.79.hdf5'
main(weights_file)
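# Quick sanity check of crop_center (illustrative): on a 100x100x3 array of
# 96s it returns the centered 89x89 crop with 96 subtracted, i.e. all zeros.
if __name__ == '__main__':
    import numpy as np
    dummy = np.full((100, 100, 3), 96, dtype=np.float32)
    cropped = crop_center(dummy)
    print(cropped.shape, float(cropped.min()), float(cropped.max()))  # (89, 89, 3) 0.0 0.0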
|
StarcoderdataPython
|