{
"source": "jonasstraub/essensfindung",
"score": 3
}
#### File: db/crud/user.py
```python
from typing import Union
from sqlalchemy.orm import Session
from db.base import Person
from schemes import scheme_user
from tools.hashing import Hasher
def create_user(db: Session, person: scheme_user.UserCreate) -> Person:
"""Create / Add Person to the Database with hashed password
Args:
db (Session): Session to the DB
person (scheme_user.UserCreate): The person to create
Returns:
Person: Return the created person
"""
db_person = Person(email=person.email, hashed_password=Hasher.get_password_hash(person.password))
db.add(db_person)
db.commit()
db.refresh(db_person)
return db_person
def get_user_by_mail(db: Session, email: str) -> Union[Person, None]:
"""Get the person from the Databse if one found
Args:
db (Session): Session to the DB
email (str): eMail to filter
Returns:
Person | None
"""
return db.query(Person).filter(Person.email == email).first()
def update_user(db: Session, current_user: scheme_user.UserBase, new_user: scheme_user.UserCreate) -> Person:
"""Update the User in the Database. You can change the Mail or Password
Args:
db (Session): Session to the DB
current_user (scheme_user.UserBase): User to Update
new_user (scheme_user.UserCreate): Contains the updated values for the User
Returns:
Person: Return the new DB values
"""
db_new_user = Person(email=new_user.email, hashed_password=Hasher.get_password_hash(new_user.password))
db.query(Person).filter(Person.email == current_user.email).update(
{Person.email: db_new_user.email, Person.hashed_password: db_new_user.hashed_password}
)
db.commit()
return db_new_user
def delete_user(db: Session, user: scheme_user.UserBase) -> int:
"""Remove the Person with the given email
Args:
db (Session): Session to the DB
        user (scheme_user.UserBase): User whose email identifies the Person to delete
Returns:
        int: Number of affected rows
"""
rows = db.query(Person).filter(Person.email == user.email).delete()
db.commit()
return rows
```
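The CRUD helpers above all take an open SQLAlchemy `Session` plus the project's Pydantic schemes. A minimal usage sketch (the engine URL and example credentials are placeholders, not part of the repository):
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from db.base import Base
from db.crud.user import create_user, delete_user, get_user_by_mail
from schemes import scheme_user

# Hypothetical throwaway SQLite database, just for this sketch.
engine = create_engine("sqlite:///./example.db", connect_args={"check_same_thread": False})
Base.metadata.create_all(bind=engine)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

db = SessionLocal()
try:
    created = create_user(db, scheme_user.UserCreate(email="user@example.com", password="secret"))
    assert get_user_by_mail(db, created.email) is not None
    delete_user(db, scheme_user.UserBase(email=created.email))
finally:
    db.close()
```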
#### File: essensfindung/tests/test_db.py
```python
import pytest
from sqlalchemy import create_engine
from sqlalchemy import exc
from sqlalchemy.orm import sessionmaker
from db.base import Base
from db.crud.bewertung import create_bewertung
from db.crud.bewertung import delete_bewertung
from db.crud.bewertung import get_all_user_bewertungen
from db.crud.bewertung import get_bewertung_from_user_to_rest
from db.crud.bewertung import update_bewertung
from db.crud.restaurant import create_restaurant
from db.crud.restaurant import delete_restaurant
from db.crud.restaurant import get_all_restaurants
from db.crud.restaurant import get_restaurant_by_id
from db.crud.user import create_user
from db.crud.user import delete_user
from db.crud.user import get_user_by_mail
from db.crud.user import update_user
from schemes import scheme_rest
from schemes import scheme_user
from tools.hashing import Hasher
SQLALCHEMY_DATABASE_URL = "sqlite:///./tests/test_db.db"
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False})
# Use connect_args parameter only with sqlite
SessionTesting = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base.metadata.create_all(bind=engine)
@pytest.mark.filterwarnings("ignore")
@pytest.fixture(scope="function")
def db_session():
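    # Each test runs on its own connection inside an outer transaction that is
    # rolled back afterwards, so tests stay isolated despite sharing one SQLite file.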
connection = engine.connect()
transaction = connection.begin()
session = SessionTesting(bind=connection)
yield session # use the session in tests.
session.close()
transaction.rollback()
connection.close()
def test_restaurant(db_session: SessionTesting):
# Add two Restaurants
# add the first restaurant
rest_add = scheme_rest.RestaurantBase(place_id="1234")
rest_return = create_restaurant(db_session, rest_add)
assert rest_add == scheme_rest.RestaurantBase(**rest_return.__dict__)
# add the second restaurant
rest_add_2 = scheme_rest.RestaurantBase(place_id="567")
rest_return = create_restaurant(db_session, rest_add_2)
assert rest_add_2 == scheme_rest.RestaurantBase(**rest_return.__dict__)
# Get one Restaurant
rest_return = get_restaurant_by_id(db_session, rest_add.place_id)
assert rest_add == scheme_rest.RestaurantBase(**rest_return.__dict__)
# Get all Restaurants
rests_return = get_all_restaurants(db_session)
rests_return_schemes = [scheme_rest.RestaurantBase(**rest.__dict__) for rest in rests_return]
assert rests_return_schemes == [rest_add, rest_add_2]
# Delete that Restaurant
affected_rows = delete_restaurant(db_session, rest_add)
assert affected_rows == 1
assert get_restaurant_by_id(db_session, rest_add.place_id) is None
    # Check that the same restaurant cannot be added twice
with pytest.raises(exc.SQLAlchemyError):
create_restaurant(db_session, rest_add_2)
def test_user(db_session: SessionTesting):
# Add two users
# ...first user
user_add = scheme_user.UserCreate(email="<EMAIL>", password="<PASSWORD>")
user_ret = create_user(db_session, user_add)
assert user_add.email == user_ret.email
assert Hasher.verify_password(user_add.password, user_ret.hashed_password)
    # ...second user
user_add_2 = scheme_user.UserCreate(email="<EMAIL>", password="<PASSWORD>")
user_ret = create_user(db_session, user_add_2)
assert user_add_2.email == user_ret.email
assert Hasher.verify_password(user_add_2.password, user_ret.hashed_password)
# Update User password
user_add.password = "<PASSWORD>"
user_ret = update_user(db_session, user_add, user_add)
assert user_add.email == user_ret.email
assert Hasher.verify_password(user_add.password, user_ret.hashed_password)
# Update User email
tmp_user = user_add.copy()
user_add.email = "<EMAIL>"
user_ret = update_user(db_session, tmp_user, user_add)
assert user_add.email == user_ret.email
assert Hasher.verify_password(user_add.password, user_ret.hashed_password)
# Get one User
user_ret = get_user_by_mail(db_session, user_add.email)
assert user_add.email == user_ret.email
assert Hasher.verify_password(user_add.password, user_ret.hashed_password)
# Delete one User
assert 1 == delete_user(db_session, user_add_2)
    # Check that only one user with the same email can be added
with pytest.raises(exc.SQLAlchemyError):
create_user(db_session, user_add)
def test_bewertung(db_session: SessionTesting):
fake_user = scheme_user.UserBase(email="<EMAIL>")
fake_rest = scheme_rest.RestaurantBase(place_id="000000")
user_add_1 = scheme_user.UserCreate(email="<EMAIL>", password="<PASSWORD>")
user_add_2 = scheme_user.UserCreate(email="<EMAIL>", password="<PASSWORD>")
rest_add_1 = scheme_rest.RestaurantBase(place_id="1234")
rest_add_2 = scheme_rest.RestaurantBase(place_id="5678")
# Add user
create_user(db_session, user_add_1)
create_user(db_session, user_add_2)
# Add restaurant
create_restaurant(db_session, rest_add_1)
create_restaurant(db_session, rest_add_2)
# Add assessment to user1 and rest1
assessment_add_1_1 = scheme_rest.RestBewertungCreate(
comment="This is a comment", rating=1.5, person=user_add_1, restaurant=rest_add_1
)
assessment_ret = create_bewertung(db_session, assessment_add_1_1)
assert assessment_ret.kommentar == assessment_add_1_1.comment
assert assessment_ret.rating == assessment_add_1_1.rating
assert assessment_ret.zeitstempel is not None
# Add assessment to user1 and rest2
assessment_add_1_2 = scheme_rest.RestBewertungCreate(
comment="This is a comment for rest 2", rating=2.5, person=user_add_1, restaurant=rest_add_2
)
assessment_ret = create_bewertung(db_session, assessment_add_1_2)
assert assessment_ret.kommentar == assessment_add_1_2.comment
assert assessment_ret.rating == assessment_add_1_2.rating
assert assessment_ret.zeitstempel is not None
# Add assessment to user2 and rest2
assessment_add_2_2 = scheme_rest.RestBewertungCreate(
comment="This is a comment 2", rating=3.5, person=user_add_2, restaurant=rest_add_2
)
assessment_ret = create_bewertung(db_session, assessment_add_2_2)
assert assessment_ret.kommentar == assessment_add_2_2.comment
assert assessment_ret.rating == assessment_add_2_2.rating
assert assessment_ret.zeitstempel is not None
# Get all assessments
assessments_ret = get_all_user_bewertungen(db_session, user_add_1)
assert len(assessments_ret) == 2
assessments_ret = get_all_user_bewertungen(db_session, user_add_2)
assert len(assessments_ret) == 1
# Get one assessment from one user to one rest
assessment_ret = get_bewertung_from_user_to_rest(db_session, user_add_1, rest_add_1)
assert assessment_ret.kommentar == assessment_add_1_1.comment
assert assessment_ret.rating == assessment_add_1_1.rating
assert assessment_ret.zeitstempel is not None
# Update assessment
updated_1_1 = assessment_add_1_1.copy()
updated_1_1.comment = "UPDATED"
updated_1_1.rating = 0
assessment_ret = update_bewertung(db_session, assessment_add_1_1, updated_1_1)
assert assessment_ret.kommentar == updated_1_1.comment
assert assessment_ret.rating == updated_1_1.rating
assert assessment_ret.person_email == updated_1_1.person.email
assert assessment_ret.place_id == updated_1_1.restaurant.place_id
    # Try to get assessments that do not exist
assessment_ret = get_all_user_bewertungen(db_session, fake_user)
assert assessment_ret is None
assessment_ret = get_bewertung_from_user_to_rest(db_session, fake_user, rest_add_1)
assert assessment_ret is None
assessment_ret = get_bewertung_from_user_to_rest(db_session, user_add_1, fake_rest)
assert assessment_ret is None
# Try to add assessments with invalid user and restaurant
with pytest.raises(exc.SQLAlchemyError):
assessment_ret = create_bewertung(
db_session,
scheme_rest.RestBewertungCreate(comment="none", rating=0, person=fake_user, restaurant=rest_add_1),
)
with pytest.raises(exc.SQLAlchemyError):
assessment_ret = create_bewertung(
db_session,
scheme_rest.RestBewertungCreate(
comment="none", rating=0, person=scheme_user.UserBase(email=user_add_1.email), restaurant=fake_rest
),
)
# Delete Assessments
assert 1 == delete_bewertung(db_session, user_add_1, rest_add_1)
assert get_bewertung_from_user_to_rest(db_session, user_add_1, rest_add_1) is None
assert 0 == delete_bewertung(db_session, user_add_1, rest_add_1)
assert 0 == delete_bewertung(db_session, fake_user, rest_add_2)
assert 0 == delete_bewertung(db_session, user_add_1, fake_rest)
    # Check that only one assessment per user and restaurant is possible
with pytest.raises(exc.IntegrityError):
create_bewertung(db_session, assessment_add_2_2)
```
#### File: essensfindung/tests/test_gapi.py
```python
import json
from typing import List
import httpx
import pytest
from pytest_httpx import HTTPXMock
from pytest_mock import MockerFixture
from schemes import Cuisine
from schemes.scheme_rest import Restaurant
from tools import gapi
@pytest.fixture
def fake_nearby_search() -> dict:
with open("tests/example_nearby_search.json", "r", encoding="utf8") as file:
return json.load(file)
@pytest.fixture
def fake_nearby_search_restaurants(fake_nearby_search: dict) -> List[Restaurant]:
return [Restaurant(**value) for value in fake_nearby_search.get("results")]
@pytest.fixture
def fake_place_details() -> dict:
with open("tests/example_place_details.json", "r", encoding="utf8") as file:
return json.load(file)
@pytest.fixture
def fake_restaurants() -> List[Restaurant]:
with open("tests/example_restaurants.json", "r", encoding="utf8") as file:
fake_restaurants = json.load(file)
return [Restaurant(**value) for value in fake_restaurants]
@pytest.mark.parametrize("status_code", [100, 200, 300, 400])
def test_nearby_search(
mocker: MockerFixture,
httpx_mock: HTTPXMock,
status_code: int,
fake_nearby_search: dict,
fake_nearby_search_restaurants: List[Restaurant],
):
    # Fake data
params: dict = {
"keyword": Cuisine.DOENER.value,
"location": "42,42",
"opennow": True,
"radius": "42",
"type": "restaurant",
"language": "de",
}
# Mock httpx responses
httpx_mock.add_response(status_code=status_code, json=fake_nearby_search)
# Mock other functions
mocker.patch("configuration.config.Setting.GOOGLE_API_KEY", "42")
if status_code != 200:
with pytest.raises(httpx.HTTPStatusError):
gapi.nearby_search(params)
else:
restaurants = gapi.nearby_search(params)
assert fake_nearby_search_restaurants == restaurants
def test_place_details(
httpx_mock: HTTPXMock,
fake_place_details: dict,
fake_restaurants: List[Restaurant],
fake_nearby_search_restaurants: List[Restaurant],
mocker: MockerFixture,
):
# Mock httpx responses
for fake_place_detail in fake_place_details:
url = f"https://maps.googleapis.com/maps/api/place/details/json?key=42&place_id={fake_place_detail['result']['place_id']}"
httpx_mock.add_response(status_code=200, json=fake_place_detail, url=url)
# Mock other functions
mocker.patch("configuration.config.Setting.GOOGLE_API_KEY", "42")
restaurants = gapi.place_details(fake_nearby_search_restaurants)
assert fake_restaurants == restaurants
```
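The tests above also document the calling convention of `tools.gapi`: `nearby_search` takes a Google Places request parameter dict and returns a list of `Restaurant` schemes (raising `httpx.HTTPStatusError` on non-200 responses), and `place_details` enriches such a list per `place_id`. A hedged sketch outside the test harness (location and radius values are made up, and a configured Google API key is assumed):
```python
from schemes import Cuisine
from tools import gapi

params = {
    "keyword": Cuisine.DOENER.value,
    "location": "47.66,9.17",  # "lat,lng" string, mirroring the test fixture
    "opennow": True,
    "radius": "5000",
    "type": "restaurant",
    "language": "de",
}
restaurants = gapi.nearby_search(params)      # may raise httpx.HTTPStatusError
restaurants = gapi.place_details(restaurants)  # fetches details for each place_id
```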
#### File: essensfindung/views/restaurant.py
```python
import fastapi
from starlette.requests import Request
from starlette.templating import Jinja2Templates
from schemes import scheme_rest
templates = Jinja2Templates("templates")
router = fastapi.APIRouter()
@router.get("/findrestaurant")
async def findrestaurant(request: Request, rest_name: str, costs: float, cuisine: str):
    # api.Search_restaurant(...)
    restaurant = scheme_rest.Restaurant(
        place_id="PlaceID", name=rest_name,
        geometry=scheme_rest.Geometry(location=scheme_rest.LocationRest(lat="47.7007", lng="9.562", adr=cuisine)),
        maps_url="https://maps.google.com/?cid=10544281732087259755", rating=4.0, own_rating=costs,
        phone_number="07541", homepage="http://www.alpha-fn.de/",
    )
    return templates.TemplateResponse(
        "restaurant/restaurant_result.html", {"request": request, "restaurant": restaurant}
    )
```
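The route above takes `rest_name`, `costs`, and `cuisine` as query parameters and renders `restaurant/restaurant_result.html`. A hypothetical request against a locally running instance (host, port, and parameter values are assumptions):
```python
import httpx

resp = httpx.get(
    "http://localhost:8000/findrestaurant",
    params={"rest_name": "Alpha", "costs": 10.5, "cuisine": "doener"},
)
print(resp.status_code)  # 200 if the template rendered successfully
```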
{
"source": "jonasstr/kita",
"score": 2
}
#### File: kita/kit_dl/assistant.py
```python
import getpass
import os
import click
from colorama import Fore, Style, init
import kit_dl.misc.utils as utils
class Assistant:
def __init__(self, yaml, dao):
# Initialize colorama.
init()
self.yaml = yaml
self.dao = dao
def echo(self, text, is_prompt=False):
"""Forwards the given text to click.echo() and optionally applies a different style to the text."""
color = Fore.CYAN if is_prompt else Style.RESET_ALL
if is_prompt:
text = "> " + text
click.echo(color + text)
def prompt(self, text):
"""Forwards the given text to click.echo() and adds cyan color and a '>' symbol
to the start of the string.
"""
return click.prompt(Fore.CYAN + "> " + text)
def confirm(self, text, default=False):
suffix = " (Y/n) [y]: " if default else " (y/N) [n]: "
return click.confirm(
Fore.CYAN + "> " + text, default=default, show_default=False, prompt_suffix=suffix
)
def setup_user(self):
"""Starts the setup assistant for setting up the user.yml file.
Saves the login credentials of the user and the root path for downloading assignments.
"""
from tkinter import filedialog
# user.yml already exists.
if os.path.isfile(self.dao.user_yml_path):
if not self.confirm("Kita is already set up. Overwrite existing config?"):
return False
self.echo(
"\nWelcome to the kit-dl setup utility.\n\nPlease enter values for the following "
"settings (just press Enter to\naccept a default value, if one is given in brackets).\n"
)
data = {}
data["user_name"] = self.prompt("Enter your correct ilias user name").strip()
data["password"] = self.select_password()
self.echo(
"\nChoose a location for saving your assignments. If you already\n"
"downloaded assignments manually please choose your KIT folder\nfor auto-detection."
)
self.echo("Select the root path for your assignments from the dialog window:", is_prompt=True)
root_path = os.path.abspath(filedialog.askdirectory())
data["destination"] = {}
data["destination"]["root_path"] = root_path
# Set default rename format.
data["destination"]["rename_format"] = "Blatt$$"
self.dao.create_user(data)
self.echo("Saved root folder '{}'.".format(utils.reformat(root_path)))
return True
def select_password(self):
password = getpass.getpass("> Enter your ilias password: ").strip()
while getpass.getpass("> Please confirm the password: ").strip() != password:
self.echo("The passwords do not match." + Fore.CYAN)
password = getpass.getpass("> Enter your ilias password: ").strip()
return password
def setup_config(self):
"""Starts the setup assistant for setting up the config.yml file."""
self.dao.load_config()
if not self.is_user_setup():
return False
root_path = self.dao.user_data["destination"]["root_path"]
assignment_folders = self.detected_assignment_folders(root_path)
added_courses = []
if assignment_folders:
added_courses = self.show_kit_folder_detected_dialog(assignment_folders, root_path)
self.show_confirm_all_courses_dialog(
(course for course in self.dao.config_data), added_courses, root_path
)
return True
def is_user_setup(self):
if os.path.isfile(self.dao.user_yml_path):
self.dao.load_user()
root_path = self.dao.user_data["destination"]["root_path"]
if not os.path.isdir(root_path):
self.echo(
"\nKit-dl has not been configured correctly (root_path not found).\n"
"Use 'kit-dl setup --user' instead."
)
return False
return True
def show_kit_folder_detected_dialog(self, assignment_folders, root_path):
"""Asks the user to confirm the download locations for the given courses."""
self.echo("\nPossible KIT folder detected:")
added_courses = []
for selection in assignment_folders:
full_path = os.path.join(root_path, selection["folder_name"])
message = utils.reformat(
"Save {} assignments to '{}'?".format(selection["course_key"].upper(), full_path)
)
if self.confirm(message, default=True):
added_courses.append(selection["course_key"])
self.dao.config_data[selection["course_key"]]["path"] = selection["folder_name"]
self.dao.dump_config()
return added_courses
def show_confirm_all_courses_dialog(self, assignment_folders, added_courses, root_path):
if not added_courses:
download_dir = self.create_download_folder(assignment_folders, root_path)
click.echo("Assignments will be saved to '{}'".format(download_dir))
return
self.update_selected_courses(added_courses)
from tkinter import filedialog
while self.choice and not self.confirm(
"Are these all courses: {}?".format(self.selected), default=True
):
course_key = self.show_select_folder_manually_dialog(self.choice, "Which courses are missing?")
selected_path = filedialog.askdirectory()
self.show_assignments_save_location_dialog(course_key, selected_path)
added_courses.append(course_key.lower())
self.update_selected_courses(added_courses)
def update_selected_courses(self, added_courses):
self.selected = ", ".join(course.upper() for course in added_courses)
self.choice = ", ".join(key.upper() for key in self.dao.config_data.keys() if key not in added_courses)
def show_assignments_save_location_dialog(self, course_key, selected_path):
self.echo(
"{} assignments will be saved to '{}'.".format(course_key.upper(), utils.reformat(selected_path))
)
self.dao.config_data[course_key]["path"] = selected_path
self.dao.dump_config()
def show_select_folder_manually_dialog(self, choice, prompt_msg):
"""Shows the setup dialog for adding the location of additional assignments."""
course_key = self.prompt("{} Choose from {}".format(prompt_msg, choice))
while not course_key.lower() in self.dao.config_data.keys():
self.echo("Error: invalid input")
course_key = self.prompt("{} Choose from {}".format(prompt_msg, choice))
self.echo("Choose a location for saving your {} courses:".format(course_key.upper()), is_prompt=True)
return course_key
def create_download_folder(self, course_keys, root_path):
"""
:param course_keys:
:param root_path:
"""
download_dir = os.path.join(root_path, "Downloads")
os.makedirs(download_dir, exist_ok=True)
for key in course_keys:
if key in self.dao.config_data:
new_key = self.dao.config_data[key]["name"].replace("/", "-").replace("\\", "-")
course_dir = os.path.join(download_dir, new_key)
os.makedirs(course_dir, exist_ok=True)
self.dao.config_data[key]["path"] = course_dir
self.dao.dump_config()
return download_dir
def detected_assignment_folders(self, root_path):
course_folders = next(os.walk(root_path))[1]
assignment_folders = []
for folder_name in course_folders:
sub_folders = next(os.walk(os.path.join(root_path, folder_name)))[1]
result = self.search_for_assignments_folder(folder_name, sub_folders)
if result:
assignment_folders.append(result)
return assignment_folders
def search_for_assignments_folder(self, folder_name, sub_folders):
"""Searches for a possible folder containing the assignments based on the folder name."""
for course_key in self.dao.config_data:
# Folder has been found.
if course_key == folder_name.lower() or self.are_similar(
folder_name, course_key, self.dao.config_data[course_key]["name"]
):
sub_folder_name = self.found_assignments_sub_folder(folder_name, sub_folders)
return (
{"course_key": course_key, "folder_name": sub_folder_name}
if sub_folder_name
else {"course_key": course_key, "folder_name": folder_name}
)
def found_assignments_sub_folder(self, course_folder_name, sub_folders):
for sub_folder in sub_folders:
name_list = ["assignments", "blätter", "übungen", "übungsblätter"]
            # Check whether the sub-folder name contains one of the above names.
if any(x in sub_folder.lower() for x in name_list):
return os.path.join(course_folder_name, sub_folder)
def are_similar(self, folder_name, course_key, course_name):
"""Checks whether a given folder name is similar to the full name of a config.yml course
or is equal to the course key (e.g. la)."""
if (
folder_name.lower() == course_key
or folder_name.startswith(course_name)
or course_name.startswith(folder_name)
):
return True
course_suffixes = {"I": 1, "II": 2, "III": 3}
for suffix in course_suffixes:
if folder_name.endswith(suffix) or course_name.endswith(suffix):
return (
folder_name.replace(suffix, str(course_suffixes[suffix])) == course_name
or course_name.replace(suffix, str(course_suffixes[suffix])) == folder_name
)
```
#### File: kit_dl/misc/utils.py
```python
def reformat(value):
chars = {"Ä": "Ae", "Ö": "Oe", "Ü": "Ue", "ä": "ae", "ö": "oe", "ü": "ue", "\\": "/"}
for char in chars:
value = value.replace(char, chars[char])
return value
```
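A quick illustration of `reformat` (the input string is just an example): umlauts are transliterated and backslashes become forward slashes, presumably to keep the paths echoed by the assistant free of problematic characters.
```python
from kit_dl.misc.utils import reformat

print(reformat("Übungsblätter\\Blatt01"))  # -> Uebungsblaetter/Blatt01
```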
#### File: kita/tests/test_logger.py
```python
import unittest.mock as mock
from selenium.common.exceptions import TimeoutException
from kit_dl.misc.logger import ProgressLogger, SilentProgressLogger
from tests.base import BaseUnitTest
class TestLogger(BaseUnitTest):
@mock.patch("builtins.print")
def test_silent_logger_single_update(self, mock_print):
with SilentProgressLogger("LA") as logger:
logger.update(1)
self.assert_print_once_called_running("Updating LA: 1..", mock_print)
@mock.patch("builtins.print")
def test_silent_logger_multiple_updates(self, mock_print):
with SilentProgressLogger("LA") as logger:
logger.update(1)
logger.update(2)
self.assert_print_called_running("Updating LA: 1, 2..", mock_print)
@mock.patch("builtins.print")
def test_silent_logger_done(self, mock_print):
with SilentProgressLogger("LA") as logger:
logger.update(1)
self.assert_print_called_done("\rUpdating LA: 1, done.", mock_print)
@mock.patch("builtins.print")
def test_silent_logger_exception_during_second_update_should_be_handled(self, mock_print):
try:
with SilentProgressLogger("LA") as logger:
logger.update(1)
logger.update(2)
raise TimeoutException("indicating assignment 2 could not be found.")
except TimeoutException:
self.assert_print_called_done("\rUpdating LA: 1, done.", mock_print)
@mock.patch("builtins.print")
def test_silent_logger_exception_during_first_update_should_print_up_to_date(self, mock_print):
try:
with SilentProgressLogger("LA") as logger:
logger.update(1)
raise TimeoutException("indicating assignments are already up to date.")
except TimeoutException:
self.assert_print_called_done("\rUpdating LA: already up to date.", mock_print)
@mock.patch("builtins.print")
def test_progress_logger_single_update(self, mock_print):
with ProgressLogger("LA", "Blatt$$") as logger:
logger.update(1)
self.assert_print_once_called_running("Downloading 'Blatt01' from LA", mock_print)
@mock.patch("builtins.print")
def test_progress_logger_multiple_updates(self, mock_print):
with ProgressLogger("LA", "Blatt$$") as logger:
logger.update(1)
self.assert_print_called_running("Downloading 'Blatt01' from LA", mock_print)
logger.update(2)
self.assert_print_called_running("Downloading 'Blatt02' from LA", mock_print)
@mock.patch("builtins.print")
def test_progress_logger_done(self, mock_print):
with ProgressLogger("LA", "Blatt$$") as logger:
logger.update(1)
logger.update(2)
self.assert_print_called_done(", done.", mock_print)
```
{
"source": "JonasSuni/geopack",
"score": 2
}
#### File: geopack/geopack/t04.py
```python
import numpy as np
# t04 is identical to t01 except for several factors.
def t04(parmod,ps,x,y,z):
"""
A data-based model of the external (i.e., without earth's contribution) part of the
magnetospheric magnetic field, calibrated by
(1) solar wind pressure pdyn (nanopascals),
(2) dst (nanotesla),
(3) byimf,
    (4) bzimf (nanotesla),
    (5-10) indices w1 - w6, calculated as time integrals from the beginning of a storm;
    see reference (3) below for a detailed definition of these variables.
    :param parmod: 10-element array; the elements are explained above.
    :param ps: geo-dipole tilt angle in radians.
    :param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
Computed as a sum of contributions from principal field sources.
Assembled: March 25, 2004; Updated: August 2 & 31, December 27, 2004.
    A bug that might cause compilation problems with some Fortran compilers was eliminated on March 14, 2005.
    Attention: the model is based on data taken sunward from x = -15 Re, and hence becomes invalid at larger tailward distances!
REFERENCES:
(1) <NAME>, A new data-based model of the near magnetosphere magnetic field:
1. Mathematical structure.
2. Parameterization and fitting to observations. JGR v. 107(A8), 1176/1179, doi:10.1029/2001JA000219/220, 2002.
(2) <NAME>, <NAME>, <NAME>, Storm-time distortion of the
inner magnetosphere: How severe can it get ? JGR v. 108(A5), 1209, doi:10.1029/2002JA009808, 2003.
(3) <NAME> and <NAME>, Modeling the dynamics of the inner magnetosphere during
strong geomagnetic storms, J. Geophys. Res., v. 110 (A3), A03208, doi: 10.1029/2004JA010798, 2005.
"""
a = np.array([
1.00000,5.44118,0.891995,9.09684,0.00000,-7.18972,12.2700,
-4.89408,0.00000,0.870536,1.36081,0.00000,0.688650,0.602330,
0.00000,0.316346,1.22728,-0.363620E-01,-0.405821,0.452536,
0.755831,0.215662,0.152759,5.96235,23.2036,11.2994,69.9596,
0.989596,-0.132131E-01,0.985681,0.344212E-01,1.02389,0.207867,
1.51220,0.682715E-01,1.84714,1.76977,1.37690,0.696350,0.343280,
3.28846,111.293,5.82287,4.39664,0.383403,0.648176,0.318752E-01,
0.581168,1.15070,0.843004,0.394732,0.846509,0.916555,0.550920,
0.180725,0.898772,0.387365,2.26596,1.29123,0.436819,1.28211,
1.33199,.405553,1.6229,.699074,1.26131,2.42297,.537116,.619441])
iopgen,ioptt,iopb,iopr = [0.]*4
pdyn=parmod[0]
dst_ast=parmod[1]*0.8-13*np.sqrt(pdyn)
bximf,byimf,bzimf=[0.,parmod[2],parmod[3]]
w1,w2,w3,w4,w5,w6 = parmod[4:10]
pss,xx,yy,zz = [ps,x,y,z]
return extern(iopgen,ioptt,iopb,iopr,a,69,pdyn,dst_ast,bximf,byimf,bzimf,
w1,w2,w3,w4,w5,w6,pss,xx,yy,zz)
def extern(iopgen,iopt,iopb,iopr,a,ntot,pdyn,dst,bximf,byimf,bzimf,w1,w2,w3,w4,w5,w6,ps,x,y,z):
"""
:param iopgen: general option flag:
iopgen=0 - calculate total field
iopgen=1 - dipole shielding only
iopgen=2 - tail field only
iopgen=3 - birkeland field only
iopgen=4 - ring current field only
iopgen=5 - interconnection field only
:param iopt: tail field flag:
iopt=0 - both modes
iopt=1 - mode 1 only
iopt=2 - mode 2 only
:param iopb: birkeland field flag:
iopb=0 - all 4 terms
iopb=1 - region 1, modes 1 and 2
iopb=2 - region 2, modes 1 and 2
:param iopr: ring current flag:
iopr=0 - both src and prc
iopr=1 - src only
iopr=2 - prc only
"""
# common /tail/ dxshift1,dxshift2,d,deltady ! the common blocks forward nonlinear parameters
# common /birkpar/ xkappa1,xkappa2
# common /rcpar/ sc_sy,sc_as,phi
# common /g/ g
# common /rh0/ rh0
global dxshift1, dxshift2, d, deltady
global xkappa1, xkappa2
global sc_sy, sc_pr, phi
global g
global rh0
a0_a,a0_s0,a0_x0 = [34.586,1.1960,3.4397] # Shue et al. parameters
dsig = 0.005
rh0,rh2 = [8.0,-5.2]
xappa = (pdyn/2.)**a[22] # overall scaling parameter
rh0 = 7.5 # tail hinging distance
g = 35.0 # tail warping parameter
xappa3=xappa**3
xx=x*xappa
yy=y*xappa
zz=z*xappa
sps=np.sin(ps)
x0=a0_x0/xappa
am=a0_a/xappa
s0=a0_s0
# Calculate "imf" components outside the magnetopause layer (hence begin with "o")
# They are needed only if the point (x,y,z) is within the transition magnetopause layer or outside the magnetosphere:
factimf=a[19]
oimfx=0.
oimfy=byimf*factimf
oimfz=bzimf*factimf
r=np.sqrt(x**2+y**2+z**2)
xss=x
zss=z
# begin iterative search of unwarped coords (to find sigma)
dd = 1.
while dd > 1e-6:
xsold=xss
zsold=zss
rh=rh0+rh2*(zss/r)**2
sinpsas=sps/(1+(r/rh)**3)**0.33333333
cospsas=np.sqrt(1-sinpsas**2)
zss=x*sinpsas+z*cospsas
xss=x*cospsas-z*sinpsas
dd=np.abs(xss-xsold)+np.abs(zss-zsold)
rho2=y**2+zss**2
asq=am**2
xmxm=am+xss-x0
if xmxm < 0: xmxm = 0 # the boundary is a cylinder tailward of x=x0-am
axx0=xmxm**2
aro=asq+rho2
sigma=np.sqrt((aro+axx0+np.sqrt((aro+axx0)**2-4.*asq*axx0))/(2.*asq))
# Now, there are three possible cases:
# (1) inside the magnetosphere
# (2) in the boundary layer
# (3) outside the magnetosphere and b.layer
# First of all, consider the cases (1) and (2):
if sigma < (s0+dsig): # cases (1) or (2); calculate the model field (with the potential "penetrated" interconnection field):
bxcf,bycf,bzcf = [0.]*3
if iopgen <= 1:
cfx,cfy,cfz = shlcar3x3(xx,yy,zz,ps) # dipole shielding field
bxcf=cfx*xappa3
bycf=cfy*xappa3
bzcf=cfz*xappa3
bxt1,byt1,bzt1,bxt2,byt2,bzt2 = [0.]*6
if (iopgen == 0) | (iopgen == 2):
dstt = -20.
if dst < dstt: dstt = dst
znam = np.abs(dstt)**0.37
dxshift1=a[23]-a[24]/znam
dxshift2=a[25]-a[26]/znam
d=a[35]*np.exp(-w1/a[36])+a[68]
deltady=4.7
bxt1,byt1,bzt1,bxt2,byt2,bzt2 = deformed(iopt,ps,xx,yy,zz)
bxr11,byr11,bzr11, bxr12,byr12,bzr12, bxr21,byr21,bzr21, bxr22,byr22,bzr22 = [0.]*12
if (iopgen == 0) | (iopgen == 3):
znam = np.abs(dst)
if dst >= -20: znam = 20.
xkappa1=a[31]*(znam/20)**a[32]
xkappa2=a[33]*(znam/20)**a[34]
# Birkeland field (two modes for r1 and two modes for r2)
bxr11,byr11,bzr11, bxr12,byr12,bzr12, bxr21,byr21,bzr21, bxr22,byr22,bzr22 = \
birk_tot(iopb,ps,xx,yy,zz)
bxsrc,bysrc,bzsrc, bxprc,byprc,bzprc = [0.]*6
if (iopgen == 0) | (iopgen == 4):
phi=a[37]
znam=np.abs(dst)
if dst >= -20: znam = 20
sc_sy=a[27]*(20/znam)**a[28]*xappa
sc_pr=a[29]*(20/znam)**a[30]*xappa
# shielded ring current (src and prc)
bxsrc,bysrc,bzsrc, bxprc,byprc,bzprc = full_rc(iopr,ps,xx,yy,zz)
hximf,hyimf,hzimf = [0.]*3
if (iopgen == 0) | (iopgen == 5):
# These are components of the penetrated field per unit of the penetration coefficient.
# In other words, these are derivatives of the penetration field components with respect
# to the penetration coefficient. We assume that only the transverse component of the
# field penetrates inside.
hximf,hyimf,hzimf = [0.,byimf,bzimf]
# Now, add up all the components:
dlp1=(pdyn/2)**a[20]
dlp2=(pdyn/2)**a[21]
tamp1=a[1]+a[2]*dlp1+a[3]*a[38]*w1/np.sqrt(w1**2+a[38]**2)+a[4]*dst
tamp2=a[5]+a[6]*dlp2+a[7]*a[39]*w2/np.sqrt(w2**2+a[39]**2)+a[8]*dst
a_src=a[9] +a[10]*a[40]*w3/np.sqrt(w3**2+a[40]**2)+a[11]*dst
a_prc=a[12]+a[13]*a[41]*w4/np.sqrt(w4**2+a[41]**2)+a[14]*dst
a_r11=a[15]+a[16]*a[42]*w5/np.sqrt(w5**2+a[42]**2)
a_r21=a[17]+a[18]*a[43]*w6/np.sqrt(w6**2+a[43]**2)
bbx=a[0]*bxcf + tamp1*bxt1+tamp2*bxt2 + a_src*bxsrc+a_prc*bxprc + a_r11*bxr11+a_r21*bxr21 + a[19]*hximf
bby=a[0]*bycf + tamp1*byt1+tamp2*byt2 + a_src*bysrc+a_prc*byprc + a_r11*byr11+a_r21*byr21 + a[19]*hyimf
bbz=a[0]*bzcf + tamp1*bzt1+tamp2*bzt2 + a_src*bzsrc+a_prc*bzprc + a_r11*bzr11+a_r21*bzr21 + a[19]*hzimf
# And we have the total external field.
# Now, let us check whether we have the case (1). if yes - we are done:
if sigma < (s0-dsig): # (x,y,z) is inside the magnetosphere
bx,by,bz = [bbx,bby,bbz]
else: # this is the most complex case: we are inside the interpolation region
fint=0.5*(1.-(sigma-s0)/dsig)
fext=0.5*(1.+(sigma-s0)/dsig)
qx,qy,qz = dipole(ps,x,y,z)
bx=(bbx+qx)*fint+oimfx*fext -qx
by=(bby+qy)*fint+oimfy*fext -qy
bz=(bbz+qz)*fint+oimfz*fext -qz
# The cases (1) and (2) are exhausted; the only remaining possibility is now the case (3):
else:
qx,qy,qz = dipole(ps,x,y,z)
bx=oimfx-qx
by=oimfy-qy
bz=oimfz-qz
return bx,by,bz
def shlcar3x3(x,y,z, ps):
"""
This subroutine returns the shielding field for the earth's dipole, represented by
2x3x3=18 "cartesian" harmonics, tilted with respect to the z=0 plane (nb#4, p.74)
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
    :param ps: geo-dipole tilt angle in radians.
:return: bx,by,bz. Field components in GSM system, in nT.
"""
    # The 36 coefficients enter in pairs in the amplitudes of the "cartesian" harmonics (A(1)-A(36)).
    # The 14 nonlinear parameters (A(37)-A(50)) are the scales Pi, Ri, Qi, and Si entering the arguments of exponents, sines, and cosines in each of the
    # 18 "cartesian" harmonics, plus two tilt angles for the cartesian harmonics (one for the psi=0 mode and another for the psi=90 mode)
a = np.array([
-901.2327248,895.8011176,817.6208321,-845.5880889,-83.73539535,
86.58542841,336.8781402,-329.3619944,-311.2947120,308.6011161,
31.94469304,-31.30824526,125.8739681,-372.3384278,-235.4720434,
286.7594095,21.86305585,-27.42344605,-150.4874688,2.669338538,
1.395023949,-.5540427503,-56.85224007,3.681827033,-43.48705106,
5.103131905,1.073551279,-.6673083508,12.21404266,4.177465543,
5.799964188,-.3977802319,-1.044652977,.5703560010,3.536082962,
-3.222069852,9.620648151,6.082014949,27.75216226,12.44199571,
5.122226936,6.982039615,20.12149582,6.150973118,4.663639687,
15.73319647,2.303504968,5.840511214,.8385953499E-01,.3477844929])
p1,p2,p3, r1,r2,r3, q1,q2,q3, s1,s2,s3 = a[36:48]
t1,t2 = a[48:50]
cps=np.cos(ps)
sps=np.sin(ps)
s2ps=2*cps # modified here (sin(2*ps) instead of sin(3*ps))
st1=np.sin(ps*t1)
ct1=np.cos(ps*t1)
st2=np.sin(ps*t2)
ct2=np.cos(ps*t2)
x1=x*ct1-z*st1
z1=x*st1+z*ct1
x2=x*ct2-z*st2
z2=x*st2+z*ct2
# make the terms in the 1st sum ("perpendicular" symmetry):
# i=1:
sqpr= np.sqrt(1/p1**2+1/r1**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx1 =-sqpr*expr*cyp*szr
hy1 = expr/p1*syp*szr
fz1 =-expr*cyp/r1*czr
hx1 = fx1*ct1+fz1*st1
hz1 =-fx1*st1+fz1*ct1
sqpr= np.sqrt(1/p1**2+1/r2**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx2 =-sqpr*expr*cyp*szr
hy2 = expr/p1*syp*szr
fz2 =-expr*cyp/r2*czr
hx2 = fx2*ct1+fz2*st1
hz2 =-fx2*st1+fz2*ct1
sqpr= np.sqrt(1/p1**2+1/r3**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx3 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy3 = expr/p1*syp*(z1*czr+x1/r3*szr/sqpr)
fz3 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx3 = fx3*ct1+fz3*st1
hz3 =-fx3*st1+fz3*ct1
# i=2:
sqpr= np.sqrt(1/p2**2+1/r1**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx4 =-sqpr*expr*cyp*szr
hy4 = expr/p2*syp*szr
fz4 =-expr*cyp/r1*czr
hx4 = fx4*ct1+fz4*st1
hz4 =-fx4*st1+fz4*ct1
sqpr= np.sqrt(1/p2**2+1/r2**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx5 =-sqpr*expr*cyp*szr
hy5 = expr/p2*syp*szr
fz5 =-expr*cyp/r2*czr
hx5 = fx5*ct1+fz5*st1
hz5 =-fx5*st1+fz5*ct1
sqpr= np.sqrt(1/p2**2+1/r3**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx6 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy6 = expr/p2*syp*(z1*czr+x1/r3*szr/sqpr)
fz6 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx6 = fx6*ct1+fz6*st1
hz6 =-fx6*st1+fz6*ct1
# i=3:
sqpr= np.sqrt(1/p3**2+1/r1**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx7 =-sqpr*expr*cyp*szr
hy7 = expr/p3*syp*szr
fz7 =-expr*cyp/r1*czr
hx7 = fx7*ct1+fz7*st1
hz7 =-fx7*st1+fz7*ct1
sqpr= np.sqrt(1/p3**2+1/r2**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx8 =-sqpr*expr*cyp*szr
hy8 = expr/p3*syp*szr
fz8 =-expr*cyp/r2*czr
hx8 = fx8*ct1+fz8*st1
hz8 =-fx8*st1+fz8*ct1
sqpr= np.sqrt(1/p3**2+1/r3**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx9 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy9 = expr/p3*syp*(z1*czr+x1/r3*szr/sqpr)
fz9 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx9 = fx9*ct1+fz9*st1
hz9 =-fx9*st1+fz9*ct1
a1=a[0]+a[1]*cps
a2=a[2]+a[3]*cps
a3=a[4]+a[5]*cps
a4=a[6]+a[7]*cps
a5=a[8]+a[9]*cps
a6=a[10]+a[11]*cps
a7=a[12]+a[13]*cps
a8=a[14]+a[15]*cps
a9=a[16]+a[17]*cps
bx=a1*hx1+a2*hx2+a3*hx3+a4*hx4+a5*hx5+a6*hx6+a7*hx7+a8*hx8+a9*hx9
by=a1*hy1+a2*hy2+a3*hy3+a4*hy4+a5*hy5+a6*hy6+a7*hy7+a8*hy8+a9*hy9
bz=a1*hz1+a2*hz2+a3*hz3+a4*hz4+a5*hz5+a6*hz6+a7*hz7+a8*hz8+a9*hz9
# make the terms in the 2nd sum ("parallel" symmetry):
# i=1
sqqs= np.sqrt(1/q1**2+1/s1**2)
cyq = np.cos(y/q1)
syq = np.sin(y/q1)
czs = np.cos(z2/s1)
szs = np.sin(z2/s1)
exqs= np.exp(sqqs*x2)
fx1 =-sqqs*exqs*cyq*czs *sps
hy1 = exqs/q1*syq*czs *sps
fz1 = exqs*cyq/s1*szs *sps
hx1 = fx1*ct2+fz1*st2
hz1 =-fx1*st2+fz1*ct2
sqqs= np.sqrt(1/q1**2+1/s2**2)
cyq = np.cos(y/q1)
syq = np.sin(y/q1)
czs = np.cos(z2/s2)
szs = np.sin(z2/s2)
exqs= np.exp(sqqs*x2)
fx2 =-sqqs*exqs*cyq*czs *sps
hy2 = exqs/q1*syq*czs *sps
fz2 = exqs*cyq/s2*szs *sps
hx2 = fx2*ct2+fz2*st2
hz2 =-fx2*st2+fz2*ct2
sqqs= np.sqrt(1/q1**2+1/s3**2)
cyq = np.cos(y/q1)
syq = np.sin(y/q1)
czs = np.cos(z2/s3)
szs = np.sin(z2/s3)
exqs= np.exp(sqqs*x2)
fx3 =-sqqs*exqs*cyq*czs *sps
hy3 = exqs/q1*syq*czs *sps
fz3 = exqs*cyq/s3*szs *sps
hx3 = fx3*ct2+fz3*st2
hz3 =-fx3*st2+fz3*ct2
# i=2:
sqqs= np.sqrt(1/q2**2+1/s1**2)
cyq = np.cos(y/q2)
syq = np.sin(y/q2)
czs = np.cos(z2/s1)
szs = np.sin(z2/s1)
exqs= np.exp(sqqs*x2)
fx4 =-sqqs*exqs*cyq*czs *sps
hy4 = exqs/q2*syq*czs *sps
fz4 = exqs*cyq/s1*szs *sps
hx4 = fx4*ct2+fz4*st2
hz4 =-fx4*st2+fz4*ct2
sqqs= np.sqrt(1/q2**2+1/s2**2)
cyq = np.cos(y/q2)
syq = np.sin(y/q2)
czs = np.cos(z2/s2)
szs = np.sin(z2/s2)
exqs= np.exp(sqqs*x2)
fx5 =-sqqs*exqs*cyq*czs *sps
hy5 = exqs/q2*syq*czs *sps
fz5 = exqs*cyq/s2*szs *sps
hx5 = fx5*ct2+fz5*st2
hz5 =-fx5*st2+fz5*ct2
sqqs= np.sqrt(1/q2**2+1/s3**2)
cyq = np.cos(y/q2)
syq = np.sin(y/q2)
czs = np.cos(z2/s3)
szs = np.sin(z2/s3)
exqs= np.exp(sqqs*x2)
fx6 =-sqqs*exqs*cyq*czs *sps
hy6 = exqs/q2*syq*czs *sps
fz6 = exqs*cyq/s3*szs *sps
hx6 = fx6*ct2+fz6*st2
hz6 =-fx6*st2+fz6*ct2
# i=3:
sqqs= np.sqrt(1/q3**2+1/s1**2)
cyq = np.cos(y/q3)
syq = np.sin(y/q3)
czs = np.cos(z2/s1)
szs = np.sin(z2/s1)
exqs= np.exp(sqqs*x2)
fx7 =-sqqs*exqs*cyq*czs *sps
hy7 = exqs/q3*syq*czs *sps
fz7 = exqs*cyq/s1*szs *sps
hx7 = fx7*ct2+fz7*st2
hz7 =-fx7*st2+fz7*ct2
sqqs= np.sqrt(1/q3**2+1/s2**2)
cyq = np.cos(y/q3)
syq = np.sin(y/q3)
czs = np.cos(z2/s2)
szs = np.sin(z2/s2)
exqs= np.exp(sqqs*x2)
fx8 =-sqqs*exqs*cyq*czs *sps
hy8 = exqs/q3*syq*czs *sps
fz8 = exqs*cyq/s2*szs *sps
hx8 = fx8*ct2+fz8*st2
hz8 =-fx8*st2+fz8*ct2
sqqs= np.sqrt(1/q3**2+1/s3**2)
cyq = np.cos(y/q3)
syq = np.sin(y/q3)
czs = np.cos(z2/s3)
szs = np.sin(z2/s3)
exqs= np.exp(sqqs*x2)
fx9 =-sqqs*exqs*cyq*czs *sps
hy9 = exqs/q3*syq*czs *sps
fz9 = exqs*cyq/s3*szs *sps
hx9 = fx9*ct2+fz9*st2
hz9 =-fx9*st2+fz9*ct2
a1=a[18]+a[19]*s2ps
a2=a[20]+a[21]*s2ps
a3=a[22]+a[23]*s2ps
a4=a[24]+a[25]*s2ps
a5=a[26]+a[27]*s2ps
a6=a[28]+a[29]*s2ps
a7=a[30]+a[31]*s2ps
a8=a[32]+a[33]*s2ps
a9=a[34]+a[35]*s2ps
bx=bx+a1*hx1+a2*hx2+a3*hx3+a4*hx4+a5*hx5+a6*hx6+a7*hx7+a8*hx8+a9*hx9
by=by+a1*hy1+a2*hy2+a3*hy3+a4*hy4+a5*hy5+a6*hy6+a7*hy7+a8*hy8+a9*hy9
bz=bz+a1*hz1+a2*hz2+a3*hz3+a4*hz4+a5*hz5+a6*hz6+a7*hz7+a8*hz8+a9*hz9
return bx, by, bz
def deformed(iopt, ps, x,y,z):
"""
Calculates gsm components of two unit-amplitude tail field modes, taking into account
both effects of dipole tilt: warping in y-z (done by the subroutine warped) and bending
in x-z (done by this subroutine)
:param iopt: tail field mode flag: iopt=0 - the two tail modes are added up; iopt=1 - mode 1 only; iopt=2 - mode 2 only
    :param ps: geo-dipole tilt angle in radians.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return:
"""
# rh0,rh1,rh2, and ieps control the tilt-related deformation of the tail field
# common /rh0/ rh0
global rh0
rh2,ieps = [-5.2,3]
sps = np.sin(ps)
r2 = x**2+y**2+z**2
r = np.sqrt(r2)
zr = z/r
rh = rh0+rh2*zr**2
drhdr = -zr/r*2*rh2*zr
drhdz = 2*rh2*zr/r
rrh = r/rh
f = 1/(1+rrh**ieps)**(1/ieps)
dfdr = -rrh**(ieps-1)*f**(ieps+1)/rh
dfdrh = -rrh*dfdr
spsas = sps*f
cpsas = np.sqrt(1-spsas**2)
xas = x*cpsas-z*spsas
zas = x*spsas+z*cpsas
facps = sps/cpsas*(dfdr+dfdrh*drhdr)/r
psasx = facps*x
psasy = facps*y
psasz = facps*z+sps/cpsas*dfdrh*drhdz
dxasdx = cpsas-zas*psasx
dxasdy =-zas*psasy
dxasdz =-spsas-zas*psasz
dzasdx = spsas+xas*psasx
dzasdy = xas*psasy
dzasdz = cpsas+xas*psasz
fac1 = dxasdz*dzasdy-dxasdy*dzasdz
fac2 = dxasdx*dzasdz-dxasdz*dzasdx
fac3 = dzasdx*dxasdy-dxasdx*dzasdy
# deform:
bxas1,byas1,bzas1, bxas2,byas2,bzas2 = warped(iopt,ps,xas,y,zas)
bx1=bxas1*dzasdz-bzas1*dxasdz +byas1*fac1
by1=byas1*fac2
bz1=bzas1*dxasdx-bxas1*dzasdx +byas1*fac3
bx2=bxas2*dzasdz-bzas2*dxasdz +byas2*fac1
by2=byas2*fac2
bz2=bzas2*dxasdx-bxas2*dzasdx +byas2*fac3
return bx1,by1,bz1, bx2,by2,bz2
def warped(iopt, ps, x,y,z):
"""
Calculates GSM components of the warped field for two tail unit modes. The warping deformation
is imposed on the unwarped field, computed by the subroutine "unwarped". The warping parameter
g was obtained by least squares fitting to the entire dataset.
:param iopt: tail field mode flag: iopt=0 - the two tail modes are added up; iopt=1 - mode 1 only; iopt=2 - mode 2 only
    :param ps: geo-dipole tilt angle in radians.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return:
"""
# common /g/ g
global g
dgdx,xl,dxldx = [0.,20,0]
sps=np.sin(ps)
rho2=y**2+z**2
rho=np.sqrt(rho2)
if (y == 0) & (z == 0):
phi=0.
cphi=1.
sphi=0.
else:
phi=np.arctan2(z,y)
cphi=y/rho
sphi=z/rho
rr4l4=rho/(rho2**2+xl**4)
f=phi+g*rho2*rr4l4*cphi*sps
dfdphi=1-g*rho2*rr4l4*sphi*sps
dfdrho=g*rr4l4**2*(3*xl**4-rho2**2)*cphi*sps
dfdx=rr4l4*cphi*sps*(dgdx*rho2-g*rho*rr4l4*4*xl**3*dxldx)
cf=np.cos(f)
sf=np.sin(f)
yas=rho*cf
zas=rho*sf
bx_as1,by_as1,bz_as1, bx_as2,by_as2,bz_as2 = unwarped(iopt,x,yas,zas)
brho_as = by_as1*cf+bz_as1*sf # deform the 1st mode
bphi_as = -by_as1*sf+bz_as1*cf
brho_s = brho_as*dfdphi
bphi_s = bphi_as-rho*(bx_as1*dfdx+brho_as*dfdrho)
bx1 = bx_as1*dfdphi
by1 = brho_s*cphi-bphi_s*sphi
bz1 = brho_s*sphi+bphi_s*cphi # done
brho_as = by_as2*cf+bz_as2*sf # deform the 2nd mode
bphi_as = -by_as2*sf+bz_as2*cf
brho_s = brho_as*dfdphi
bphi_s = bphi_as-rho*(bx_as2*dfdx+brho_as*dfdrho)
bx2 = bx_as2*dfdphi
by2 = brho_s*cphi-bphi_s*sphi
bz2 = brho_s*sphi+bphi_s*cphi # done
return bx1,by1,bz1, bx2,by2,bz2
def unwarped(iopt, x,y,z):
"""
Calculates GSM components of the shielded field of two tail modes with unit amplitudes, without any
warping or bending. Nonlinear parameters of the modes are forwarded here via a common block /tail/.
:param iopt: tail field mode flag: iopt=0 - the two tail modes are added up; iopt=1 - mode 1 only; iopt=2 - mode 2 only
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return:
"""
# common /tail/ dxshift1,dxshift2,d,deltady
global dxshift1, dxshift2, d, deltady
deltadx1,alpha1,xshift1 = [1.,1.1,6]
deltadx2,alpha2,xshift2 = [0.,.25,4]
a1 = np.array([
-25.45869857,57.35899080,317.5501869,-2.626756717,-93.38053698,
-199.6467926,-858.8129729,34.09192395,845.4214929,-29.07463068,
47.10678547,-128.9797943,-781.7512093,6.165038619,167.8905046,
492.0680410,1654.724031,-46.77337920,-1635.922669,40.86186772,
-.1349775602,-.9661991179e-01,-.1662302354,.002810467517,.2487355077,
.1025565237,-14.41750229,-.8185333989,11.07693629,.7569503173,
-9.655264745,112.2446542,777.5948964,-5.745008536,-83.03921993,
-490.2278695,-1155.004209,39.08023320,1172.780574,-39.44349797,
-14.07211198,-40.41201127,-313.2277343,2.203920979,8.232835341,
197.7065115,391.2733948,-18.57424451,-437.2779053,23.04976898,
11.75673963,13.60497313,4.691927060,18.20923547,27.59044809,
6.677425469,1.398283308,2.839005878,31.24817706,24.53577264])
a2 = np.array([
-287187.1962,4970.499233,410490.1952,-1347.839052,-386370.3240,
3317.983750,-143462.3895,5706.513767,171176.2904,250.8882750,
-506570.8891,5733.592632,397975.5842,9771.762168,-941834.2436,
7990.975260,54313.10318,447.5388060,528046.3449,12751.04453,
-21920.98301,-21.05075617,31971.07875,3012.641612,-301822.9103,
-3601.107387,1797.577552,-6.315855803,142578.8406,13161.93640,
804184.8410,-14168.99698,-851926.6360,-1890.885671,972475.6869,
-8571.862853,26432.49197,-2554.752298,-482308.3431,-4391.473324,
105155.9160,-1134.622050,-74353.53091,-5382.670711,695055.0788,
-916.3365144,-12111.06667,67.20923358,-367200.9285,-21414.14421,
14.75567902,20.75638190,59.78601609,16.86431444,32.58482365,
23.69472951,17.24977936,13.64902647,68.40989058,11.67828167])
xm1,xm2 = [-12.,-12]
bx1,by1,bz1, bx2,by2,bz2 = [0.]*6
if iopt < 2: # iopt = 0 or 1
xsc1 = (x-xshift1-dxshift1)*alpha1-xm1*(alpha1-1)
ysc1 = y*alpha1
zsc1 = z*alpha1
d0sc1 = d*alpha1 # here we use a single value d0 of the thickness for both modes
fx1,fy1,fz1 = taildisk(d0sc1,deltadx1,deltady,xsc1,ysc1,zsc1)
hx1,hy1,hz1 = shlcar5x5(a1,x,y,z,dxshift1)
bx1=fx1+hx1
by1=fy1+hy1
bz1=fz1+hz1
    if iopt != 1: # iopt = 0 or 2
xsc2 = (x-xshift2-dxshift2)*alpha2-xm2*(alpha2-1)
ysc2 = y*alpha2
zsc2 = z*alpha2
d0sc2 = d*alpha2 # here we use a single value d0 of the thickness for both modes
fx2,fy2,fz2 = taildisk(d0sc2,deltadx2,deltady,xsc2,ysc2,zsc2)
hx2,hy2,hz2 = shlcar5x5(a2,x,y,z,dxshift2)
bx2=fx2+hx2
by2=fy2+hy2
bz2=fz2+hz2
return bx1,by1,bz1, bx2,by2,bz2
def taildisk(d0,deltadx,deltady, x,y,z):
"""
This subroutine computes the components of the tail current field, similar to that described by
    Tsyganenko and Peredo (1994). The difference is that now we use space warping, as described in
    our paper on modeling Birkeland currents (Tsyganenko and Stern, 1996), instead of shearing it in
the spirit of the T89 tail model.
:param d0:
:param deltadx:
:param deltady:
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
"""
f = np.array([-71.09346626,-1014.308601,-1272.939359,-3224.935936,-44546.86232])
b = np.array([10.90101242,12.68393898,13.51791954,14.86775017,15.12306404])
c = np.array([.7954069972,.6716601849,1.174866319,2.565249920,10.01986790])
rho=np.sqrt(x**2+y**2)
drhodx=x/rho
drhody=y/rho
dex=np.exp(x/7)
d=d0+deltady*(y/20)**2+deltadx*dex # The last term (introduced 10/11/2000) makes the sheet thicken sunward, to avoid problems in the subsolar region
dddy=deltady*y*0.005
dddx=deltadx/7*dex
dzeta=np.sqrt(z**2+d**2) # this is the same simple way to spread out the sheet, as that used in t89
ddzetadx=d*dddx/dzeta
ddzetady=d*dddy/dzeta
ddzetadz=z/dzeta
dbx,dby,dbz = [0.0,0,0]
for i in range(5):
bi=b[i]
ci=c[i]
s1=np.sqrt((rho+bi)**2+(dzeta+ci)**2)
s2=np.sqrt((rho-bi)**2+(dzeta+ci)**2)
ds1drho=(rho+bi)/s1
ds2drho=(rho-bi)/s2
ds1ddz=(dzeta+ci)/s1
ds2ddz=(dzeta+ci)/s2
ds1dx=ds1drho*drhodx+ds1ddz*ddzetadx
ds1dy=ds1drho*drhody+ds1ddz*ddzetady
ds1dz= ds1ddz*ddzetadz
ds2dx=ds2drho*drhodx+ds2ddz*ddzetadx
ds2dy=ds2drho*drhody+ds2ddz*ddzetady
ds2dz= ds2ddz*ddzetadz
s1ts2=s1*s2
s1ps2=s1+s2
s1ps2sq=s1ps2**2
fac1=np.sqrt(s1ps2sq-(2*bi)**2)
asas=fac1/(s1ts2*s1ps2sq)
dasds1=(1/(fac1*s2)-asas/s1ps2*(s2*s2+s1*(3*s1+4*s2)))/(s1*s1ps2)
dasds2=(1/(fac1*s1)-asas/s1ps2*(s1*s1+s2*(3*s2+4*s1)))/(s2*s1ps2)
dasdx=dasds1*ds1dx+dasds2*ds2dx
dasdy=dasds1*ds1dy+dasds2*ds2dy
dasdz=dasds1*ds1dz+dasds2*ds2dz
dbx=dbx-f[i]*x*dasdz
dby=dby-f[i]*y*dasdz
dbz=dbz+f[i]*(2*asas+x*dasdx+y*dasdy)
return dbx, dby, dbz
def shlcar5x5(a,x,y,z,dshift):
"""
This code returns the shielding field represented by 5x5=25 "cartesian" harmonics
:param a:
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:param dshift:
:return:
"""
    # The nlin coefficients are the amplitudes of the "cartesian" harmonics (a(1)-a(nlin)).
    # The nnp nonlinear parameters (a(nlin+1)-a(ntot)) are the scales pi and ri entering the arguments of exponents, sines,
    # and cosines in each of the nlin "cartesian" harmonics
dhx,dhy,dhz = [0.]*3
l=0
for i in range(5):
rp=1/a[50+i]
cypi=np.cos(y*rp)
sypi=np.sin(y*rp)
for k in range(5):
rr=1/a[55+k]
szrk=np.sin(z*rr)
czrk=np.cos(z*rr)
sqpr=np.sqrt(rp**2+rr**2)
epr= np.exp(x*sqpr)
dbx=-sqpr*epr*cypi*szrk
dby= rp*epr*sypi*szrk
dbz=-rr*epr*cypi*czrk
coef=a[l]+a[l+1]*dshift
l += 2
dhx=dhx+coef*dbx
dhy=dhy+coef*dby
dhz=dhz+coef*dbz
return dhx,dhy,dhz
def birk_tot(iopb, ps, x,y,z):
"""
:param iopb: birkeland field mode flag:
iopb=0 - all components; iopb=1 - region 1, modes 1 & 2; iopb=2 - region 2, modes 1 & 2
    :param ps: geo-dipole tilt angle in radians.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx11,by11,bz11, bx12,by12,bz12, bx21,by21,bz21, bx22,by22,bz22.
"""
# common /birkpar/ xkappa1,xkappa2 ! input parameters, specified from s/r extall
# common /dphi_b_rho0/ dphi,b,rho_0,xkappa ! parameters, controlling the day-night asymmetry of f.a.c.
global xkappa1, xkappa2
global dphi, b, rho_0, xkappa
sh11 = np.array([
46488.84663,-15541.95244,-23210.09824,-32625.03856,-109894.4551,
-71415.32808,58168.94612,55564.87578,-22890.60626,-6056.763968,
5091.368100,239.7001538,-13899.49253,4648.016991,6971.310672,
9699.351891,32633.34599,21028.48811,-17395.96190,-16461.11037,
7447.621471,2528.844345,-1934.094784,-588.3108359,-32588.88216,
10894.11453,16238.25044,22925.60557,77251.11274,50375.97787,
-40763.78048,-39088.60660,15546.53559,3559.617561,-3187.730438,
309.1487975,88.22153914,-243.0721938,-63.63543051,191.1109142,
69.94451996,-187.9539415,-49.89923833,104.0902848,-120.2459738,
253.5572433,89.25456949,-205.6516252,-44.93654156,124.7026309,
32.53005523,-98.85321751,-36.51904756,98.88241690,24.88493459,
-55.04058524,61.14493565,-128.4224895,-45.35023460,105.0548704,
-43.66748755,119.3284161,31.38442798,-92.87946767,-33.52716686,
89.98992001,25.87341323,-48.86305045,59.69362881,-126.5353789,
-44.39474251,101.5196856,59.41537992,41.18892281,80.86101200,
3.066809418,7.893523804,30.56212082,10.36861082,8.222335945,
19.97575641,2.050148531,4.992657093,2.300564232,.2256245602,-.05841594319])
sh12 = np.array([
210260.4816,-1443587.401,-1468919.281,281939.2993,-1131124.839,
729331.7943,2573541.307,304616.7457,468887.5847,181554.7517,
-1300722.650,-257012.8601,645888.8041,-2048126.412,-2529093.041,
571093.7972,-2115508.353,1122035.951,4489168.802,75234.22743,
823905.6909,147926.6121,-2276322.876,-155528.5992,-858076.2979,
3474422.388,3986279.931,-834613.9747,3250625.781,-1818680.377,
-7040468.986,-414359.6073,-1295117.666,-346320.6487,3565527.409,
430091.9496,-.1565573462,7.377619826,.4115646037,-6.146078880,
3.808028815,-.5232034932,1.454841807,-12.32274869,-4.466974237,
-2.941184626,-.6172620658,12.64613490,1.494922012,-21.35489898,
-1.652256960,16.81799898,-1.404079922,-24.09369677,-10.99900839,
45.94237820,2.248579894,31.91234041,7.575026816,-45.80833339,
-1.507664976,14.60016998,1.348516288,-11.05980247,-5.402866968,
31.69094514,12.28261196,-37.55354174,4.155626879,-33.70159657,
-8.437907434,36.22672602,145.0262164,70.73187036,85.51110098,
21.47490989,24.34554406,31.34405345,4.655207476,5.747889264,
7.802304187,1.844169801,4.867254550,2.941393119,.1379899178,.06607020029])
sh21 = np.array([
162294.6224,503885.1125,-27057.67122,-531450.1339,84747.05678,
-237142.1712,84133.61490,259530.0402,69196.05160,-189093.5264,
-19278.55134,195724.5034,-263082.6367,-818899.6923,43061.10073,
863506.6932,-139707.9428,389984.8850,-135167.5555,-426286.9206,
-109504.0387,295258.3531,30415.07087,-305502.9405,100785.3400,
315010.9567,-15999.50673,-332052.2548,54964.34639,-152808.3750,
51024.67566,166720.0603,40389.67945,-106257.7272,-11126.14442,
109876.2047,2.978695024,558.6019011,2.685592939,-338.0004730,
-81.99724090,-444.1102659,89.44617716,212.0849592,-32.58562625,
-982.7336105,-35.10860935,567.8931751,-1.917212423,-260.2023543,
-1.023821735,157.5533477,23.00200055,232.0603673,-36.79100036,
-111.9110936,18.05429984,447.0481000,15.10187415,-258.7297813,
-1.032340149,-298.6402478,-1.676201415,180.5856487,64.52313024,
209.0160857,-53.85574010,-98.52164290,14.35891214,536.7666279,
20.09318806,-309.7349530,58.54144539,67.45226850,97.92374406,
4.752449760,10.46824379,32.91856110,12.05124381,9.962933904,
15.91258637,1.804233877,6.578149088,2.515223491,.1930034238,-.02261109942])
sh22 = np.array([
-131287.8986,-631927.6885,-318797.4173,616785.8782,-50027.36189,
863099.9833,47680.20240,-1053367.944,-501120.3811,-174400.9476,
222328.6873,333551.7374,-389338.7841,-1995527.467,-982971.3024,
1960434.268,297239.7137,2676525.168,-147113.4775,-3358059.979,
-2106979.191,-462827.1322,1017607.960,1039018.475,520266.9296,
2627427.473,1301981.763,-2577171.706,-238071.9956,-3539781.111,
94628.16420,4411304.724,2598205.733,637504.9351,-1234794.298,
-1372562.403,-2.646186796,-31.10055575,2.295799273,19.20203279,
30.01931202,-302.1028550,-14.78310655,162.1561899,.4943938056,
176.8089129,-.2444921680,-100.6148929,9.172262228,137.4303440,
-8.451613443,-84.20684224,-167.3354083,1321.830393,76.89928813,
-705.7586223,18.28186732,-770.1665162,-9.084224422,436.3368157,
-6.374255638,-107.2730177,6.080451222,65.53843753,143.2872994,
-1028.009017,-64.22739330,547.8536586,-20.58928632,597.3893669,
10.17964133,-337.7800252,159.3532209,76.34445954,84.74398828,
12.76722651,27.63870691,32.69873634,5.145153451,6.310949163,
6.996159733,1.971629939,4.436299219,2.904964304,.1486276863,.06859991529])
xkappa=xkappa1 # forwarded in birk_1n2
x_sc=xkappa1-1.1 # forwarded in birk_shl
bx11,by11,bz11, bx12,by12,bz12, bx21,by21,bz21, bx22,by22,bz22 = [0]*12
if (iopb == 0) | (iopb == 1):
fx11,fy11,fz11 = birk_1n2(1,1,ps,x,y,z) # region 1, mode 1
hx11,hy11,hz11 = birk_shl(sh11,ps,x_sc,x,y,z)
bx11=fx11+hx11
by11=fy11+hy11
bz11=fz11+hz11
fx12,fy12,fz12 = birk_1n2(1,2,ps,x,y,z) # region 1, mode 2
hx12,hy12,hz12 = birk_shl(sh12,ps,x_sc,x,y,z)
bx12=fx12+hx12
by12=fy12+hy12
bz12=fz12+hz12
xkappa=xkappa2 # forwarded in birk_1n2
x_sc=xkappa2-1.0 # forwarded in birk_shl
if (iopb == 0) | (iopb == 2):
fx21,fy21,fz21 = birk_1n2(2,1,ps,x,y,z) # region 2, mode 1
hx21,hy21,hz21 = birk_shl(sh21,ps,x_sc,x,y,z)
bx21=fx21+hx21
by21=fy21+hy21
bz21=fz21+hz21
fx22,fy22,fz22 = birk_1n2(2,2,ps,x,y,z) # region 2, mode 2
hx22,hy22,hz22 = birk_shl(sh22,ps,x_sc,x,y,z)
bx22=fx22+hx22
by22=fy22+hy22
bz22=fz22+hz22
return bx11,by11,bz11, bx12,by12,bz12, bx21,by21,bz21, bx22,by22,bz22
def birk_1n2(numb,mode,ps,x,y,z): # NB# 6, p.60
"""
Calculates components of region 1/2 field in spherical coords. Derived from the s/r dipdef2c
(which does the same job, but input/output there was in spherical coords, while here we use cartesian ones)
:param numb: numb=1 (2) for region 1 (2) currents
:param mode: mode=1 yields simple sinusoidal mlt variation, with maximum current at dawn/dusk meridian
while mode=2 yields the second harmonic.
    :param ps: geo-dipole tilt angle in radians.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
"""
# common /dphi_b_rho0/ dphi,b,rho_0,xkappa ! these parameters control day-night asymmetry of f.a.c., as follows:
# (1) dphi: half-difference (in radians) between day and night latitude of fac oval at ionospheric altitude; typical value: 0.06
# (2) b: an asymmetry factor at high-altitudes; for b=0, the only asymmetry is that from dphi; typical values: 0.35-0.70
# (3) rho_0: a fixed parameter, defining the distance rho, at which the latitude shift gradually saturates and stops increasing; its value was assumed fixed, equal to 7.0.
# (4) xkappa: an overall scaling factor, which can be used for changing the size of the f.a.c. oval
global dtheta, m, dphi, b, rho_0, xkappa
# parameters of the tilt-dependent deformation of the untilted F.A.C. field
beta = 0.9
rh = 10.
eps = 3.
b=0.5
rho_0=7.0
a11 = np.array([
.1618068350, -.1797957553, 2.999642482, -.9322708978, -.6811059760,
.2099057262, -8.358815746, -14.86033550, .3838362986, -16.30945494,
4.537022847, 2.685836007, 27.97833029, 6.330871059, 1.876532361,
18.95619213, .9651528100, .4217195118, -.08957770020, -1.823555887,
.7457045438, -.5785916524, -1.010200918, .01112389357, .09572927448,
-.3599292276, 8.713700514, .9763932955, 3.834602998, 2.492118385, .7113544659])
a12 = np.array([
.7058026940, -.2845938535, 5.715471266, -2.472820880, -.7738802408,
.3478293930, -11.37653694, -38.64768867, .6932927651, -212.4017288,
4.944204937, 3.071270411, 33.05882281, 7.387533799, 2.366769108,
79.22572682, .6154290178, .5592050551, -.1796585105, -1.654932210,
.7309108776, -.4926292779, -1.130266095, -.009613974555, .1484586169,
-.2215347198, 7.883592948, .02768251655, 2.950280953, 1.212634762, .5567714182])
a21 = np.array([
.1278764024, -.2320034273, 1.805623266, -32.37241440, -.9931490648,
.3175085630, -2.492465814, -16.21600096, .2695393416, -6.752691265,
3.971794901, 14.54477563, 41.10158386, 7.912889730, 1.258297372,
9.583547721, 1.014141963, .5104134759, -.1790430468, -1.756358428,
.7561986717, -.6775248254, -.04014016420, .01446794851, .1200521731,
-.2203584559, 4.508963850, .8221623576, 1.779933730, 1.102649543, .8867880020])
a22 = np.array([
.4036015198, -.3302974212, 2.827730930, -45.44405830, -1.611103927,
.4927112073, -.003258457559, -49.59014949, .3796217108, -233.7884098,
4.312666980, 18.05051709, 28.95320323, 11.09948019, .7471649558,
67.10246193, .5667096597, .6468519751, -.1560665317, -1.460805289,
.7719653528, -.6658988668, .2515179349E-05, .02426021891, .1195003324,
-.2625739255, 4.377172556, .2421190547, 2.503482679, 1.071587299, .7247997430])
m=mode
if numb == 1:
dphi=0.055
dtheta=0.06
elif numb == 2:
dphi=0.030
dtheta=0.09
else:
raise ValueError
xsc=x*xkappa
ysc=y*xkappa
zsc=z*xkappa
rho=np.sqrt(xsc**2+zsc**2)
rsc=np.sqrt(xsc**2+ysc**2+zsc**2) # scaled
rho2=rho_0**2
if (xsc == 0) & (zsc == 0):
phi=0.
else:
phi=np.arctan2(-zsc,xsc) # from cartesian to cylindrical (rho,phi,y)
sphic=np.sin(phi)
cphic=np.cos(phi) # "c" means "cylindrical", to distinguish from spherical phi
brack=dphi+b*rho2/(rho2+1)*(rho**2-1)/(rho2+rho**2)
r1rh=(rsc-1)/rh
psias=beta*ps/(1+r1rh**eps)**(1/eps)
phis=phi-brack*np.sin(phi) -psias
dphisphi=1-brack*np.cos(phi)
dphisrho=-2*b*rho2*rho/(rho2+rho**2)**2*np.sin(phi) \
+beta*ps*r1rh**(eps-1)*rho/(rh*rsc*(1+r1rh**eps)**(1/eps+1))
dphisdy= beta*ps*r1rh**(eps-1)*ysc/(rh*rsc*(1+r1rh**eps)**(1/eps+1))
sphics=np.sin(phis)
cphics=np.cos(phis)
xs= rho*cphics
zs=-rho*sphics
if numb ==1:
if mode == 1: [bxs,byas,bzs] = twocones(a11,xs,ysc,zs)
elif mode == 2: [bxs,byas,bzs] = twocones(a12,xs,ysc,zs)
else: raise ValueError
else:
if mode == 1: [bxs,byas,bzs] = twocones(a21,xs,ysc,zs)
elif mode == 2: [bxs,byas,bzs] = twocones(a22,xs,ysc,zs)
else: raise ValueError
brhoas = bxs*cphics-bzs*sphics
bphias = -bxs*sphics-bzs*cphics
brho_s=brhoas*dphisphi *xkappa # scaling
bphi_s=(bphias-rho*(byas*dphisdy+brhoas*dphisrho)) *xkappa
by_s=byas*dphisphi *xkappa
bx=brho_s*cphic-bphi_s*sphic
by=by_s
bz=-brho_s*sphic-bphi_s*cphic
return bx,by,bz
def twocones(a,x,y,z):
"""
Adds fields from two cones (northern and southern), with a proper symmetry of the current and field,
corresponding to the region 1 Birkeland currents. (NB #6, p.58).
:param a:
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
"""
bxn,byn,bzn = one_cone(a,x, y, z)
bxs,bys,bzs = one_cone(a,x,-y,-z)
bx=bxn-bxs
by=byn+bys
bz=bzn+bzs
return bx,by,bz
def one_cone(a,x,y,z):
"""
    Returns field components for a deformed conical current system, fitted to a Biot-Savart field.
Here only the northern cone is taken into account.
:param a: dimension a(31)
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
"""
# common /dtheta/ dtheta
# common /modenum/ m
global dtheta, m
# just for numerical differentiation
dr = 1e-6
dt = 1e-6
theta0=a[30]
rho2=x**2+y**2
rho=np.sqrt(rho2)
r=np.sqrt(rho2+z**2)
theta=np.arctan2(rho,z)
phi=np.arctan2(y,x)
# make the deformation of coordinates:
rs=r_s(a,r,theta)
thetas=theta_s(a,r,theta)
phis=phi
# calculate field components at the new position (asterisked):
btast,bfast = fialcos(rs,thetas,phis,m,theta0,dtheta) # mode #m
# now transform b{r,t,f}_ast by the deformation tensor:
# first of all, find the derivatives:
drsdr=(r_s(a,r+dr,theta)-r_s(a,r-dr,theta))/(2*dr)
drsdt=(r_s(a,r,theta+dt)-r_s(a,r,theta-dt))/(2*dt)
dtsdr=(theta_s(a,r+dr,theta)-theta_s(a,r-dr,theta))/(2*dr)
dtsdt=(theta_s(a,r,theta+dt)-theta_s(a,r,theta-dt))/(2*dt)
stsst=np.sin(thetas)/np.sin(theta)
rsr=rs/r
br =-rsr/r*stsst*btast*drsdt # NB#6, p.43 brast does not enter here
btheta = rsr*stsst*btast*drsdr # (it is identically zero in our case)
bphi = rsr*bfast*(drsdr*dtsdt-drsdt*dtsdr)
s=rho/r
c=z/r
sf=y/rho
cf=x/rho
be=br*s+btheta*c
bx=a[0]*(be*cf-bphi*sf)
by=a[0]*(be*sf+bphi*cf)
bz=a[0]*(br*c-btheta*s)
return bx,by,bz
def r_s(a,r,theta):
# dimension a(31)
return r+a[1]/r+a[2]*r/np.sqrt(r**2+a[10]**2)+a[3]*r/(r**2+a[11]**2) \
+(a[4]+a[5]/r+a[6]*r/np.sqrt(r**2+a[12]**2)+a[7]*r/(r**2+a[13]**2))*np.cos(theta) \
+(a[8]*r/np.sqrt(r**2+a[14]**2)+a[9]*r/(r**2+a[15]**2)**2)*np.cos(2*theta)
def theta_s(a,r,theta):
# dimension a(31)
return theta+(a[16]+a[17]/r+a[18]/r**2+a[19]*r/np.sqrt(r**2+a[26]**2))*np.sin(theta) \
+(a[20]+a[21]*r/np.sqrt(r**2+a[27]**2)+a[22]*r/(r**2+a[28]**2))*np.sin(2*theta) \
+(a[23]+a[24]/r+a[25]*r/(r**2+a[29]**2))*np.sin(3*theta)
def fialcos(r,theta,phi,n,theta0,dt):
"""
Conical model of Birkeland current field; based on the old s/r fialco (of 1990-91) NB of 1985-86-88,
note of March 5, but here both input and output are in spherical CDS.
:param r:
:param theta:
:param phi:
:param n:
:param theta0:
:param dt:
:return: btheta,bphi.
"""
# btn, and bpn are the arrays of btheta and bphi (btn(i), bpn(i) correspond to i-th mode).
# only first n mode amplitudes are computed (n<=10).
# theta0 is the angular half-width of the cone, dt is the angular h.-w. of the current layer
# note: br=0 (because only radial currents are present in this model)
# dimension btn(10),bpn(10),ccos(10),ssin(10)
btn = np.empty(10)
bpn = np.empty(10)
ccos = np.empty(10)
ssin = np.empty(10)
sinte=np.sin(theta)
ro=r*sinte
coste=np.cos(theta)
sinfi=np.sin(phi)
cosfi=np.cos(phi)
tg=sinte/(1+coste) # tan(theta/2)
ctg=sinte/(1-coste) # cot(theta/2)
tetanp=theta0+dt
tetanm=theta0-dt
if theta >= tetanm:
tgp=np.tan(tetanp*0.5)
tgm=np.tan(tetanm*0.5)
tgm2=tgm*tgm
tgp2=tgp*tgp
[cosm1, sinm1] = [1.,0]
tm = 1
[tgm2m,tgp2m] = [1.,1]
for m in range(1,n+1):
tm=tm*tg
ccos[m-1]=cosm1*cosfi-sinm1*sinfi
ssin[m-1]=sinm1*cosfi+cosm1*sinfi
cosm1=ccos[m-1]
sinm1=ssin[m-1]
if theta < tetanm:
t=tm
dtt=0.5*m*tm*(tg+ctg)
dtt0=0
elif theta < tetanp:
tgm2m=tgm2m*tgm2
fc=1/(tgp-tgm)
fc1=1/(2*m+1)
tgm2m1=tgm2m*tgm
tg21=1+tg*tg
t=fc*(tm*(tgp-tg)+fc1*(tm*tg-tgm2m1/tm))
dtt=0.5*m*fc*tg21*(tm/tg*(tgp-tg)-fc1*(tm-tgm2m1/(tm*tg)))
dtt0=0.5*fc*((tgp+tgm)*(tm*tg-fc1*(tm*tg-tgm2m1/tm))+tm*(1-tgp*tgm)-(1+tgm2)*tgm2m/tm)
else:
tgp2m=tgp2m*tgp2
tgm2m=tgm2m*tgm2
fc=1/(tgp-tgm)
fc1=1/(2*m+1)
t=fc*fc1*(tgp2m*tgp-tgm2m*tgm)/tm
dtt=-t*m*0.5*(tg+ctg)
btn[m-1]=m*t*ccos[m-1]/ro
bpn[m-1]=-dtt*ssin[m-1]/r
btheta=btn[n-1] *800.
bphi =bpn[n-1] *800.
return btheta, bphi
def birk_shl(a,ps,x_sc, x,y,z):
"""
B due to the Birkeland current shield.
:param a: coefficient.
    :param ps: geo-dipole tilt angle in radians.
:param x_sc:
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
"""
cps=np.cos(ps)
sps=np.sin(ps)
s3ps=2*cps
pst1=ps*a[84]
pst2=ps*a[85]
st1=np.sin(pst1)
ct1=np.cos(pst1)
st2=np.sin(pst2)
ct2=np.cos(pst2)
x1=x*ct1-z*st1
z1=x*st1+z*ct1
x2=x*ct2-z*st2
z2=x*st2+z*ct2
l=0
[bx,by,bz] = [0,0,0]
for m in range(1,3): # m=1 is for the 1st sum ("perp." symmetry) and m=2 is for the second sum ("parall." symmetry)
for i in range(1,4):
p = a[71 + i]
q = a[77 + i]
cypi = np.cos(y/p)
cyqi = np.cos(y/q)
sypi = np.sin(y/p)
syqi = np.sin(y/q)
for k in range(1,4):
r=a[74+k]
s=a[80+k]
szrk=np.sin(z1/r)
czsk=np.cos(z2/s)
czrk=np.cos(z1/r)
szsk=np.sin(z2/s)
sqpr=np.sqrt(1/p**2+1/r**2)
sqqs=np.sqrt(1/q**2+1/s**2)
epr=np.exp(x1*sqpr)
eqs=np.exp(x2*sqqs)
for n in range(1,3): # n=1 is for the first part of each coefficient and n=2 is for the second one
for nn in range(1,3): # nn = 1,2 further splits the coefficients into 2 parts, to take into account the scale factor dependence
if m == 1:
fx = -sqpr*epr*cypi*szrk
fy = epr*sypi*szrk/p
fz = -epr*cypi*czrk/r
if n == 1:
if nn == 1:
[hx,hy,hz] = [fx,fy,fz]
else:
[hx,hy,hz] = [fx*x_sc, fy*x_sc, fz*x_sc]
else:
if nn == 1:
[hx,hy,hz] = [fx*cps, fy*cps, fz*cps]
else:
[hx,hy,hz] = [fx*cps*x_sc, fy*cps*x_sc, fz*cps*x_sc]
else: # m == 2
fx = -sps*sqqs*eqs*cyqi*czsk
fy = sps/q*eqs*syqi*czsk
fz = sps/s*eqs*cyqi*szsk
if n == 1:
if nn == 1:
[hx,hy,hz] = [fx,fy,fz]
else:
[hx,hy,hz] = [fx*x_sc, fy*x_sc, fz*x_sc]
else:
if nn == 1:
[hx,hy,hz] = [fx*s3ps,fy*s3ps,fz*s3ps]
else:
[hx,hy,hz] = [fx*s3ps*x_sc, fy*s3ps*x_sc, fz*s3ps*x_sc]
l=l+1
if m == 1:
hxr = hx*ct1+hz*st1
hzr = -hx*st1+hz*ct1
else:
hxr = hx*ct2+hz*st2
hzr = -hx*st2+hz*ct2
bx = bx+hxr*a[l-1]
by = by+hy *a[l-1]
bz = bz+hzr*a[l-1]
return bx,by,bz
def full_rc(iopr,ps,x,y,z):
"""
Calculates GSM field components of the symmetric (src) and partial (prc) components of the ring current
:param iopr: a ring current calculation flag (for least-squares fitting only):
        iopr=0 - both src and prc fields are calculated; iopr=1 - src only; iopr=2 - prc only
    :param ps: geo-dipole tilt angle in radians.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return:
"""
# src provides a depression of -28 nt at earth
# prc corresponds to the pressure difference of 2 npa between midnight and noon ring current particle pressure and yields a depression of -17 nt at x=-6re
# sc_sy and sc_pr are scaling factors for the symmetric and partial components: values larger than 1 result in spatially larger currents
# phi is the rotation angle in radians of the partial ring current (measured from midnight toward dusk)
# common /rcpar/ sc_sy,sc_pr,phi
global sc_sy, sc_pr, phi
# corrected values(as of may 2006)
c_sy = np.array([ # sy short for symmetric
-957.2534900,-817.5450246,583.2991249,758.8568270,
13.17029064,68.94173502,-15.29764089,-53.43151590,27.34311724,
149.5252826,-11.00696044,-179.7031814,953.0914774,817.2340042,
-581.0791366,-757.5387665,-13.10602697,-68.58155678,15.22447386,
53.15535633,-27.07982637,-149.1413391,10.91433279,179.3251739,
-6.028703251,1.303196101,-1.345909343,-1.138296330,-0.06642634348,
-0.3795246458,.07487833559,.2891156371,-.5506314391,-.4443105812,
0.2273682152,0.01086886655,-9.130025352,1.118684840,1.110838825,
.1219761512,-.06263009645,-.1896093743,.03434321042,.01523060688,
-.4913171541,-.2264814165,-.04791374574,.1981955976,-68.32678140,
-48.72036263,14.03247808,16.56233733,2.369921099,6.200577111,
-1.415841250,-0.8184867835,-3.401307527,-8.490692287,3.217860767,
-9.037752107,66.09298105,48.23198578,-13.67277141,-16.27028909,
-2.309299411,-6.016572391,1.381468849,0.7935312553,3.436934845,
8.260038635,-3.136213782,8.833214943,8.041075485,8.024818618,
35.54861873,12.55415215,1.738167799,3.721685353,23.06768025,
6.871230562,6.806229878,21.35990364,1.687412298,3.500885177,
0.3498952546,0.6595919814 ])
c_pr = np.array([ # pr short for partial
-64820.58481, -63965.62048, 66267.93413, 135049.7504, -36.56316878,
124.6614669, 56.75637955, -87.56841077, 5848.631425, 4981.097722,
-6233.712207, -10986.40188, 68716.52057, 65682.69473, -69673.32198,
-138829.3568, 43.45817708, -117.9565488, -62.14836263, 79.83651604,
-6211.451069, -5151.633113, 6544.481271, 11353.03491, 23.72352603,
-256.4846331, 25.77629189, 145.2377187, -4.472639098, -3.554312754,
2.936973114, 2.682302576, 2.728979958, 26.43396781, -9.312348296,
-29.65427726, -247.5855336, -206.9111326, 74.25277664, 106.4069993,
15.45391072, 16.35943569, -5.965177750, -6.079451700, 115.6748385,
-35.27377307, -32.28763497, -32.53122151, 93.74409310, 84.25677504,
-29.23010465, -43.79485175, -6.434679514, -6.620247951, 2.443524317,
2.266538956, -43.82903825, 6.904117876, 12.24289401, 17.62014361,
152.3078796, 124.5505289, -44.58690290, -63.02382410, -8.999368955,
-9.693774119, 3.510930306, 3.770949738, -77.96705716, 22.07730961,
20.46491655, 18.67728847, 9.451290614, 9.313661792, 644.7620970,
418.2515954, 7.183754387, 35.62128817, 19.43180682, 39.57218411,
15.69384715, 7.123215241, 2.300635346, 21.90881131, -.01775839370, .3996346710])
hxsrc,hysrc,hzsrc, hxprc,hyprc,hzprc = src_prc(iopr, sc_sy,sc_pr, phi, ps, x,y,z)
x_sc=sc_sy-1
fsx,fsy,fsz = [0.]*3
if (iopr == 0) | (iopr == 1):
fsx,fsy,fsz = rc_shield(c_sy,ps,x_sc, x,y,z)
x_sc=sc_pr-1
fpx,fpy,fpz = [0.]*3
if (iopr == 0) | (iopr == 2):
fpx,fpy,fpz = rc_shield(c_pr,ps,x_sc, x,y,z)
bxsrc=hxsrc+fsx
bysrc=hysrc+fsy
bzsrc=hzsrc+fsz
bxprc=hxprc+fpx
byprc=hyprc+fpy
bzprc=hzprc+fpz
return bxsrc,bysrc,bzsrc,bxprc,byprc,bzprc
def src_prc(iopr,sc_sy,sc_pr,phi,ps, x,y,z):
"""
Returns field components from a model ring current, including its symmetric part and a partial ring current,
closed via birkeland currents. based on results, described in a paper "modeling the inner magnetosphere:
asymmetric ring current and region 2 birkeland currents revisited" (jgr, dec.2000).
:param iopr: a ring current calculation flag (for least-squares fitting only):
        iopr=0 - both src and prc fields are calculated; iopr=1 - src only; iopr=2 - prc only
:param sc_sy, sc_pr: scale factors for the above components; taking sc<1 or sc>1 makes the currents shrink or expand, respectively.
:param phi: the rotation angle (radians) of the partial ring current (measured from midnight toward dusk)
    :param ps: geo-dipole tilt angle in radians.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bxsrc,bysrc,bzsrc, bxprc,byprc,bzprc. Field components in GSM system, in nT. For the symmetric part and partial ring current.
"""
# 1. transform to tilted coordinates (i.e., sm coordinates):
cps=np.cos(ps)
sps=np.sin(ps)
xt=x*cps-z*sps
zt=z*cps+x*sps
# 2. scale the coordinates for the symmetric and partial rc components:
xts=xt/sc_sy # symmetric
yts=y /sc_sy
zts=zt/sc_sy
xta=xt/sc_pr # partial
yta=y /sc_pr
zta=zt/sc_pr
# 3. calculate components of the total field in the tilted (solar-magnetic) coordinate system:
# only for least squares fitting:
bxs,bys,bzs = [0.]*3
bxa_s,bya_s,bza_s = [0.]*3
bxa_qr,bya_qr,bza_q = [0.]*3
# 3a. symmetric field:
if iopr <= 1:
bxs,bys,bzs = rc_symm(xts,yts,zts)
if (iopr == 0) | (iopr == 2):
bxa_s,bya_s,bza_s = prc_symm(xta,yta,zta)
# 3b. rotate the scaled sm coordinates by phi around zsm axis and calculate quadrupole prc field in those coords:
cp=np.cos(phi)
sp=np.sin(phi)
xr=xta*cp-yta*sp
yr=xta*sp+yta*cp
if (iopr == 0) | (iopr == 2):
bxa_qr,bya_qr,bza_q = prc_quad(xr,yr,zta)
# 3c.transform the quadrupole field components back to the sm coords:
bxa_q= bxa_qr*cp+bya_qr*sp
bya_q=-bxa_qr*sp+bya_qr*cp
# 3d. find the total field of prc (symm.+quadr.) in the sm coords:
bxp=bxa_s+bxa_q
byp=bya_s+bya_q
bzp=bza_s+bza_q
# 4. transform the fields of both parts of the ring current back to the gsm system:
bxsrc=bxs*cps+bzs*sps # symmetric rc
bysrc=bys
bzsrc=bzs*cps-bxs*sps
bxprc=bxp*cps+bzp*sps # partial rc
byprc=byp
bzprc=bzp*cps-bxp*sps
return bxsrc,bysrc,bzsrc, bxprc,byprc,bzprc
def rc_symm(x,y,z):
"""
Calculates the field components from a model ring current, due to its symmetric part.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
"""
# ds=sin(theta) at the boundary of the linearity region; dc=sqrt(1-ds**2); drd=1/(2*d)
ds = 1e-2
dc = 0.99994999875
d = 1e-4
drd = 5e3
rho2=x**2+y**2
r2=rho2+z**2
r=np.sqrt(r2)
rp=r+d
rm=r-d
sint=np.sqrt(rho2)/r
cost=z/r
# too close to the z-axis; using a linear approximation a_phi~sint to avoid the singularity problem
if sint < ds:
a=ap(r,ds,dc)/ds
dardr=(rp*ap(rp,ds,dc)-rm*ap(rm,ds,dc))*drd
fxy=z*(2*a-dardr)/(r*r2)
bx=fxy*x
by=fxy*y
bz=(2*a*cost**2+dardr*sint**2)/r
else:
theta=np.arctan2(sint,cost)
tp=theta+d
tm=theta-d
sintp=np.sin(tp)
sintm=np.sin(tm)
costp=np.cos(tp)
costm=np.cos(tm)
br=(sintp*ap(r,sintp,costp)-sintm*ap(r,sintm,costm))/(r*sint)*drd
bt=(rm*ap(rm,sint,cost)-rp*ap(rp,sint,cost))/r*drd
fxy=(br+bt*cost/sint)/r
bx=fxy*x
by=fxy*y
bz=br*cost-bt*sint
return bx, by, bz
def ap(r,sint,cost):
"""
Calculates azimuthal component of the vector potential of the symmetric part of the model ring current.
:param r:
:param sint:
:param cost:
:return:
"""
# Updated 04/20/06 (nb#9, p.37)
a1,a2,rrc1,dd1,rrc2,dd2,p1,r1,dr1,dla1,p2,r2,dr2,dla2,p3,r3,dr3 = [
-456.5289941,375.9055332,4.274684950,2.439528329,3.367557287,
3.146382545,-0.2291904607,3.746064740,1.508802177,0.5873525737,
0.1556236119,4.993638842,3.324180497,0.4368407663,0.1855957207,
2.969226745,2.243367377]
# indicates whether we are too close to the axis of symmetry, where the inversion of dipolar coordinates becomes inaccurate
prox = False
sint1=sint
cost1=cost
# too close to z-axis; use linear interpolation between sint=0 & sint=0.01
if (sint1 < 1.e-2):
sint1=1.e-2
cost1=0.99994999875
prox=True
alpha=sint1**2/r # r,theta -> alpha,gamma
gamma=cost1/r**2
arg1=-((r-r1)/dr1)**2-(cost1/dla1)**2
arg2=-((r-r2)/dr2)**2-(cost1/dla2)**2
arg3=-((r-r3)/dr3)**2
if arg1 < -500: # to prevent "floating underflow" crashes
dexp1=0.
else:
dexp1=np.exp(arg1)
if arg2 < -500: # to prevent "floating underflow" crashes
dexp2=0.
else:
dexp2=np.exp(arg2)
if arg3 < -500: # to prevent "floating underflow" crashes
dexp3=0.
else:
dexp3=np.exp(arg3)
# alpha -> alpha_s (deformed)
alpha_s=alpha*(1+p1*dexp1+p2*dexp2+p3*dexp3)
gamma_s=gamma
gammas2=gamma_s**2
# alpha_s,gamma_s -> rs,sints,costs
alsqh=alpha_s**2/2
f=64/27*gammas2+alsqh**2
q=(np.sqrt(f)+alsqh)**(1/3)
c=q-4*gammas2**(1/3)/(3*q)
if c < 0: c=0
g=np.sqrt(c**2+4*gammas2**(1/3))
rs=4/((np.sqrt(2*g-c)+np.sqrt(c))*(g+c))
costs=gamma_s*rs**2
sints=np.sqrt(1-costs**2)
rhos=rs*sints
rhos2=rhos**2
zs=rs*costs
    # TODO: this elliptic-integral block is repeated below (and twice more in apprc);
    # a shared helper would remove the duplication (see the hypothetical sketch after this function).
p=(rrc1+rhos)**2+zs**2+dd1**2
xk2=4*rrc1*rhos/p
xk=np.sqrt(xk2)
xkrho12=xk*np.sqrt(rhos) # see nb#4, p.3
xk2s = 1-xk2
dl = np.log(1/xk2s)
elk = 1.38629436112 + xk2s*(0.09666344259+xk2s*(0.03590092383+xk2s*(0.03742563713+xk2s*0.01451196212))) \
+ dl*(0.5+xk2s*(0.12498593597+xk2s*(0.06880248576+xk2s*(0.03328355346+xk2s*0.00441787012))))
ele = 1+xk2s*(0.44325141463+xk2s*(0.0626060122+xk2s*(0.04757383546+xk2s*0.01736506451))) \
+ dl*xk2s*(0.2499836831+xk2s*(0.09200180037+xk2s*(0.04069697526+xk2s*0.00526449639)))
aphi1=((1-xk2*0.5)*elk-ele)/xkrho12
p=(rrc2+rhos)**2+zs**2+dd2**2
xk2=4*rrc2*rhos/p
xk=np.sqrt(xk2)
xkrho12=xk*np.sqrt(rhos) # see nb#4, p.3
xk2s = 1-xk2
dl = np.log(1/xk2s)
elk = 1.38629436112 + xk2s*(0.09666344259+xk2s*(0.03590092383+xk2s*(0.03742563713+xk2s*0.01451196212))) \
+ dl*(0.5+xk2s*(0.12498593597+xk2s*(0.06880248576+xk2s*(0.03328355346+xk2s*0.00441787012))))
ele = 1+xk2s*(0.44325141463+xk2s*(0.0626060122+xk2s*(0.04757383546+xk2s*0.01736506451))) \
+ dl*xk2s*(0.2499836831+xk2s*(0.09200180037+xk2s*(0.04069697526+xk2s*0.00526449639)))
aphi2=((1-xk2*0.5)*elk-ele)/xkrho12
ap=a1*aphi1+a2*aphi2
if prox:
ap=ap*sint/sint1 # linear interpolation, if too close to the z-axis
return ap
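# --- Hypothetical refactoring sketch (not part of the original module) ---
# The elliptic-integral block above is evaluated twice in ap() (for rrc1/dd1 and
# rrc2/dd2) and twice more in apprc() below; assuming numpy only, the duplication
# could be factored into a helper such as this illustrative one:
def _loop_aphi(rrc, dd, rhos, zs):
    """Azimuthal vector potential of one smoothed circular current loop of radius rrc,
    using the same polynomial fits of the complete elliptic integrals K (elk) and E (ele)
    as the inlined code above."""
    p = (rrc+rhos)**2+zs**2+dd**2
    xk2 = 4*rrc*rhos/p
    xkrho12 = np.sqrt(xk2)*np.sqrt(rhos)  # see nb#4, p.3
    xk2s = 1-xk2
    dl = np.log(1/xk2s)
    elk = 1.38629436112 + xk2s*(0.09666344259+xk2s*(0.03590092383+xk2s*(0.03742563713+xk2s*0.01451196212))) \
        + dl*(0.5+xk2s*(0.12498593597+xk2s*(0.06880248576+xk2s*(0.03328355346+xk2s*0.00441787012))))
    ele = 1+xk2s*(0.44325141463+xk2s*(0.0626060122+xk2s*(0.04757383546+xk2s*0.01736506451))) \
        + dl*xk2s*(0.2499836831+xk2s*(0.09200180037+xk2s*(0.04069697526+xk2s*0.00526449639)))
    return ((1-xk2*0.5)*elk-ele)/xkrho12
# With the helper, ap() would reduce to ap = a1*_loop_aphi(rrc1,dd1,rhos,zs) + a2*_loop_aphi(rrc2,dd2,rhos,zs).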
def prc_symm(x,y,z):
"""
Calculates the field components from a model ring current, due to a partial ring current.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
"""
# ds=sin(theta) at the boundary of the linearity region; dc=sqrt(1-ds**2); drd=1/(2*d)
ds = 1e-2
dc = 0.99994999875
d = 1e-4
drd = 5e3
rho2=x**2+y**2
r2=rho2+z**2
r=np.sqrt(r2)
rp=r+d
rm=r-d
sint=np.sqrt(rho2)/r
cost=z/r
# too close to the z-axis; using a linear approximation a_phi~sint to avoid the singularity problem
if sint < ds:
a=apprc(r,ds,dc)/ds
dardr=(rp*apprc(rp,ds,dc)-rm*apprc(rm,ds,dc))*drd
fxy=z*(2*a-dardr)/(r*r2)
bx=fxy*x
by=fxy*y
bz=(2*a*cost**2+dardr*sint**2)/r
else:
theta=np.arctan2(sint,cost)
tp=theta+d
tm=theta-d
sintp=np.sin(tp)
sintm=np.sin(tm)
costp=np.cos(tp)
costm=np.cos(tm)
br=(sintp*apprc(r,sintp,costp)-sintm*apprc(r,sintm,costm))/(r*sint)*drd
bt=(rm*apprc(rm,sint,cost)-rp*apprc(rp,sint,cost))/r*drd
fxy=(br+bt*cost/sint)/r
bx=fxy*x
by=fxy*y
bz=br*cost-bt*sint
return bx, by, bz
def apprc(r,sint,cost):
"""
Calculates azimuthal component of the vector potential of the symmetric part of the model partial ring current.
:param r:
:param sint:
:param cost:
:return:
"""
a1,a2,rrc1,dd1,rrc2,dd2,p1,alpha1,dal1,beta1,dg1,p2,alpha2,dal2,beta2,dg2,beta3,p3, \
alpha3,dal3,beta4,dg3,beta5,q0,q1,alpha4,dal4,dg4,q2,alpha5,dal5,dg5,beta6,beta7 = [
-80.11202281,12.58246758,6.560486035,1.930711037,3.827208119,
.7789990504,.3058309043,.1817139853,.1257532909,3.422509402,
.04742939676,-4.800458958,-.02845643596,.2188114228,2.545944574,
.00813272793,.35868244,103.1601001,-.00764731187,.1046487459,
2.958863546,.01172314188,.4382872938,.01134908150,14.51339943,
.2647095287,.07091230197,.01512963586,6.861329631,.1677400816,
.04433648846,.05553741389,.7665599464,.7277854652]
prox=False
sint1=sint
cost1=cost
# too close to z-axis; use linear interpolation between sint=0 & sint=0.01
if (sint1 < 1.e-2):
sint1=1.e-2
cost1=0.99994999875
prox=True
alpha=sint1**2/r # r,theta -> alpha,gamma
gamma=cost1/r**2
arg1=-(gamma/dg1)**2
arg2=-((alpha-alpha4)/dal4)**2-(gamma/dg4)**2
if arg1 < -500: # to prevent "floating underflow" crashes
dexp1=0.
else:
dexp1=np.exp(arg1)
if arg2 < -500: # to prevent "floating underflow" crashes
dexp2=0.
else:
dexp2=np.exp(arg2)
# alpha -> alpha_s (deformed)
alpha_s = alpha*(1 + p1/(1+((alpha-alpha1)/dal1)**2)**beta1*dexp1
+ p2*(alpha-alpha2)/(1+((alpha-alpha2)/dal2)**2)**beta2/(1+(gamma/dg2)**2)**beta3
+ p3*(alpha-alpha3)**2/(1.+((alpha-alpha3)/dal3)**2)**beta4/(1+(gamma/dg3)**2)**beta5)
# gamma -> gamma_s (deformed)
gamma_s = gamma*(1 + q0 + q1*(alpha-alpha4)*dexp2
+ q2*(alpha-alpha5)/(1+((alpha-alpha5)/dal5)**2)**beta6/(1+(gamma/dg5)**2)**beta7)
gammas2 = gamma_s**2
# alpha_s,gamma_s -> rs,sints,costs
alsqh=alpha_s**2/2.
f=64./27.*gammas2+alsqh**2
q=(np.sqrt(f)+alsqh)**(1/3)
c=q-4.*gammas2**(1/3)/(3.*q)
if c < 0: c=0
g=np.sqrt(c**2+4*gammas2**(1/3))
rs=4./((np.sqrt(2*g-c)+np.sqrt(c))*(g+c))
costs=gamma_s*rs**2
sints=np.sqrt(1-costs**2)
rhos=rs*sints
rhos2=rhos**2
zs=rs*costs
    # TODO: this repeats the elliptic-integral block of ap() above; see the hypothetical helper sketch there.
p=(rrc1+rhos)**2+zs**2+dd1**2
xk2=4*rrc1*rhos/p
xk=np.sqrt(xk2)
xkrho12=xk*np.sqrt(rhos) # see nb#4, p.3
xk2s = 1-xk2
dl = np.log(1/xk2s)
elk = 1.38629436112 + xk2s*(0.09666344259+xk2s*(0.03590092383+xk2s*(0.03742563713+xk2s*0.01451196212))) \
+ dl*(0.5+xk2s*(0.12498593597+xk2s*(0.06880248576+xk2s*(0.03328355346+xk2s*0.00441787012))))
ele = 1 + xk2s*(0.44325141463+xk2s*(0.0626060122+xk2s*(0.04757383546+xk2s*0.01736506451))) \
+ dl*xk2s*(0.2499836831+xk2s*(0.09200180037+xk2s*(0.04069697526+xk2s*0.00526449639)))
aphi1=((1-xk2*0.5)*elk-ele)/xkrho12
p=(rrc2+rhos)**2+zs**2+dd2**2
xk2=4*rrc2*rhos/p
xk=np.sqrt(xk2)
xkrho12=xk*np.sqrt(rhos) # see nb#4, p.3
xk2s = 1-xk2
dl = np.log(1/xk2s)
elk = 1.38629436112 + xk2s*(0.09666344259+xk2s*(0.03590092383+xk2s*(0.03742563713+xk2s*0.01451196212))) \
+ dl*(0.5+xk2s*(0.12498593597+xk2s*(0.06880248576+xk2s*(0.03328355346+xk2s*0.00441787012))))
ele = 1 + xk2s*(0.44325141463+xk2s*(0.0626060122+xk2s*(0.04757383546+xk2s*0.01736506451))) \
+ dl*xk2s*(0.2499836831+xk2s*(0.09200180037+xk2s*(0.04069697526+xk2s*0.00526449639)))
aphi2=((1-xk2*0.5)*elk-ele)/xkrho12
apprc=a1*aphi1+a2*aphi2
if prox:
apprc=apprc*sint/sint1 # linear interpolation, if too close to the z-axis
return apprc
def prc_quad(x,y,z):
"""
Calculates components of the field from the "quadrupole" component of the partial ring current.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
"""
d = 1e-4
dd = 2e-4
ds = 1e-2
dc = 0.99994999875
rho2=x**2+y**2
r=np.sqrt(rho2+z**2)
rho=np.sqrt(rho2)
sint=rho/r
cost=z/r
rp=r+d
rm=r-d
if sint > ds:
cphi=x/rho
sphi=y/rho
br=br_prc_q(r,sint,cost)
bt=bt_prc_q(r,sint,cost)
dbrr=(br_prc_q(rp,sint,cost)-br_prc_q(rm,sint,cost))/dd
theta=np.arctan2(sint,cost)
tp=theta+d
tm=theta-d
sintp=np.sin(tp)
costp=np.cos(tp)
sintm=np.sin(tm)
costm=np.cos(tm)
dbtt=(bt_prc_q(r,sintp,costp)-bt_prc_q(r,sintm,costm))/dd
bx=sint*(br+(br+r*dbrr+dbtt)*sphi**2)+cost*bt
by=-sint*sphi*cphi*(br+r*dbrr+dbtt)
bz=(br*cost-bt*sint)*cphi
else:
st=ds
ct=dc
if z < 0: ct=-dc
theta=np.arctan2(st,ct)
tp=theta+d
tm=theta-d
sintp=np.sin(tp)
costp=np.cos(tp)
sintm=np.sin(tm)
costm=np.cos(tm)
br=br_prc_q(r,st,ct)
bt=bt_prc_q(r,st,ct)
dbrr=(br_prc_q(rp,st,ct)-br_prc_q(rm,st,ct))/dd
dbtt=(bt_prc_q(r,sintp,costp)-bt_prc_q(r,sintm,costm))/dd
fcxy=r*dbrr+dbtt
bx=(br*(x**2+2.*y**2)+fcxy*y**2)/(r*st)**2+bt*cost
by=-(br+fcxy)*x*y/(r*st)**2
bz=(br*cost/st-bt)*x/r
return bx,by,bz
def br_prc_q(r,sint,cost):
"""
Calculates the radial component of the "quadrupole" part of the model partial ring current.
:param r:
:param sint:
:param cost:
:return:
"""
a1 = -21.2666329
a2 = 32.24527521
a3 = -6.062894078
a4 = 7.515660734
a5 = 233.7341288
a6 = -227.1195714
a7 = 8.483233889
a8 = 16.80642754
a9 = -24.63534184
a10 = 9.067120578
a11 = -1.052686913
a12 = -12.08384538
a13 = 18.61969572
a14 = -12.71686069
a15 = 47017.35679
a16 = -50646.71204
a17 = 7746.058231
a18 = 1.531069371
xk1 = 2.318824273
al1 = 0.1417519429
dal1 = 0.6388013110e-02
b1 = 5.303934488
be1 = 4.213397467
xk2 = 0.7955534018
al2 = 0.1401142771
dal2 = 0.2306094179e-01
b2 = 3.462235072
be2 = 2.568743010
xk3 = 3.477425908
xk4 = 1.922155110
al3 = 0.1485233485
dal3 = 0.2319676273e-01
b3 = 7.830223587
be3 = 8.492933868
al4 = 0.1295221828
dal4 = 0.01753008801
dg1 = 0.01125504083
al5 = 0.1811846095
dal5 = 0.04841237481
dg2 = 0.01981805097
c1 = 6.557801891
c2 = 6.348576071
c3 = 5.744436687
al6 = 0.2265212965
dal6 = 0.1301957209
drm = 0.5654023158
sint2=sint**2
cost2=cost**2
sc=sint*cost
alpha=sint2/r
gamma=cost/r**2
f,fa,fs = ffs(alpha,al1,dal1)
d1=sc*f**xk1/((r/b1)**be1+1.)
d2=d1*cost2
f,fa,fs = ffs(alpha,al2,dal2)
d3=sc*fs**xk2/((r/b2)**be2+1.)
d4=d3*cost2
f,fa,fs = ffs(alpha,al3,dal3)
d5=sc*(alpha**xk3)*(fs**xk4)/((r/b3)**be3+1.)
d6=d5*cost2
arga=((alpha-al4)/dal4)**2+1.
argg=1.+(gamma/dg1)**2
d7=sc/arga/argg
d8=d7/arga
d9=d8/arga
d10=d9/arga
arga=((alpha-al5)/dal5)**2+1.
argg=1.+(gamma/dg2)**2
d11=sc/arga/argg
d12=d11/arga
d13=d12/arga
d14=d13/arga
d15=sc/(r**4+c1**4)
d16=sc/(r**4+c2**4)*cost2
d17=sc/(r**4+c3**4)*cost2**2
f,fa,fs = ffs(alpha,al6,dal6)
d18=sc*fs/(1.+((r-1.2)/drm)**2)
br_prc_q=a1*d1+a2*d2+a3*d3+a4*d4+a5*d5+a6*d6+a7*d7+a8*d8+a9*d9+ \
a10*d10+a11*d11+a12*d12+a13*d13+a14*d14+a15*d15+a16*d16+a17*d17+a18*d18
return br_prc_q
def bt_prc_q(r,sint,cost):
"""
Calculates the theta component of the "quadrupole" part of the model partial ring current.
:param r:
:param sint:
:param cost:
:return:
"""
# all linear parameters here were multiplied by 0.1, so that they correspond to p_0=1 npa,
# rather than the original value of 10 npa assumed in the biot-savart integral.
a1 = 12.74640393
a2 = -7.516393516
a3 = -5.476233865
a4 = 3.212704645
a5 = -59.10926169
a6 = 46.62198189
a7 = -.01644280062
a8 = 0.1234229112
a9 = -.08579198697
a10 = 0.01321366966
a11 = 0.8970494003
a12 = 9.136186247
a13 = -38.19301215
a14 = 21.73775846
a15 = -410.0783424
a16 = -69.90832690
a17 = -848.8543440
xk1 = 1.243288286
al1 = 0.2071721360
dal1 = 0.05030555417
b1 = 7.471332374
be1 = 3.180533613
xk2 = 1.376743507
al2 = 0.1568504222
dal2 = 0.02092910682
be2 = 1.985148197
xk3 = 0.3157139940
xk4 = 1.056309517
al3 = 0.1701395257
dal3 = 0.1019870070
b3 = 6.293740981
be3 = 5.671824276
al4 = 0.1280772299
dal4 = 0.02189060799
dg1 = 0.01040696080
al5 = 0.1648265607
dal5 = 0.04701592613
dg2 = 0.01526400086
c1 = 12.88384229
c2 = 3.361775101
c3 = 23.44173897
sint2=sint**2
cost2=cost**2
sc=sint*cost
alpha=sint2/r
gamma=cost/r**2
f,fa,fs = ffs(alpha,al1,dal1)
d1=f**xk1/((r/b1)**be1+1.)
d2=d1*cost2
f,fa,fs = ffs(alpha,al2,dal2)
d3=fa**xk2/r**be2
d4=d3*cost2
f,fa,fs = ffs(alpha,al3,dal3)
d5=fs**xk3*alpha**xk4/((r/b3)**be3+1.)
d6=d5*cost2
f,fa,fs = ffs(gamma,0.,dg1)
fcc=(1.+((alpha-al4)/dal4)**2)
d7 =1./fcc*fs
d8 =d7/fcc
d9 =d8/fcc
d10=d9/fcc
arg=1.+((alpha-al5)/dal5)**2
d11=1./arg/(1.+(gamma/dg2)**2)
d12=d11/arg
d13=d12/arg
d14=d13/arg
d15=1./(r**4+c1**2)
d16=cost2/(r**4+c2**2)
d17=cost2**2/(r**4+c3**2)
bt_prc_q = a1*d1+a2*d2+a3*d3+a4*d4+a5*d5+a6*d6+a7*d7+a8*d8+a9*d9+ \
a10*d10+a11*d11+a12*d12+a13*d13+a14*d14+a15*d15+a16*d16+a17*d17
return bt_prc_q
def ffs(a, a0, da):
sq1 = np.sqrt((a + a0) ** 2 + da ** 2)
sq2 = np.sqrt((a - a0) ** 2 + da ** 2)
fa = 2. / (sq1 + sq2)
f = fa * a
fs = 0.5 * (sq1 + sq2) / (sq1 * sq2) * (1.-f * f)
return f, fa, fs
def rc_shield(a,ps,x_sc,x,y,z):
"""
B due to the ring current shield.
:param a: coefficient.
    :param ps: geo-dipole tilt angle in radians.
:param x_sc: scaling factors.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
"""
fac_sc = (x_sc+1)**3
cps = np.cos(ps)
sps = np.sin(ps)
s3ps=2*cps
pst1=ps*a[84]
pst2=ps*a[85]
st1=np.sin(pst1)
ct1=np.cos(pst1)
st2=np.sin(pst2)
ct2=np.cos(pst2)
x1=x*ct1-z*st1
z1=x*st1+z*ct1
x2=x*ct2-z*st2
z2=x*st2+z*ct2
l=0
[bx,by,bz] = [0.]*3
for m in range(2): # m=1 is for the 1st sum ("perp." symmetry) and m=2 is for the second sum ("parall." symmetry)
for i in range(3):
p=a[72+i]
q=a[78+i]
cypi=np.cos(y/p)
cyqi=np.cos(y/q)
sypi=np.sin(y/p)
syqi=np.sin(y/q)
for k in range(3):
r=a[75+k]
s=a[81+k]
szrk=np.sin(z1/r)
czsk=np.cos(z2/s)
czrk=np.cos(z1/r)
szsk=np.sin(z2/s)
sqpr=np.sqrt(1/p**2+1/r**2)
sqqs=np.sqrt(1/q**2+1/s**2)
epr=np.exp(x1*sqpr)
eqs=np.exp(x2*sqqs)
for n in range(2): # n=1 is for the first part of each coefficient and n=2 is for the second one
for nn in range(2): # nn = 1,2 further splits the coefficients into 2 parts, to take into account the scale factor dependence
if m == 0:
fx = -sqpr*epr*cypi*szrk*fac_sc
fy = epr*sypi*szrk/p *fac_sc
fz = -epr*cypi*czrk/r *fac_sc
if n == 0:
if nn == 0:
[hx,hy,hz] = [fx,fy,fz]
else:
[hx,hy,hz] = [fx*x_sc, fy*x_sc, fz*x_sc]
else:
if nn == 0:
[hx,hy,hz] = [fx*cps, fy*cps, fz*cps]
else:
[hx,hy,hz] = [fx*cps*x_sc, fy*cps*x_sc, fz*cps*x_sc]
else: # m == 2
fx = -sps*sqqs*eqs*cyqi*czsk*fac_sc
fy = sps/q*eqs*syqi*czsk *fac_sc
fz = sps/s*eqs*cyqi*szsk *fac_sc
if n == 0:
if nn == 0:
[hx,hy,hz] = [fx,fy,fz]
else:
[hx,hy,hz] = [fx*x_sc,fy*x_sc,fz*x_sc]
else:
if nn == 0:
[hx,hy,hz] = [fx*s3ps,fy*s3ps,fz*s3ps]
else:
[hx,hy,hz] = [fx*s3ps*x_sc, fy*s3ps*x_sc, fz*s3ps*x_sc]
if m == 0:
hxr = hx*ct1+hz*st1
hzr = -hx*st1+hz*ct1
else:
hxr = hx*ct2+hz*st2
hzr = -hx*st2+hz*ct2
bx = bx+hxr*a[l]
by = by+hy *a[l]
bz = bz+hzr*a[l]
l=l+1
return bx, by, bz
def dipole(ps, x,y,z):
"""
Calculates GSM components of a geo-dipole field with the dipole moment corresponding to the epoch of 2000.
    :param ps: geo-dipole tilt angle in radians.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
"""
sps = np.sin(ps)
cps = np.cos(ps)
p = x**2
u = z**2
v = 3*z*x
t = y**2
q = 30115./np.sqrt(p+t+u)**5
bx = q*((t+u-2*p)*sps-v*cps)
by = -3*y*q*(x*sps+z*cps)
bz = q*((p+t-2*u)*cps-v*sps)
return bx,by,bz
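# --- Hypothetical usage sketch (not part of the original module) ---
# Evaluates the epoch-2000 geodipole field 5 Re down the GSM x-axis for an
# untilted dipole (ps = 0); coordinates are in Re and the output is in nT.
if __name__ == '__main__':
    bx_d, by_d, bz_d = dipole(0.0, -5.0, 0.0, 0.0)
    print(bx_d, by_d, bz_d)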
``` |
{
"source": "jonasteuwen/manet-old",
"score": 3
} |
#### File: manet-old/examples/create_lmdb_set.py
```python
import lmdb
import os
from tqdm import tqdm
import simplejson as json
import numpy as np
from manet.utils import read_dcm_series, write_list
def write_kv_to_lmdb(db, key, value):
"""
Write (key, value) to db.
"""
success = False
while not success:
txn = db.begin(write=True)
try:
txn.put(key, value)
txn.commit()
success = True
except lmdb.MapFullError:
txn.abort()
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit * 2
            tqdm.write('MapFullError: doubling LMDB map size to {} bytes.'.format(new_limit))
db.set_mapsize(new_limit)
def write_data_to_lmdb(db, key, image, metadata):
"""Write image data to db."""
write_kv_to_lmdb(db, key, np.ascontiguousarray(image).tobytes())
meta_key = key + '_metadata'
ser_meta = json.dumps(metadata)
write_kv_to_lmdb(db, meta_key, ser_meta)
def build_db(path, db_name, image_folders, generate_keys=False):
"""Build LMDB with images."""
db = lmdb.open(os.path.join(path, db_name), map_async=True, max_dbs=0)
if generate_keys:
keys_filename = os.path.join(path, db_name + '_keys.lst')
write_list(
[], keys_filename, header=['LMDB keys for db {}'.format(db_name)])
for key, folder in tqdm(image_folders):
try:
data, metadata = read_dcm_series(folder)
# If dataset is written to LMDB,
# we do not need the filenames anymore.
metadata.pop('filenames', None)
series_ids = metadata.pop('series_ids', None)
if series_ids:
metadata['series_id'] = series_ids[0]
metadata['dtype'] = '{}'.format(data.dtype)
write_data_to_lmdb(db, key, data, metadata)
if generate_keys:
write_list([key], keys_filename, append=True)
except Exception as e:
tqdm.write('{} failed: {}'.format(path, e))
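# --- Hypothetical usage sketch (not part of the original example) ---
# The (key, folder) pairs and paths below are made up; each folder is expected
# to contain one DICOM series readable by read_dcm_series.
if __name__ == '__main__':
    folders = [('case_0001', '/data/dicom/case_0001'),
               ('case_0002', '/data/dicom/case_0002')]
    build_db('/data/lmdb', 'mammo_train', folders, generate_keys=True)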
```
#### File: manet-old/examples/save_image.py
```python
import matplotlib
matplotlib.use('Agg')
import click
from manet.utils import read_dcm, read_image
from manet.feature.peak import peak_local_max
from manet.transform.mask import resize, bounding_box
from manet.plotting.imshow import plot_2d
@click.command()
@click.argument('image', type=click.Path(exists=True))
@click.option('--mask', default=None,
help='Location mask dcm', type=click.Path(exists=True))
@click.option('--overlay', default=None,
help='Location to overlay map', type=click.Path(exists=True))
@click.option('--output', default='output.png', help='Image to write to')
@click.option('--height', default=16, help='height of the image in inches')
@click.option('--dpi', default=None, help='dpi of the output image')
@click.option('--linewidth', default=2, help='linewidth of the contours.')
@click.option('--bbox/--no-bbox', default=False, help='Plot bounding box')
@click.option('--contour/--no-contour', default=True, help='Do not plot contour.')
@click.option('--threshold', default=0.5, help='Threshold for the overlay')
@click.option('--alpha', default=0., help='alpha of the overlay.')
@click.option('--local-maxima/--no-local-maxima', default=False, help='add local maximal to the plot')
def write_image(image, mask, overlay, output, height, dpi, linewidth, bbox, contour, threshold, alpha, local_maxima):
"""Write image to disk, given input dcm. Possible to add contours and bounding boxes.
"""
image, _ = read_dcm(image, window_leveling=True)
if mask:
mask_arr, _ = read_dcm(mask, window_leveling=False)
if image.shape != mask_arr.shape:
mask_arr = resize(mask_arr, image.shape)
if bbox:
bboxes = bounding_box(mask_arr)
bboxes = [x for x, _ in bboxes]
else:
bboxes = None
if not contour:
# If we do not want to show the contour, we set it to None.
mask_arr = None
if overlay:
overlay, _ = read_image(overlay, force_2d=True)
min_distance = 37 if local_maxima else False
plot_2d(image, height=height, mask=mask_arr, bboxes=bboxes,
overlay=overlay, overlay_cmap='jet', overlay_alpha=alpha,
overlay_contour_color='b', overlay_threshold=threshold,
overlay_local_max_min_distance=min_distance, save_as=output,
dpi=dpi, linewidth=linewidth)
print('Output written to {}.'.format(output))
if __name__ == '__main__':
write_image()
```
#### File: manet/lmdb/dataset.py
```python
import os
import copy
import lmdb
import numpy as np
import simplejson as json
from manet.utils import write_list, read_list
class LmdbDb(object):
def __init__(self, path, db_name):
"""Load an LMDB database, containing a dataset.
The dataset should be structured as image_id: binary representing the contiguous block.
        If image_id is available we also need image_id_metadata, which is a JSON-parseable dictionary.
        This dictionary should contain the keys 'shape' (the array shape) and 'dtype'.
If the keys file is available, the file is loaded, otherwise generated.
Parameters
----------
path : str
Path to folder with LMDB db.
db_name : str
Name of the database.
"""
lmdb_path = os.path.join(path, db_name)
lmdb_keys_path = os.path.join(path, db_name + '_keys.lst')
self.lmdb_path = lmdb_path
self.env = lmdb.open(lmdb_path, max_readers=None, readonly=True, lock=False,
readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
self.length = txn.stat()['entries'] // 2
if os.path.isfile(lmdb_keys_path):
self._keys = read_list(lmdb_keys_path)
else:
# The keys file does not exist, we will generate one, but this can take a while.
with self.env.begin(write=False) as txn:
keys = [key for key, _ in txn.cursor() if '_metadata' not in key]
write_list(keys, lmdb_keys_path, header=['LMDB keys for db {}'.format(db_name)])
self._keys = keys
    def __delitem__(self, key):
        # Note: this only removes the key from the in-memory key list; the LMDB
        # environment itself is opened read-only and is not modified.
        idx = self._keys.index(key)
        self._keys.pop(idx)
def copy(self):
return copy.deepcopy(self)
def has_key(self, key):
return key in self._keys
def keys(self):
return self._keys
def __getitem__(self, key):
with self.env.begin(buffers=True, write=False) as txn:
if key not in self._keys:
raise KeyError(key)
buf = txn.get(key)
meta_buf = txn.get(key + '_metadata')
metadata = json.loads(str(meta_buf))
dtype = metadata['dtype']
shape = metadata['shape']
data = np.ndarray(shape, dtype, buffer=buf)
out = {}
out['data'] = data
out['metadata'] = metadata
return out
def __len__(self):
return self.length
def __repr__(self):
return self.__class__.__name__ + ' (' + self.lmdb_path + ')'
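# --- Hypothetical usage sketch (not part of the original module) ---
# Paths are made up; the database is assumed to have been written with
# examples/create_lmdb_set.py so that '<key>' and '<key>_metadata' entries exist.
if __name__ == '__main__':
    db = LmdbDb('/data/lmdb', 'mammo_train')
    first_key = db.keys()[0]
    sample = db[first_key]
    print(first_key, sample['data'].shape, sample['metadata']['dtype'])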
```
#### File: manet/transform/rescale_transform.py
```python
import numpy as np
from manet._shared.utils import assert_nD
from skimage import __version__
from distutils.version import LooseVersion
from skimage.transform import rescale
SKIMAGE_VERSION = '0.14dev'
def random_rescale_2d(arr, zoom_perc):
"""Rescale an array in 2D.
Parameters
----------
arr : ndarray
array to rescale
    zoom_perc : float
number between 0 and 1 to denote the percentage to scale
Returns
-------
Randomly rescaled array of zoom_perc% and the selected zoom.
"""
assert_nD(arr, 2)
if zoom_perc < 0 or zoom_perc > 1:
raise ValueError('zoom percentage should be in [0, 1]')
zoom_range = 1 + zoom_perc
if LooseVersion(__version__) < SKIMAGE_VERSION:
raise RuntimeError('scikit-image >= %s needed for rescaling.' % SKIMAGE_VERSION)
# scale to [2 - range, range]
zoom = 2 - zoom_range + 2*(zoom_range - 1)*np.random.rand()
arr = rescale(arr, zoom, anti_aliasing=True if zoom < 1 else False,
mode='constant', multichannel=False)
return arr, zoom
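# --- Hypothetical usage sketch (not part of the original module) ---
# With zoom_perc=0.2 the expression above draws the zoom factor uniformly from
# [0.8, 1.2]; requires scikit-image >= 0.14 as checked in the function.
if __name__ == '__main__':
    dummy = np.random.rand(256, 256)
    rescaled, zoom = random_rescale_2d(dummy, 0.2)
    print(rescaled.shape, zoom)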
```
#### File: manet/utils/patch_utils.py
```python
from __future__ import division
import numpy as np
from manet.utils import prob_round, read_dcm
from manet.utils import cast_numpy
from manet.utils.bbox_utils import _split_bbox, _combine_bbox
from manet.utils.mask_utils import random_mask_idx, bounding_box
def extract_patch(image, bbox, pad_value=0):
"""Extract bbox from images, coordinates can be negative.
Parameters
----------
image : ndarray
nD array
bbox : list or tuple
bbox of the form (coordinates, size),
for instance (4, 4, 2, 1) is a patch starting at row 4, col 4 with height 2 and width 1.
pad_value : number
if bounding box would be out of the image, this is value the patch will be padded with.
Returns
-------
ndarray
"""
# Coordinates, size
bbox_coords, bbox_size = _split_bbox(bbox)
# Offsets
l_offset = -bbox_coords.copy()
l_offset[l_offset < 0] = 0
r_offset = (bbox_coords + bbox_size) - np.array(image.shape)
r_offset[r_offset < 0] = 0
region_idx = [slice(i, j) for i, j
in zip(bbox_coords + l_offset,
bbox_coords + bbox_size - r_offset)]
out = image[region_idx]
if np.all(l_offset == 0) and np.all(r_offset == 0):
return out
# If we have a positive offset, we need to pad the patch.
patch = pad_value*np.ones(bbox_size, dtype=image.dtype)
patch_idx = [slice(i, j) for i, j
in zip(l_offset, bbox_size + l_offset - r_offset)]
patch[patch_idx] = out
return patch
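# --- Hypothetical usage sketch (not part of the original module) ---
# Demonstrates the padding behaviour of extract_patch; this helper is
# illustrative only and is not called anywhere in the module.
def _extract_patch_demo():
    image = np.arange(36).reshape(6, 6)
    # The bbox starts two rows above the image, so the first two rows of the
    # returned 4x4 patch are filled with pad_value.
    return extract_patch(image, (-2, 1, 4, 4), pad_value=0)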
def rebuild_bbox(bbox, new_size):
"""Given a bounding box and a requested size return the new bounding box around the center of the old.
If the coordinate would be non-integer, the value is randomly rounded up or down.
Parameters
----------
bbox : tuple, list or ndarray
new_size : tuple or list
Returns
-------
New bounding box.
"""
new_size = cast_numpy(new_size)
bbox_coords, bbox_size = _split_bbox(bbox)
    # The centre of the old bounding box is its origin plus half its size.
    bbox_center = bbox_coords + bbox_size / 2.
new_bbox_coords = prob_round(bbox_center - new_size / 2.)
new_bbox = _combine_bbox(new_bbox_coords, new_size)
return new_bbox
def sym_bbox_from_bbox(point, bbox):
"""Given a a bounding box and a point,
the smallest box containing the bounding box around that
point is returned."""
bbox_coords, bbox_size = _split_bbox(bbox)
# Compute the maximal distance between the center of mass and the bbox.
max_dist = np.max([
(point - bbox_coords).max(),
(bbox_coords + bbox_size - point).max()
])
new_size = (2*max_dist + 1)*np.ones(bbox_size, dtype=int)
new_bbox_coords = point - max_dist*np.ones(bbox_size, dtype=int)
new_bbox = _combine_bbox(new_bbox_coords, new_size)
return new_bbox
def sym_bbox_from_point(point, bbox_size):
"""Given a size and a point, the symmetric bounding box around that point is returned.
If there is ambiguity due to floats, the result is randomly rounded."""
bbox_size = cast_numpy(bbox_size)
point = cast_numpy(point)
bbox_coords = prob_round(point - bbox_size / 2.)
bbox = _combine_bbox(bbox_coords, bbox_size)
return bbox
def sample_from_mask(mask, avoid, num_tries=100):
"""A random index is sampled for a mask in the non-zero values.
As a first try, num_tries iterations randomly select a point and if found,
proceeds. This is more efficient than finding all possible non-zero
values which is O(n x m). If this fails within num_tries iterators, we look
through all non-positive indices. Otherwise, we look through all
possible indexes.
Parameters
----------
    mask : str or ndarray
        Path to a dcm file containing the mask, or the mask itself as an ndarray.
    avoid : ndarray
        Per-axis margin to keep away from the edge of the mask's bounding box.
    num_tries : int
        Number of random draws before falling back to a full search.
Returns
-------
An index sampled within the mask.
"""
if isinstance(mask, basestring):
mask, _ = read_dcm(mask, window_leveling=False, dtype=int)
bbox = bounding_box(mask)
mask = extract_patch(mask, bbox)
    i = 0
    found = False
    rand_idx = None
    while i < num_tries:
        # We sample up to a part of the edge
        rand_idx = tuple(
            [np.random.randint(x, y) for x, y
             in zip(avoid, mask.shape - avoid)])
        if mask[rand_idx] != 0:
            found = True
            break
        i += 1
    # If that didn't work, we unfortunately have to do a full search.
    # Here we do not try to avoid the edge.
    if not found:
        rand_idx = random_mask_idx(mask)
    # Map the sampled index from the cropped bounding box back to the
    # coordinates of the original (uncropped) mask.
    bbox_coords, _ = _split_bbox(bbox)
    rand_idx = cast_numpy(rand_idx)
    idx = tuple(rand_idx + bbox_coords)
    return idx
``` |
{
"source": "JonasTFab/Master",
"score": 2
} |
#### File: Master/iris/visualise_SJI_data.py
```python
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
from matplotlib.contour import QuadContourSet
from astropy.io import fits
#from astropy.wcs import WCS
import numpy as np
"""
Program to visualise any of the four fits image files.
"""
print("Enter index of wavelength to plot:")
print("0 --> 1330")
print("1 --> 1400")
print("2 --> 2796")
print("3 --> 2832")
print("Auto enter 2...")
arg = 2#int(input("Enter: "))
path = "/mn/stornext/d10/HDC2/iris/data/level2/2013/10/09/20131009_232607_3820012146/"
file_1330 = "iris_l2_20131009_232607_3820012146_SJI_1330_t000.fits"
file_1400 = "iris_l2_20131009_232607_3820012146_SJI_1400_t000.fits"
file_2796 = "iris_l2_20131009_232607_3820012146_SJI_2796_t000.fits"
file_2832 = "iris_l2_20131009_232607_3820012146_SJI_2832_t000.fits"
file = [file_1330,file_1400,file_2796,file_2832]
#try:
data = fits.open(path+file[arg])
#except:
# assert False, "Error occured! Need to insert valid argument! [0,1,2,3]"
image_data = data[0].data
data.close()
# neglect extreme values
non_zero = np.where(image_data>0)
vmax = 3*np.mean(image_data[non_zero])
# locate jets
init_percentile = 90
data_p = np.percentile(image_data[non_zero], init_percentile)
jets = np.zeros(image_data.shape)
jets_loc = np.where(image_data>data_p)
jets[jets_loc] = 1
#
idx = 10
fig = plt.figure()
axes = plt.axes([0.1, 0.2, 0.8, 0.65])
# define sliders:
# image slider
slider_im = Slider( plt.axes([0.15, 0.05, 0.7, 0.05]),
label="frame #",
valmin=0,
valmax=image_data.shape[0]-1,
valinit=idx,
valfmt="%i")
# percentile slider
slider_p = Slider( plt.axes([0.40, 0.9, 0.45, 0.05]),
label="Percentile",
valmin=0,
valmax=100,
valinit=init_percentile,
valfmt="%i")
plt.axes(axes)
im = plt.imshow(image_data[idx,:,:], vmin=0, vmax=vmax, cmap="gray")
plt.gca().invert_yaxis() # fix y-axis
plt.colorbar()
# what happens when toggling the contour lines
def toggle_contour(X):
global im_jets
try:
for coll in im_jets.collections:
plt.gca().collections.remove(coll)
except:
im_jets = plt.contour(jets[idx,:,:])
fig.canvas.draw_idle()
# what happens using image slider
def update_im(X):
global idx, im_jets
idx = int(X)
im.set_data(image_data[idx,:,:])
try:
for coll in im_jets.collections:
coll.remove()
im_jets = plt.contour(jets[idx,:,:])
except:
pass
fig.canvas.draw_idle()
# what happens when using the percentile slider
def update_percentile(X):
global jets, im_jets
try:
for coll in im_jets.collections:
coll.remove()
data_p = np.percentile(image_data[non_zero], int(X))
jets_loc = np.where(image_data>data_p)
jets[:,:,:] = 0
jets[jets_loc] = 1
im_jets = plt.contour(jets[idx,:,:])
except:
pass
fig.canvas.draw_idle()
slider_im.on_changed(update_im)
slider_p.on_changed(update_percentile)
button_toggle = Button(plt.axes([0.05, 0.9, 0.2, 0.05]), "Toggle contour")
plt.axes(axes)
button_toggle.on_clicked(toggle_contour)
plt.show()
del image_data
```
#### File: Master/iris/webscraping.py
```python
import requests
"""
A webscraping tool that looks for information from a URL based
on the keyword argument 'get_info_of'.
Currently only tested on
https://iris.lmsal.com/iris2/iris2_chapter02_01.html#inversion-of-iris-mg-ii-h-k-lines-with-iris2
because of its relevance.
Definitely not the most elegant approach, but works fine for now.
"""
def request_info(URL, get_info_of):
"""
    URL is the address of the page to scrape.
    get_info_of is a list of parameter names to extract information for.
"""
# convert page to utf-8 text format
page = requests.get(URL)
page.encoding = page.apparent_encoding
text = page.text
out_info = []
predef = '<li><p><code class="docutils literal notranslate"><span class="pre">'
for param in get_info_of:
# locate the keyword from the URL page
if param=="model": idx_i = text.find(predef+param+"<")
elif param=="mask_extra2":
param = "mask_extra"
idx_i = text.find(predef+param)
idx_i += text[idx_i+1:].find(predef+param)+1
else: idx_i = text.find(predef+param)
#param_text = text[idx_i+len(predef):idx_i+len(predef)+1500]
param_text = text[idx_i+len(predef):idx_i+len(predef)+1500]
if param=="qc_flag": idx_f = param_text.find("</p></li>\n</ul>")
else: idx_f = param_text.find("</p></li>")
param_text = param_text[:idx_f]
# lots of removals
param_text = param_text.replace('%s</span></code>: '%param, "")
param_text = param_text.replace('<code class="docutils literal notranslate"><span class="pre">', "")
param_text = param_text.replace('</span></code>', "")
param_text = param_text.replace('\n', " ")
param_text = param_text.replace('<span class="math notranslate nohighlight">\(', "")
param_text = param_text.replace('\)</span>', "")
param_text = param_text.replace('<a class="reference external" href=', "")
param_text = param_text.replace('<code class="xref std std-numref docutils literal notranslate"><span class="pre">', "")
param_text = param_text.replace('</span> <span', "")
param_text = param_text.replace('class="pre">', "")
param_text = param_text.replace('<a class="reference internal" href="#inversion-of-a-large-raster-file-a-region-of-interest-or-a-masked-area"><span class="std std-numref">S', "")
param_text = param_text.replace('</span></a>', "")
param_text = param_text.replace("</p> <ul> <li><p>", " ")
param_text = param_text.replace("</p></li> <li><p>", ", ")
param_text = param_text.replace("\\", "")
param_text = param_text.replace("\\", "")
out_info.append(param_text)
return out_info
if __name__ == '__main__':
"""
URL = "https://iris.lmsal.com/iris2/iris2_chapter02_01.html#inversion-of-iris-mg-ii-h-k-lines-with-iris2"
get_info = ["qc_flag"]
info = request_info(URL, get_info)
"""
#print(info)
```
#### File: run/output/study_rh_output.py
```python
from helita.sim import rh15d
import matplotlib.pyplot as plt
import numpy as np
import os
import warnings # ignore tedious warnings
warnings.filterwarnings("ignore")
##############################################################
def load_rh_data(folder, print_attributes=False):
# reset IPython kernel
try:
# works only if in IPython kernel
from IPython import get_ipython
get_ipython().magic("reset -sf")
except:
pass
# defines variables that should be global
global DATA, WAVELENGTH, WAVELENGTH_INDEX, WAVELENGTH_SELECTED
#global
DATA = rh15d.Rh15dout(folder)
WAVELENGTH = DATA.files[4].wavelength
#wl_idx = data.files[4].wavelength_indices
WAVELENGTH_INDEX = np.where((WAVELENGTH.values>279.401921894) &
(WAVELENGTH.values<280.501526399))[0]
WAVELENGTH_SELECTED = WAVELENGTH[WAVELENGTH_INDEX]
# print attributes from RH code
if print_attributes==True:
for i, file in enumerate(DATA.files):
print("\n\n--------------------------\n\n")
print("data.files[index]: ", i)
print(file)
##############################################################
# get all output folders within current folder
def get_output_folders():
folders = os.walk(".")
output_folders = []
for path in folders:
if not "wrong" in path[0] and all(x in path[0] for x in ["x","y","dx","dy"]):
output_folders.append(path[0].replace("./", ""))
return output_folders
##############################################################
# plots intensity from RH code
def plot_intensity():
nx = DATA.files[4].nx
ny = DATA.files[4].ny
colx,coly = np.random.randint(nx), np.random.randint(ny)
"""for i in np.linspace(0,nx-1,nx).astype("int"):
for j in np.linspace(0,ny-1,nx).astype("int"):
I = DATA.files[4].intensity[i, j, WAVELENGTH_INDEX]
plt.plot(WAVELENGTH_SELECTED, I/I[0], color="black", alpha=0.01)
"""
mean_I = np.mean(DATA.files[4].intensity[:,:,WAVELENGTH_INDEX], axis=(0,1))
plt.plot(WAVELENGTH_SELECTED, mean_I, ".-")
plt.xlabel("Wavelength (nm)")
plt.ylabel(r"Intensity $(I/I_0)$")
plt.grid()
plt.show()
##############################################################
def kmean_model(data=False, k_create_model=False, test_model=False,
test_profiles=False, wavelength=False):
#if not data:
# assert False, "Did not input data!"
    if data.ndim != 2:
        assert False, "Wrong dimension on input data! Must be two-dimensional."
# if 'test_model' is not defined, train a model instead
if not test_model:
from rh_kmean import create_kmean_from_data
kmean = create_kmean_from_data(data, k_value=k_create_model)
# test a trained model
else:
from rh_kmean import test_kmean_model
test_kmean_model(test_model,
test_profiles=test_profiles,
wavelength=wavelength,
k_nearest=False)
##############################################################
def get_intensity_from_all_folders(plot_converge_box=False):
folders = get_output_folders()
colx = [] # x coordinate of converged columns
coly = [] # y coordinate of converged columns
colx_nc = [] # x columns that did not converge
coly_nc = [] # y columns that did not converge
I = [] # intensities
#folders.pop(3) # as the folder is currently empty
#folders.pop(1)
#folders.append(".")
for f in folders:
print(f)
# goes through all folders that is
# output from rh code
for f_i, folder in enumerate(folders):
load_rh_data(folder=folder, print_attributes=False)
nx = DATA.files[4].nx # number of columns in x
ny = DATA.files[4].ny # number of columns in y
xnum = DATA.files[3].xnum.values # column index x from model
ynum = DATA.files[3].ynum.values # column index y from model
# making sure that same columns are
# not added multiple times
if f_i != 0:
for i in range(nx):
for j in range(ny):
add_col = True
# if column is already added, do not add again
for k in range(len(colx)):
if xnum[i] == colx[k] and ynum[j] == coly[k]:
add_col = False
break
if add_col and ~np.isnan(DATA.files[4].intensity[i,j,:].values).any():
colx.append(xnum[i])
coly.append(ynum[j])
I.append(DATA.files[4].intensity[i, j, WAVELENGTH_INDEX].values)
elif add_col:
colx_nc.append(xnum[i])
coly_nc.append(ynum[j])
# if first output folder, add all columns
else:
for i in range(nx):
for j in range(ny):
if ~np.isnan(DATA.files[4].intensity[i,j,:].values).any():
colx.append(xnum[i])
coly.append(ynum[j])
I.append(DATA.files[4].intensity[i, j, WAVELENGTH_INDEX].values)
else:
colx_nc.append(xnum[i])
coly_nc.append(ynum[j])
    # remove elements from col(x/y)_nc that later
    # converged (in other folders/runs)
pop = []
for i in range(len(colx_nc)):
for j in range(len(colx)):
if colx_nc[i] == colx[j] and coly_nc[i] == coly[j]:
pop.append(i)
#colx_nc.pop(i)
#coly_nc.pop(i)
    # must pop afterwards, otherwise we would remove the wrong index
pop.reverse()
for i in pop:
colx_nc.pop(i)
coly_nc.pop(i)
# plot columns that have and have not
# converged in model
if plot_converge_box:
plt.close()
fig,ax = plt.subplots()
ax.set_title("Converged synthetic spectra of model: %.2f %%"
% (100*len(colx)/1024**2))
ax.plot([0,1024,1024,0,0], [0,0,1024,1024,0], "--", color="black", label="Model box", lw=0.5)
ax.scatter(colx, coly, marker="s", label="Converged columns", s=20, color="green")
ax.scatter(colx_nc, coly_nc, marker="s", label="Non-converged columns", s=20, color="red")
ax.legend()
ax.grid()
fig.show()
# return intensities, converged columns
# and non-converged columns
return np.array(I), np.array([colx,coly]), np.array([colx_nc,coly_nc])
##############################################################
get_intensity_from_all_folders(plot_converge_box=True)
#get_output_folders()
#output_folder = "x0-1024_y0-1024_dxdy25-25"
#load_rh_data(folder=output_folder, print_attributes=False)
#plot_intensity()
#I_data = DATA.files[4].intensity[:,:,WAVELENGTH_INDEX].values
#I_data = I_data[~np.isnan(I_data).any(axis=2)]
"""k = 5
kmean_model(data=I_data, k_create_model=k)
kmean_model(test_model="KMEAN_MODELS/1641_%i"%k,
test_profiles=I_data,
wavelength=WAVELENGTH_SELECTED)"""
#
``` |
{
"source": "jonastheis/das",
"score": 2
} |
#### File: das/client/app.py
```python
import math, argparse
from common.game import Game
from client.network.transport import ClientTransport
from common.constants import TRANSPORT, USERS, DIRECTIONS, init_logger, MSG_TYPE
from common.command import MoveCommand, HealCommand, AttackCommand
from common.visualizer import Visualizer
import threading, random, time
import json
import logging
logger = logging.getLogger("sys." + __name__.split(".")[-1])
gameLogger = logging.getLogger("game." + __name__.split(".")[-1])
class ClientApp():
def __init__(self, servers):
self.game = Game()
self.transport_layer = ClientTransport(self.game, servers)
id, map = self.transport_layer.setup_client()
self.id = id
# replace json objects with user object
self.game.from_serialized_map(map)
logger.info("Setup data -> id: {0}".format(id))
self.transport_layer.id = id
self.transport_layer.listen()
self.my_user = next(filter(lambda el: el.id == self.id, self.game.users))
def generate_commands(self, iterations, malicious):
"""
Run _generate_commands in a new thread
"""
threading.Thread(target=self._generate_commands, args=(iterations, malicious)).start()
def _generate_commands(self, iterations, malicious):
"""
Generate simulation commands
:param iterations: Number of iterations
:return: None
"""
logger.debug("Generating commands for {} iterations".format(iterations))
for i in range(iterations):
if not malicious:
new_command = self.simulate_player()
else:
new_command = self.simulate_malicious_player()
# If there is no dragon and no one with hp<50%, no commands will be generated. In that case do nothing
if new_command:
self.game.commands.append(new_command)
def simulate_player(self):
"""
Simulate the actions of the self player based on game spec
:return: Command Object
"""
######## Option 1: heal a close-by player
heal_candidates = self.get_users_in_range(self.my_user.pos, 5, USERS.PLAYER)
for user in heal_candidates:
if user.id != self.my_user.id:
if user.hp < user.MAX_HP/2:
return HealCommand(self.id, user.id)
######## Option 2: Attack a close dragon if available
attack_candidates = self.get_users_in_range(self.my_user.pos, 2, USERS.DRAGON)
if len(attack_candidates):
target = attack_candidates[0]
return AttackCommand(self.id, target.id)
######## Option 3: Move toward the closest dragon
dragons = list(filter(lambda _user: _user.type == USERS.DRAGON, self.game.users))
if len(dragons):
# sort dragons by distance
# NOTE: we assume that the distance of the closest dragon is more than 2.
# Because otherwise we would have returned with an attack command
dragons.sort(key=lambda dragon: math.fabs(dragon.pos[0]-self.my_user.pos[0]) + math.fabs(dragon.pos[1]-self.my_user.pos[1]))
# Move options: Move vertically toward that dragon or Horizontally
# If they are in the same row/col we know what to do
move_target = dragons[0]
value = None
move_direction = None
if self.my_user.pos[0] == move_target.pos[0]:
move_direction = DIRECTIONS.H
elif self.my_user.pos[1] == move_target.pos[1]:
move_direction = DIRECTIONS.V
else:
# If not, we choose randomly
move_direction = random.choice([DIRECTIONS.H, DIRECTIONS.V])
if move_direction == DIRECTIONS.H:
value = 1 if move_target.pos[1] > self.my_user.pos[1] else -1
else:
value = 1 if move_target.pos[0] > self.my_user.pos[0] else -1
if value and move_direction:
return MoveCommand(self.id, value, move_direction)
logger.warning("Failed to find a simulation for player. Random walk it is...")
return MoveCommand(self.id, random.choice([1, -1]), random.choice([DIRECTIONS.H, DIRECTIONS.V]))
def simulate_malicious_player(self):
"""
Simulate the actions of a malicious player
:return: Command Object
"""
######## Option 1: attack a close-by player
attack_candidates = self.get_users_in_range(self.my_user.pos, 1, USERS.PLAYER)
for user in attack_candidates:
if user.id != self.my_user.id:
return AttackCommand(self.id, user.id)
######## Option 2: Heal a close by dragon
attack_candidates = self.get_users_in_range(self.my_user.pos, 2, USERS.DRAGON)
if len(attack_candidates):
target = attack_candidates[0]
return HealCommand(self.id, target.id)
######## Option 3: Move toward the closest dragon
dragons = list(filter(lambda _user: _user.type == USERS.DRAGON, self.game.users))
if len(dragons):
# sort dragons by distance
# NOTE: we assume that the distance of the closest dragon is more than 2.
# Because otherwise we would have returned with an attack command
dragons.sort(key=lambda dragon: math.fabs(dragon.pos[0]-self.my_user.pos[0]) + math.fabs(dragon.pos[1]-self.my_user.pos[1]))
# Move options: Move vertically toward that dragon or Horizontally
# If they are in the same row/col we know what to do
move_target = dragons[0]
value = None
move_direction = None
if self.my_user.pos[0] == move_target.pos[0]:
move_direction = DIRECTIONS.H
elif self.my_user.pos[1] == move_target.pos[1]:
move_direction = DIRECTIONS.V
else:
# If not, we choose randomly
move_direction = random.choice([DIRECTIONS.H, DIRECTIONS.V])
if move_direction == DIRECTIONS.H:
value = 10 if move_target.pos[1] > self.my_user.pos[1] else -10
else:
value = 10 if move_target.pos[0] > self.my_user.pos[0] else -10
if value and move_direction:
return MoveCommand(self.id, value, move_direction)
logger.warning("Failed to find a simulation for player. Random walk it is...")
return MoveCommand(self.id, random.choice([10, -10]), random.choice([DIRECTIONS.H, DIRECTIONS.V]))
def get_users_in_range(self, point, limit, type=-1):
"""
Utility function for simulation. Return a list of all users (player and dragons) in a range
if type is specified, only users from that type will be returned
        :param point: the root point of the distance
        :param limit: distance limit
        :param type: type of players to return
:return: User[]
"""
users = []
for user in self.game.users:
if (math.fabs(point[0] - user.pos[0])) + (math.fabs(point[1] - user.pos[1])) <= limit:
if type == -1:
users.append(user)
else:
if user.type == type:
users.append(user)
return users
def run(self, commands_per_second, malicious):
"""
        Run _run in a new thread
        :param commands_per_second: number of commands per second
        :param malicious: whether to generate malicious commands
"""
threading.Thread(target=self._run, args=(commands_per_second, malicious)).start()
def _run(self, command_per_second, malicious):
while self.game.up:
time.sleep(1/command_per_second)
# Generate one or max two new commands that will be appended to the end of the list
self._generate_commands(1, malicious)
if len(self.game.commands):
command_to_apply = self.game.commands.pop(0)
self.transport_layer.send_data(command_to_apply.to_json(), MSG_TYPE.COMMAND)
if __name__ == "__main__":
"""
Arguments to the command line:
    --vis to enable the visualizer
    --log-prefix to choose the file name of the log
    --config to pass a list of possibly available game servers
    --malicious to generate malicious commands instead of regular ones
    --log-level to set the numeric logging level:
CRITICAL 50
ERROR 40
WARNING 30
INFO 20
DEBUG 10
NOTSET 0
"""
parser = argparse.ArgumentParser(description="DAS Client app")
parser.add_argument("--vis", action="store_true")
parser.add_argument("--log-prefix", dest="prefix", default="DEFAULT")
parser.add_argument("--config", nargs="?", dest="config", required=False, default='./test/das_config.json')
parser.add_argument("--game-log", dest="gameLog", action="store_false", default=True)
parser.add_argument("--malicious", action="store_true", default=False)
parser.add_argument("--log-level", dest="logLevel", default="20")
args = parser.parse_args()
args.logLevel = int(args.logLevel)
# get available server
config = json.load((open(args.config)))
servers = []
for server in config['servers']:
servers.append((server.split(':')[0], int(server.split(':')[1])))
init_logger("log/client_{}.log".format(args.prefix), args.gameLog, args.logLevel)
client = ClientApp(servers)
client.run(.5, args.malicious)
# start visualization
if args.vis:
visualizer = Visualizer(client.game, client.id)
visualizer.visualize()
```
#### File: das/common/command.py
```python
import json, math, time
from common.constants import *
from common.user import User
import logging
logger = logging.getLogger("sys." + __name__.split(".")[-1])
gLogger = logging.getLogger("game." + __name__.split(".")[-1])
class Command(object):
"""
The common pattern for each command's apply is as follows:
    They should all return Boolean values indicating if something went wrong or not
    Each command will optionally override to_json_broadcast
    Note that the generic to_json should NEVER be overridden because it is used on the client side to send the message
    to the server
    TODO: currently, only Join, Leave and Move have this pattern
"""
def __init__(self, client_id, timestamp):
self.type = type(self).__name__
self.timestamp = timestamp
self.client_id = client_id
def apply(self, game):
gLogger.info("Applying commands {}".format(self.__str__()))
def to_json(self):
"""
Generic method for converting an object to json
:return:
"""
return json.dumps(self.__dict__)
def to_json_broadcast(self):
"""
To be used when sending a command from the server to all other clients.
"""
return self.to_json()
@classmethod
def from_json(cls, json_str):
json_data = json.loads(json_str)
if json_data['type'] == 'MoveCommand':
command_obj = MoveCommand(
json_data['client_id'],
json_data['value'],
json_data['direction'],
json_data['timestamp'])
elif json_data['type'] == 'NewPlayerCommand':
command_obj = NewPlayerCommand(
json_data['client_id'],
json_data['timestamp'],
json_data['player_dict'])
elif json_data['type'] == 'PlayerLeaveCommand':
command_obj = PlayerLeaveCommand(
json_data['client_id'],
json_data['is_killed'],
json_data['timestamp'])
elif json_data['type'] == 'AttackCommand':
command_obj = AttackCommand(
json_data['client_id'],
json_data['target_id'],
json_data['timestamp'])
elif json_data['type'] == 'HealCommand':
command_obj = HealCommand(
json_data['client_id'],
json_data['target_id'],
json_data['timestamp'])
        else:
            gLogger.error("Error: Unrecognized command received. Skipping...")
            return None
        return command_obj
def __str__(self):
return '%s(%s)' % (
type(self).__name__,
', '.join('%s=%s' % item for item in vars(self).items())
)
def __repr__(self):
return self.__str__()
def get_user_by_id(self, game, id):
"""
Utility func
return a user based on its id
will search the entire map and compare values with the given id
"""
for i in range(game.row):
for j in range(game.col):
if game.map[i][j] != 0 and game.map[i][j].id == id :
return game.map[i][j]
return 0
def get_distance(self, pos, sop):
"""
Utility func
"""
return math.fabs(pos[0] - sop[0]) + math.fabs(pos[1] - sop[1])
class NewPlayerCommand(Command):
def __init__(self, client_id, timestamp=0, player_dict=None):
Command.__init__(self, client_id, timestamp)
self.initial_state = ""
self.player_dict = player_dict
def apply(self, game):
Command.apply(self, game)
new_user = User(USERS.PLAYER, self.client_id)
if game.is_server:
game.add_user(new_user)
self.player_dict = new_user.to_json()
self.initial_state = game.serialize()
else:
for (key, val) in self.player_dict.items():
setattr(new_user, key, val)
game.add_user(new_user, new_user.pos[0], new_user.pos[1])
gLogger.info("Command {} successfully executed".format(self.__str__()))
return True
def to_json_broadcast(self):
dict = self.__dict__.copy()
del dict["initial_state"]
return json.dumps(dict)
class PlayerLeaveCommand(Command):
def __init__(self, client_id, is_killed=False, timestamp=0):
Command.__init__(self, client_id, timestamp)
self.is_killed = is_killed
def apply(self, game):
Command.apply(self, game)
game.remove_user_by_id(self.client_id)
gLogger.info("Command {} successfully executed".format(self.__str__()))
return True
class MoveCommand(Command):
def __init__(self, client_id, value, direction, timestamp=0):
Command.__init__(self, client_id, timestamp)
self.value = value
self.direction = direction
def apply(self, game):
Command.apply(self, game)
_user = self.get_user_by_id(game, self.client_id)
if _user == 0:
gLogger.error("Command {} failed. No Player Found".format(self.__str__()))
return False
# Dragons cannot move
if _user.type == USERS.DRAGON:
gLogger.error("Command {} failed. Dragons cannot move".format(self.__str__()))
return False
_row, _col = _user.pos
target_row = _row
target_col = _col
# make sure only 1 step
if abs(self.value) != 1:
gLogger.error("Command {} failed. Invalid step count".format(self.__str__()))
return False
if self.direction == DIRECTIONS.V:
target_row += self.value
elif self.direction == DIRECTIONS.H:
target_col += self.value
# Check if target is in boundaries of the map
if target_row >= game.row or target_col >= game.col or target_row < 0 or target_col < 0:
gLogger.error("Command {} failed. Position [{}, {}] out of scope of game".format(self.__str__(), target_row, target_col))
return False
# Check if target pos is full
if game.map[target_row][target_col] != 0:
gLogger.error("Command {} failed. Position [{}, {}] already full".format(self.__str__(), target_row, target_col))
return False
# update game map
game.map[_row][_col] = 0
game.map[target_row][target_col] = _user
# update user position
_user.pos = [target_row, target_col]
gLogger.info("Command {} successfully executed".format(self.__str__()))
return True
def __str__(self):
return Command.__str__(self) + "[value {} direction {}]".format(self.value, self.direction)
class AttackCommand(Command):
def __init__(self, client_id, target_id, timestamp=0):
Command.__init__(self, client_id, timestamp)
self.target_id = target_id
self.user_id = client_id
def apply(self, game, response_queue=None):
Command.apply(self, game)
attacker = self.get_user_by_id(game, self.user_id)
target = self.get_user_by_id(game, self.target_id)
if attacker == 0:
gLogger.error("Command {} failed. Attacker not found".format(self.__str__()))
return False
if attacker.type != USERS.PLAYER:
gLogger.error("Commnd {} failed. Dragons can't attack autonomously".format(self.__str__()))
return False
if target == 0:
gLogger.error("Command {} failed. Target not found".format(self.__str__()))
return False
if target.type != USERS.DRAGON:
gLogger.error("Command {} failed. Can't attack users".format(self.__str__()))
return False
attacker_row, attacker_col = attacker.pos
target_row, target_col = target.pos
distance = self.get_distance([attacker_row, attacker_col], [target_row, target_col])
if distance > 2:
gLogger.error("Command {} failed. Attack distance bigger than 2".format(self.__str__()))
return False
target.hp -= attacker.ap
attacker.hp -= target.ap
if target.hp <= 0:
game.remove_user_by_id(self.target_id)
if attacker.hp <= 0:
game.remove_user_by_id(self.client_id)
if game.is_server:
response_queue.put(PlayerLeaveCommand(self.client_id, True, timestamp=time.time()))
gLogger.info("Command {} successfully executed".format(self.__str__()))
return True
class HealCommand(Command):
def __init__(self, client_id, target_id, timestamp=0):
Command.__init__(self, client_id, timestamp)
self.target_id = target_id
def apply(self, game):
Command.apply(self, game)
healer = self.get_user_by_id(game, self.client_id)
target = self.get_user_by_id(game, self.target_id)
if healer == 0:
gLogger.error("Command {} failed. Healer not found".format(self.__str__()))
return False
if healer.type != USERS.PLAYER:
gLogger.error("Commnd {} failed. Dragons can't heal".format(self.__str__()))
return False
if target == 0:
gLogger.error("Command {} failed. Target not found".format(self.__str__()))
return False
if target.type != USERS.PLAYER:
gLogger.error("Command {} failed. Can't heal a dragon".format(self.__str__()))
return False
if self.client_id == self.target_id:
gLogger.error("Command {} failed. Can't heal yourself".format(self.__str__()))
return False
healer_row, healer_col = healer.pos
target_row, target_col = target.pos
distance = self.get_distance([healer_row, healer_col], [target_row, target_col])
if distance > 5:
gLogger.error("Command {} failed. Heal distance bigger than 5".format(self.__str__()))
return False
heal_amount = healer.ap
target.hp += heal_amount
if target.hp > target.MAX_HP:
target.hp = target.MAX_HP
gLogger.info("Command {} successfully executed".format(self.__str__()))
return True
```
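A minimal round-trip sketch of the serialization pattern described in the `Command` docstring above (the import paths follow the `das/common` layout and the timestamp value is illustrative, not taken from the source):
```python
from common.command import Command, MoveCommand
from common.constants import DIRECTIONS

# Client side: build a command and serialize the whole object, including its 'type' field.
cmd = MoveCommand("client-1", 1, DIRECTIONS.H, timestamp=1234.5)
wire = cmd.to_json()

# Server side: from_json dispatches on the 'type' field and rebuilds the concrete subclass.
restored = Command.from_json(wire)
assert isinstance(restored, MoveCommand)
assert restored.value == 1 and restored.direction == DIRECTIONS.H
```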
#### File: das/common/constants.py
```python
import logging
import os
class ACTIONS:
MOVE = 'move'
HEAL = 'heal'
ATTACK = 'attack'
class DIRECTIONS:
H = 'h'
V = 'v'
class TRANSPORT:
host = '0.0.0.0'
port = 8000
UDP_DELAY_PER_PLAYER = .001
class USERS:
PLAYER = 'p'
DRAGON = 'd'
class GLOBAL :
MAX_LOG_LENGTH = -1
class MSG_TYPE:
COMMAND = 'cmd'
INIT = 'init'
EXIT = 'exit'
BCAST = 'bc'
HBEAT = 'hb'
INIT_REQ = 'init_req'
INIT_RES = 'init_res'
PING = 'ping'
LOG = 'log_req'
class HEARTBEAT:
INIT = 30
INC = 10
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
game_file = None
def add_coloring_to_emit_ansi(fn):
# add methods we need to the class
def new(*args):
levelno = args[1].levelno
if(levelno>=50):
color = '\x1b[31m' # red
elif(levelno>=40):
color = '\x1b[31m' # red
elif(levelno>=30):
color = '\x1b[33m' # yellow
elif(levelno >= 20):
color = '\x1b[32m' # green
elif(levelno>=10):
            color = '\x1b[94m' # light blue
else:
color = '\x1b[0m' # normal
args[1].levelname = color + args[1].levelname + '\x1b[0m' # normal
args[1].name = '.'.join([bcolors.BOLD + args[1].name.split(".")[0] + bcolors.ENDC] + args[1].name.split(".")[1:])
#print "after"
return fn(*args)
return new
def init_logger(file, separate_game_log=True, log_lvl=10):
global game_file
# Two base logger types
sysLogger = logging.getLogger("sys")
gameLogger = logging.getLogger("game")
logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
#If log directory doesn't exist creates it
dirname = os.path.dirname(file)
if not os.path.exists(dirname):
os.makedirs(dirname)
# clear contents from previous run
open(file, 'w').close()
fileHandler = logging.FileHandler(file)
formatter = logging.Formatter(bcolors.HEADER + '%(asctime)s' + bcolors.ENDC + ' ' + bcolors.UNDERLINE + '%(name)s' + bcolors.ENDC + ' ' + bcolors.BOLD + ' %(levelname)s' + bcolors.ENDC + ' :: %(message)s')
fileHandler.setFormatter(formatter)
sysLogger.addHandler(fileHandler)
sysLogger.setLevel(log_lvl)
sysLogger.addHandler(fileHandler)
gameLogger.setLevel(log_lvl)
if separate_game_log:
game_file = file.split(".")[0] + "_game.log"
open(game_file, 'w').close()
gameFileHandler = logging.FileHandler(game_file)
gameFileHandler.setFormatter(formatter)
gameLogger.addHandler(gameFileHandler)
else:
gameLogger.addHandler(fileHandler)
sysLogger.info("System Logger initialized")
sysLogger.info("Game Logger initialized")
def get_game_log():
with open(game_file) as gf:
logs = gf.read()
return logs
def set_game_log(log):
with open(game_file, 'w') as gf:
logging.getLogger("sys").info("Previous game logs written to {}".format(game_file))
gf.write(log)
```
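A short usage sketch for the logging setup above, mirroring how the client and server entry points call it (file path and module name are illustrative):
```python
import logging
from common.constants import init_logger

# Creates the "sys" and "game" base loggers with colored, file-backed handlers.
init_logger("log/example.log", separate_game_log=True, log_lvl=logging.DEBUG)

# Modules then grab children of the two base loggers, as done throughout the code base.
sys_log = logging.getLogger("sys.example")
game_log = logging.getLogger("game.example")
sys_log.info("system event")   # written to log/example.log
game_log.info("game event")    # written to log/example_game.log
```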
#### File: das/common/network_util.py
```python
import struct
import json
SIZE_BYTES = 4
STRUCT_IDENTIFIER = ">I" # big-endian unsigned short (2 bytes)
def pack(data):
"""
Wraps a message which is to be sent via a TCP socket according to the protocol:
    big-endian 4-byte message length + message in bytes
:param data: the data to be sent
:return: the prepared message as a byte string
"""
data_bytes = data.encode('utf8')
size = struct.pack(STRUCT_IDENTIFIER, len(data_bytes))
return b"".join([size, data_bytes])
def read_message(socket):
"""
    Receives a message of variable length from a TCP socket.
    The first four bytes in big-endian order signal the size of the current message to receive.
:param socket: the socket to receive from
:return: the full received message as a utf8 string
"""
    # read SIZE_BYTES bytes to determine the size of the message
size_data = read_bytes_from_socket(socket, SIZE_BYTES)
# get message size from struct
message_size = struct.unpack(STRUCT_IDENTIFIER, size_data)[0]
# read actual message
data = read_bytes_from_socket(socket, message_size)
return data.decode('utf8')
def read_bytes_from_socket(socket, size):
"""
Reads #size bytes from the socket
:param socket: the socket to receive from
:param size: the amount of bytes to read
:return:
"""
total_len = 0
total_data = []
recv_size = size
# read #size bytes from socket
while total_len < size:
try:
# note: socket.recv can return before receiving full size
sock_data = socket.recv(recv_size)
except:
raise TCPConnectionError("Socket Closed")
# if empty -> connection is closing
if not sock_data:
raise TCPConnectionError("Connection error while reading data.")
else:
total_data.append(sock_data)
# calculate total_len from all chunks in total_data
total_len = sum([len(i) for i in total_data])
# adjust receive size to not receive too much data (e.g. from the next message)
recv_size = size - total_len
return b"".join(total_data)
def send_udp_message(socket, address, type, data=None):
"""
    Sends a UDP message via the socket to the given address with type and data set in JSON.
:param socket: the socket to send the message
:param address: the address to send the message to
:param type: the message type
:param data: the message data
:return:
"""
if data:
data = json.dumps({"type": type, "payload": data})
else:
data = json.dumps({"type": type})
socket.sendto(data.encode('utf-8'), address)
def read_udp_message(socket):
"""
Reads a UDP message from the socket and unpacks it.
:param socket: the socket to receive from
:return: json decoded message and address (host, port) as a tuple
"""
data, address = socket.recvfrom(4096)
data = data.decode('utf-8')
return json.loads(data), address
class TCPConnectionError(Exception):
def __init__(self, msg="Error with the connection"):
super(TCPConnectionError, self).__init__(msg)
```
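The framing helpers above can be exercised without a real server via `socket.socketpair()`; a minimal sketch (import path assumed from the `das/common` layout):
```python
import socket
from common.network_util import pack, read_message

left, right = socket.socketpair()
try:
    # pack() prepends a 4-byte big-endian length, read_message() strips it again.
    left.sendall(pack('{"type": "ping"}'))
    assert read_message(right) == '{"type": "ping"}'
finally:
    left.close()
    right.close()
```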
#### File: server/core/engine.py
```python
import multiprocessing
import time
import queue
import threading
from common import game
from common.command import AttackCommand
from common.user import User
from common.visualizer import Visualizer
import logging
logger = logging.getLogger("sys." + __name__.split(".")[-1])
class Engine(multiprocessing.Process):
"""
Runs as a separate process to execute the game logic based on inputs (commands) of clients.
Once launched it remains running.
"""
def __init__(self, request_queue, response_queue, meta_request_queue, meta_response_queue, initial_users, vis=False):
multiprocessing.Process.__init__(self)
self.request_queue = request_queue
self.response_queue = response_queue
self.meta_request_queue = meta_request_queue
self.meta_response_queue = meta_response_queue
self.game = game.Game()
self.game.is_server = True
self.vis = vis
for user in initial_users:
self.game.add_user(User(user['type']), user['r'], user['c'])
self.T = 500
logger.info("Engine successfully started.")
def run(self):
"""
Overloaded function provided by multiprocessing.Process. Called upon start().
"""
# start meta_request thread
self.process_meta_requests()
# visualizer needs to run in main thread
if self.vis:
threading.Thread(target=self._run).start()
# start visualization
visualizer = Visualizer(self.game)
visualizer.visualize()
else:
self._run()
def _run(self):
while True:
self.process_commands()
# periodically process commands
time_ms = int(round(time.time() * 1000))
time_sync = int(time_ms/self.T) * self.T + self.T
time.sleep((time_sync - time_ms) / 1000)
def get_all_requests(self):
"""
Gets all events in a burst manner from the queue.
:return: list of all the events sorted by timestamp
"""
current_tick = time.time()
all_commands = []
exec_commands = []
while True:
try:
all_commands.append(self.request_queue.get_nowait())
except queue.Empty:
break
# sort list by timestamp
threshold = self.T / 2000
for command in all_commands:
if current_tick - command.timestamp < threshold :
logger.debug("Putting back {}".format(command))
self.request_queue.put(command)
else:
exec_commands.append(command)
exec_commands.sort(key=lambda command: (command.timestamp, command.client_id))
return exec_commands
def process_commands(self):
"""
Processes all currently available command/events.
"""
commands = self.get_all_requests()
if len(commands):
logger.debug("Interval reached. Processing {} commands".format(len(commands)))
while len(commands):
command = commands.pop(0)
if type(command) is AttackCommand:
status = command.apply(self.game, self.response_queue)
else:
status = command.apply(self.game)
# only send to clients if successful
if status:
self.response_queue.put(command)
else:
logger.debug("Interval reached. No command to process")
def process_meta_requests(self):
"""
Starts the meta_request thread.
"""
threading.Thread(target=self._process_meta_requests).start()
def _process_meta_requests(self):
"""
Waits (blocking) for requests from the server process and handles them.
"""
while True:
req = self.meta_request_queue.get()
if req['type'] == "get_map":
self.meta_response_queue.put(self.game.serialize())
```
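The engine wakes up on fixed T-millisecond boundaries rather than sleeping a constant interval; the arithmetic from `_run`, shown in isolation (a sketch, with T in milliseconds as in `Engine.__init__`):
```python
import time

T = 500
time_ms = int(round(time.time() * 1000))
time_sync = int(time_ms / T) * T + T    # next multiple of T strictly after now
sleep_s = (time_sync - time_ms) / 1000  # seconds to sleep until that boundary
assert 0 < sleep_s <= T / 1000
```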
#### File: server/network/base_connection.py
```python
import json
import threading
import select
from common.network_util import read_message, pack, TCPConnectionError
from common.constants import GLOBAL
import logging
logger = logging.getLogger("sys." + __name__.split(".")[-1])
class BaseConnection(object):
"""
A wrapper for a TCP socket.
Runs in separate thread and listens for input of the socket.
"""
def __init__(self, socket, address, id):
self.socket = socket
self.address = address
self.id = id
self.up = True
self.inputs = [self.socket]
self.mythread = threading.Thread(target=self._handle)
self.mythread.start()
def __str__(self):
return "Connection@{}:{}".format(self.address[0], self.address[1])
def _handle(self):
"""
Listens via blocking select for inputs from the socket.
Runs in a separate thread.
"""
while self.up:
            # This is important because we might close the socket while it is waiting in select
            # For now a continue is enough and in the next loop self.up is False
try:
read_sockets, write_sockets, error_sockets = select.select(self.inputs, [], self.inputs)
except:
logger.error("Error while checking connection sockets")
continue
for sock in read_sockets:
# read_packet raises exception if there is no data -> client is disconnecting
try:
data = read_message(sock)
logger.debug("{} :: Received [{}]".format(self.__str__(), data[:GLOBAL.MAX_LOG_LENGTH]))
# if there is data pass it to the game engine
self.on_message(data)
except TCPConnectionError:
self.shutdown()
# shutdown connection if there is an error
for sock in error_sockets:
self.shutdown()
def on_message(self, data):
"""
        Called whenever a message is received on the socket.
        :param data: the received message data
        :return:
"""
pass
def shutdown(self):
"""
Shuts down the socket, makes sure that the thread can die.
"""
logger.warning("Shutting down [{}]".format(self.__str__()))
self.up = False
self.socket.close()
def send(self, data, type=None):
"""
Sends the data via the socket.
:param data: the data to be sent
"""
if not type is None:
data = json.dumps({'type': type, 'payload': data})
logger.debug("{} :: sending message [{}]".format(self.__str__(), data[:GLOBAL.MAX_LOG_LENGTH]))
self.socket.sendall(pack(data))
```
#### File: server/network/client_connection.py
```python
import json
import time
from .base_connection import BaseConnection
from common import command
from common.network_util import pack
from common.constants import MSG_TYPE, GLOBAL, TRANSPORT
import logging
logger = logging.getLogger("sys." + __name__.split(".")[-1])
class ClientConnection(BaseConnection):
"""
A wrapper for a TCP socket.
Runs in separate thread and listens for input of the socket.
"""
def __init__(self, connection, address, id, server):
self.server = server
BaseConnection.__init__(self, connection, address, id)
def __str__(self):
return "Client@" + BaseConnection.__str__(self).split("@")[1]
def on_message(self, data):
json_data = json.loads(data)
if json_data['type'] == MSG_TYPE.COMMAND:
command_obj = command.Command.from_json(json_data['payload'])
# set time of command to synchronised server time
command_obj.timestamp = time.time()
self.server.request_command(command_obj)
elif json_data['type'] == MSG_TYPE.INIT:
id = json_data['payload']
if id == '':
# send new player command to game engine
self.server.request_command(command.NewPlayerCommand(self.id, timestamp=time.time()))
# setup_client will be called in ClientServer dispatch method, once client is placed on map
else:
# player is rejoining
old_id = self.id
self.id = id
# send init message to client
self.setup_client()
# only now add client to connections so that it starts receiving updates
self.server.add_connection(self, old_id, self.id)
else:
logger.warning("Received an unknown message type [{}]".format(data))
def setup_client(self, initial_map=None):
"""
Sends the init message to the client with id and the initial map.
:param initial_map: if not provided will be retrieved from the engine
"""
if initial_map is None:
self.server.meta_request_queue.put({"type": "get_map"})
# if we start using the meta queue for other purposes we need to properly process it
initial_map = self.server.meta_response_queue.get()
data = json.dumps({
'type': MSG_TYPE.INIT,
'id': self.id,
'initial_map': initial_map
})
logger.debug("{} :: sending init message [{}]".format(self.__str__(), data[:GLOBAL.MAX_LOG_LENGTH]))
self.socket.sendall(pack(data))
def shutdown(self, b_cast=True):
"""
Shuts down the socket, makes sure that the thread can die and notifies the server about the ending connection.
"""
BaseConnection.shutdown(self)
self.server.remove_connection(self.id)
# need to notify engine about the connection loss with the client -> so he can be removed from the field
if b_cast:
self.server.request_command(command.PlayerLeaveCommand(self.id, is_killed=False, timestamp=time.time()))
def shutdown_killed(self):
"""
Shuts down the socket, makes sure that the thread can die.
"""
BaseConnection.shutdown(self)
self.server.remove_connection(self.id)
```
#### File: server/network/p2p_connection.py
```python
import json
from .base_connection import BaseConnection
from common import command
from common.constants import MSG_TYPE, HEARTBEAT, get_game_log, set_game_log
import logging
logger = logging.getLogger("sys." + __name__.split(".")[-1])
class P2PConnection(BaseConnection):
def __init__(self, socket, address, id, server):
self.server = server
self.heartbeat = HEARTBEAT.INIT
self.peer_connections = 0
BaseConnection.__init__(self, socket, address, id)
def __str__(self):
return "Peer@" + BaseConnection.__str__(self).split("@")[1] + " [hb:{}]".format(self.heartbeat)
def on_message(self, data):
json_data = json.loads(data)
if json_data['type'] == MSG_TYPE.HBEAT:
self.heartbeat += HEARTBEAT.INC
self.peer_connections = int(json_data['payload']['num_connections'])
elif json_data['type'] == MSG_TYPE.BCAST:
# Put the message in the queue
command_obj = command.Command.from_json(json_data['command'])
self.server.request_queue.put(command_obj)
elif json_data['type'] == MSG_TYPE.INIT_REQ:
self.server.meta_request_queue.put({"type": "get_map"})
# if we start using the meta queue for other purposes we need to properly process it
initial_map = self.server.meta_response_queue.get()
# send initial map and pending commands so that the new server will be at the same state
self.send(json.dumps({
'type': MSG_TYPE.INIT_RES,
'initial_map': initial_map,
'log': get_game_log(),
'pending_commands': self.server.get_current_commands()
}))
elif json_data['type'] == MSG_TYPE.INIT_RES:
self.server.init_queue.put(json_data['initial_map'])
set_game_log(json_data['log'])
for command_json in json_data['pending_commands']:
self.server.request_queue.put_nowait(command.Command.from_json(command_json))
else:
logger.warning("Unrecognized message received from peer [{}]".format(data))
```
#### File: das/test/console_control.py
```python
import subprocess
import sys
import os
import psutil
import time
servers = {}
clients = {}
fileDir = os.path.dirname(os.path.realpath('__file__'))
sys.path.append(fileDir)
def extract_id(text):
id = -1
try:
id = int(text.split(' ')[1])
except:
        print('No id/number provided. Please try again.')
return id
class Server(object):
@staticmethod
def handle(text):
if text.startswith('server.init'):
no = extract_id(text)
for i in range(1, no+1):
if i == 1:
Server.start(i, master_node=True)
else:
Server.start(i)
time.sleep(0.3)
elif text == 'server.status':
Server.status()
elif text == 'server.killall':
Server.kill_all()
elif text.startswith('server.kill'):
Server.kill(extract_id(text))
elif text.startswith('server.start'):
Server.start(extract_id(text))
else:
print('Unknown command, type help to see all available commands.')
@staticmethod
def status():
for id, server in servers.items():
if server is None:
status = 'killed'
client_connections = 0
p2p_connections = 0
else:
status = 'running'
# 1 UDP socket, 2 TCP server sockets #### and x.laddr.port == id*1000+10
active_connections = list(filter(lambda x: x.status == 'ESTABLISHED', server.connections()))
client_connections = len(list(filter(lambda x: x.laddr.port == id*10000, active_connections)))
p2p_connections = len(active_connections) - client_connections
print('Server {}: {}\tp2p: {}, clients: {}'.format(id, status, p2p_connections, client_connections))
@staticmethod
def kill(id):
if id == -1:
return
if id in servers:
server = servers[id]
if server is None:
return
for child in server.children():
child.kill()
server.kill()
servers[id] = None
@staticmethod
def kill_all():
for server_id in servers:
Server.kill(server_id)
@staticmethod
def start(id, master_node=False):
if id == -1:
return
port = id * 10000
arguments = ['python', '-m', 'server.app',
'--config', 'test/das_config.json',
'--log-prefix', str(id),
'--port', str(port)]
if master_node:
arguments.extend(['--users', 'test/das_hell.json'])
print("Starting Server {} with {}".format(id, arguments))
proc = subprocess.Popen(arguments)
servers[id] = psutil.Process(proc.pid)
class Client(object):
@staticmethod
def handle(text):
if text == 'client.status':
Client.status()
elif text == 'client.killall':
Client.kill_all()
elif text.startswith('client.init'):
Client.create(extract_id(text))
elif text.startswith('client.kill'):
Client.kill(extract_id(text))
elif text.startswith('client.start'):
Client.start(extract_id(text))
else:
print('Unknown command, type help to see all available commands.')
@staticmethod
def create(count):
for i in range(1, count+1):
Client.start(i)
time.sleep(0.5)
@staticmethod
def status():
for id, client in clients.items():
if client is None:
status = 'killed'
connected_to = 0
else:
try:
status = client.as_dict()['status']
connected_to = list(filter(lambda x: x.status == 'ESTABLISHED', client.connections()))[0].raddr.port
connected_to = int(connected_to / 10000)
except Exception as e:
connected_to = 0
status = 'killed'
clients[id] = None
print('Client {}: {}\tserver: {}'.format(id, status, connected_to))
@staticmethod
def kill(id):
if id == -1:
return
if id in clients:
client = clients[id]
if client is None:
return
try:
client.kill()
except:
pass
clients[id] = None
@staticmethod
def kill_all():
for client_id in clients:
Client.kill(client_id)
@staticmethod
def start(id):
if id == -1:
return
arguments = ['python', '-m', 'client.app',
'--config', 'test/das_config.json',
'--log-prefix', str(id)]
print("Starting Client {} with {}".format(id, arguments))
proc = subprocess.Popen(arguments)
clients[id] = psutil.Process(proc.pid)
if __name__ == '__main__':
while True:
text = input('>> ')
if text == 'help':
print("""
server.status - prints up/down status of servers and currently active connections/connected clients
server.init {no} - creates {no} servers, the first with map 'test/das_hell.json'
server.start {id} - starts server with {id}
server.kill {id} - kills server with {id}
server.killall - kills all servers
client.status - prints up/down status of clients and to which server they are connected
client.init {no} - creates {no} clients
client.start {id} - starts client with {id}
client.kill {id} - kills client with {id}
client.killall - kills all clients
help - shows this help
exit - quits the console helper
""")
elif text == 'exit':
Client.kill_all()
Server.kill_all()
break
elif text.startswith('server'):
Server.handle(text)
elif text.startswith('client'):
Client.handle(text)
else:
print('Unknown command, type help to see all available commands.')
```
#### File: das/test/simulation.py
```python
import argparse
import emulation.GTAEventsReader
import time
import os
import subprocess
import sys
from threading import Thread
from threading import Lock
from decimal import Decimal
from common.constants import init_logger
import logging
SLAVE_NODE_SERVER_NAME_PREFIX = "slave_"
MASTER_NODE_SERVER_NAME = "master_node"
MASTER_SERVER = " 0"
SIMULATION_SERVERS_WARMUP_TIME = 3
logger = logging.getLogger("sys." + __name__.split(".")[-1])
LOCATION_CLIENT_APP_WINDOWS = '..\\client\\app.py'
LOCATION_CLIENT_APP_LINUX = '../client/app.py'
LOCATION_SERVER_APP_WINDOWS = '..\\server\\app.py'
LOCATION_SERVER_APP_LINUX = '../server/app.py'
# This method adds a client to the simulation.
# Input: lock: lock object for concurrency control,
# event_details: the details related to the connection of the client to the simulation.
# adjTimestamp: an adjustment on the timestamp to discount the time that took for the event to be triggered.
# Output: none.
def addClient(lock, event_details, clientApp, adjTimestamp):
    # If the delayed time is longer than the timestamp at which the event should be triggered, the sleep is skipped
    # and the event is triggered immediately.
if event_details.timeStamp - Decimal(adjTimestamp) > 0:
time.sleep(event_details.timeStamp - Decimal(adjTimestamp))
# command line to start the client application: python ../client/app.py --log-prefix player_id
proc = subprocess.Popen([sys.executable, "-m", "client.app", '--log-prefix', event_details.playerId, '--config','./test/das_config.json'])
logger.debug("This is the playerId: " + event_details.playerId + " and this is the PID:" + str(proc.pid))
with lock:
playersAndProcesses[event_details.playerId] = proc.pid
logger.info("Player: " + event_details.playerId + " joining the game.")
return
# This method removes a client from the simulation.
# Input: lock: lock object for concurrency control,
# event_details: the details related to the connection of the client to the simulation.
# adjTimestamp: an adjustment on the timestamp to discount the time that took for the event to be triggered.
# isWindows: if the current execution is on Windows.
# Output: none.
def removeClient(lock, event_details, isWindows, adjTimestamp):
with lock:
numberOfPlayers = len(playersAndProcesses)
if numberOfPlayers > 0:
if event_details.timeStamp - Decimal(adjTimestamp) > 0:
time.sleep(event_details.timeStamp - Decimal(adjTimestamp))
if not isWindows:
commandLine = "kill -9 " + str(playersAndProcesses[event_details.playerId])
else:
#Killing the process using a Windows command
commandLine = "taskkill /f /pid " + str(playersAndProcesses[event_details.playerId])
# Executing the command to kill the respective process.
os.system(commandLine)
with lock:
playersAndProcesses[event_details.playerId] = None
numberOfPlayers = len(playersAndProcesses)
logger.info("Player: " + event_details.playerId + " leaving the game.")
else:
logger.info("This player currently doesn't have an active session, so no Logout action will be performed.")
return
if numberOfPlayers == 0:
logger.info("This was the last player to leave the game, end of simulation.")
def triggerJoinLeaveEvents(listOfEventsToTrigger, lock, clientApp, delayBetweenEvents):
listOfThreads = []
adjustmentTimestamp = 0
for event in listOfEventsToTrigger:
if event.eventType == emulation.GTAEventsReader.PLAYER_LOGIN:
thread = Thread(target=addClient, args=(lock, event, clientApp, adjustmentTimestamp,))
thread.start()
listOfThreads.append(thread)
else:
if event.eventType == emulation.GTAEventsReader.PLAYER_LOGOUT:
thread = Thread(target=removeClient, args=(lock, event, runningWindows, adjustmentTimestamp,))
thread.start()
listOfThreads.append(thread)
#Assuming that the time between events is respected also for Login/logout
time.sleep(delayBetweenEvents)
adjustmentTimestamp += delayBetweenEvents
# Waits for all threads to finish
for single_thread in listOfThreads:
single_thread.join()
def addServer(event_details, serverApp, configFile, serverName, target_port, is_master):
time.sleep(event_details.timeStamp)
# Starting the server
# command line to start the base server: python ../server/app.py --log-prefix player_id
if is_master:
proc = subprocess.Popen([sys.executable, "-m", "server.app" ,'--users', 'test/das_map.json' , '--config', configFile, '--log-prefix', serverName, '--port', str(target_port)])
else:
proc = subprocess.Popen([sys.executable, "-m", "server.app" , '--config', configFile, '--log-prefix', serverName, '--port', str(target_port)])
if proc.pid > 0:
logger.info("Server" + serverName + "successfully added. Process Id: " + str(proc.pid))
serverProcesses[serverName] = proc.pid
else:
logger.error("Error while loading the base server. Simulation will be aborted.")
return
if MASTER_NODE_SERVER_NAME == serverName:
time.sleep(SIMULATION_SERVERS_WARMUP_TIME)
def triggerServerEvents(serverApp, configFile, base_port, port_offset, numSlaveServers, listOfServerEvents):
# The list of server events precedes the parameter numSlaveServers
if (listOfServerEvents is not None):
for event in listOfServerEvents:
if event.eventType == emulation.GTAEventsReader.SERVER_ADD:
if event.playerId == MASTER_SERVER:
thread = Thread(target=addServer, args=(event, serverApp,configFile,MASTER_NODE_SERVER_NAME, base_port, True))
else:
slave_port = base_port + port_offset * int(event.playerId)
thread = Thread(target=addServer, args=(event, serverApp, configFile, SLAVE_NODE_SERVER_NAME_PREFIX + str(event.playerId).strip(), str(slave_port), False))
thread.start()
else:
if event.eventType == emulation.GTAEventsReader.SERVER_REMOVE:
if event.playerId == MASTER_SERVER:
thread = Thread(target=killServer, args=(event, MASTER_NODE_SERVER_NAME,))
else:
thread = Thread(target=killServer, args=(event, SLAVE_NODE_SERVER_NAME_PREFIX + str(event.playerId).strip(),))
thread.start()
# if event.eventType == emulation.GTAEventsReader.SERVER_ADD:
#
# if event.playerId == MASTER_SERVER:
# addServer(event, serverApp, configFile, MASTER_NODE_SERVER_NAME, base_port)
# else:
# slave_port = base_port + port_offset * int(event.playerId)
# addServer(event, serverApp, configFile, SLAVE_NODE_SERVER_NAME_PREFIX + str(event.playerId).strip(), str(slave_port))
# else:
# if event.eventType == emulation.GTAEventsReader.SERVER_REMOVE:
# if event.playerId == MASTER_SERVER:
# killServer(event, MASTER_NODE_SERVER_NAME)
# else:
# killServer(event, SLAVE_NODE_SERVER_NAME_PREFIX + str(event.playerId).strip())
# else:
# logger.error("Server Event for" + event.playerId + " not identified")
else:
if(numSlaveServers is not None):
# Starting the base server
# command line to start the base server: python ../server/app.py --log-prefix player_id
proc = subprocess.Popen([sys.executable, serverApp, '--config', configFile, '--log-prefix', 'master_node', '--port', str(base_port)])
if proc.pid > 0:
logger.info("Base Server successfully added. Process Id: " + str(proc.pid))
serverProcesses.append(proc.pid)
else:
logger.error("Error while loading the base server. Simulation will be aborted.")
return
time.sleep(SIMULATION_SERVERS_WARMUP_TIME)
# Initializing the slave servers for simulation
i = 1
while i <= numSlaveServers:
slave_port = base_port + port_offset*i
proc = subprocess.Popen([sys.executable, serverApp, '--config', configFile, '--log-prefix', 'slave_' + str(i), '--config', str(slave_port)])
if proc.pid > 0:
logger.info("Slave Server " + str(i) + " successfully added. Process Id:" + str(proc.pid))
serverProcesses.append(proc.pid)
else:
logger.error("Error while loading slave server " + str(i) + ".")
i += 1
time.sleep(SIMULATION_SERVERS_WARMUP_TIME)
else:
logger.error("The number of slave servers or a list of server events was not provided, "
"so no servers will be added to the simulation.")
return
# This kills the process for a given serverName used for the simulation.
# Input: event_details: the event whose timestamp schedules the removal.
#        serverName: the name of the server whose process will be killed.
# Output: none.
def killServer(event_details, serverName):
time.sleep(event_details.timeStamp)
if serverProcesses[serverName] is not None:
if not runningWindows:
commandLine = "kill -9 " + str(serverProcesses[serverName])
else:
#Killing the process using a Windows command
commandLine = "taskkill /f /pid " + str(serverProcesses[serverName])
logger.info("Removing the server process:" + str(serverProcesses[serverName]))
# Executing the command to kill the respective process.
os.system(commandLine)
serverProcesses[serverName] = None
return
# This kills the processes used for the simulation.
# Uses the global runningWindows flag to decide how the processes are killed.
# Output: none.
def killServers():
with serverLock:
numberOfProcesses = len(serverProcesses)
if numberOfProcesses > 0:
for serverProcess in serverProcesses:
if not runningWindows:
commandLine = "kill -9 " + str(serverProcess)
else:
#Killing the process using a Windows command
commandLine = "taskkill /f /pid " + str(serverProcess)
logger.info("Removing the server process:" + str(serverProcess))
# Executing the command to kill the respective process.
os.system(commandLine)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Simulation")
# Parameters related to the simulation
parser.add_argument("--elap-time", dest="simulationElapsedTimeInSeconds", default=30)
parser.add_argument("--delayBetweenEvents", dest="timeBetweenEvents", default=0.5)
parser.add_argument("--gta-file", dest="gtaFilename", default='WoWSession_Node_Player_Fixed_Dynamic_reduced.zip')
# Parameters related to the servers used in the simulation
parser.add_argument("--base-port", dest="basePort", default=7000)
parser.add_argument("--port-offset", dest="portOffset",default=1000)
parser.add_argument("--num-slave-servers", dest="numSlaveServers", default=0)
parser.add_argument("--server-event-file", dest="serverEventFilename")
parser.add_argument("--server-config", dest="serverConfig", default="./test/das_config.json")
# Example of parameters to invoke main --elap-time 15 --delayBetweenEvents 1 --gta-file WoWSession_Node_Player_Fixed_Dynamic_reduced.zip --server-event-file Server_Connectons_Disconnections.zip
args = parser.parse_args()
init_logger("log/simulation_{}.log".format(time.time()))
# Assigning the parameters received in the command line to the variables which will be used for the simulation
simulationElapsedTimeInSeconds = int(args.simulationElapsedTimeInSeconds)
timeBetweenEvents = float(args.timeBetweenEvents)
gtaFilename = args.gtaFilename
base_port = int(args.basePort)
port_offset = int(args.portOffset)
numSlaveServers = int(args.numSlaveServers)
serverEventFilename = args.serverEventFilename
configurationFile = args.serverConfig
# This list will contain pairs of players and the associated process.
global playersAndProcesses
playersAndProcesses = {}
# List of processes related to the servers used in the simulation (master + slaves)
global serverProcesses
serverProcesses = {}
    # This lock is used to implement concurrency control on the list of players and processes which will be shared
    # across multiple threads.
lock = Lock()
serverLock = Lock()
runningWindows = False
if os.name == 'nt':
runningWindows = True
fileDir = os.path.dirname(os.path.realpath('__file__'))
# Depending on the OS in which the simulation is running the way in which the client and server are invoked is
# different.
if not runningWindows:
clientAppLocation = os.path.join(fileDir, LOCATION_CLIENT_APP_LINUX)
serverAppLocation = os.path.join(fileDir, LOCATION_SERVER_APP_LINUX)
else:
# Windows file structure
clientAppLocation = os.path.join(fileDir, LOCATION_CLIENT_APP_WINDOWS)
serverAppLocation = os.path.join(fileDir, LOCATION_SERVER_APP_WINDOWS)
# List of events still considering the timestamps read from the GTA file
listOfEvents = emulation.GTAEventsReader.LoadEventsFromFile(gtaFilename, emulation.GTAEventsReader.MODE_PLAYERS)
# Normalize the timeStamps of the Login/Logout events using the given simulation's elapsed time.
listOfNormalizedPlayerEvents = emulation.GTAEventsReader.NormalizeEvents(listOfEvents, simulationElapsedTimeInSeconds)
logger.info("Total number of Login/Logout events: " + str(len(listOfNormalizedPlayerEvents)))
# List of server events
listOfEvents = None
listOfEvents = emulation.GTAEventsReader.LoadEventsFromFile(serverEventFilename, emulation.GTAEventsReader.MODE_SERVERS)
if listOfEvents is not None:
# Normalize the timeStamps of the server events using the given simulation's elapsed time.
listOfNormalizedServerEvents = emulation.GTAEventsReader.NormalizeEvents(listOfEvents, simulationElapsedTimeInSeconds)
logger.info("Total number of server events: " + str(len(listOfNormalizedServerEvents)))
logger.info("Starting the simulation.")
logger.info("Initializing servers.")
triggerServerEvents(serverAppLocation, configurationFile, base_port, port_offset, numSlaveServers, listOfNormalizedServerEvents)
logger.info("Triggering events.")
triggerJoinLeaveEvents(listOfNormalizedPlayerEvents, lock, clientAppLocation, timeBetweenEvents)
if listOfEvents is None:
logger.info("List of server events not used - killing the processes related to the servers.")
        killServers()
print("This is the end of the simulation.")
``` |
{
"source": "jonastheis/healthor",
"score": 2
} |
#### File: healthor/plot/utils.py
```python
import pandas as pd
import numpy as np
import subprocess
import os
import glob
import time
import datetime
OMNETPP_BASE_DIRECTORY = os.path.abspath('../simulation/')
OMNETPP_RESULTS_DIRECTORY = os.path.join(OMNETPP_BASE_DIRECTORY, 'results')
def compile_simulation():
print('Compiling...')
ret = subprocess.run([
'make',
], cwd=OMNETPP_BASE_DIRECTORY, capture_output=True)
if ret.returncode != 0:
print(ret)
exit(1)
print('Compiling... done.')
def run_simulation(config_name):
compile_simulation()
start = time.time()
print(format_time(time.time()), '| Simulation run %s...' % config_name)
ret = subprocess.run([
'./simulation',
# 'omnetpp.ini',
# '-n', 'basic.ned',
'-u', 'Cmdenv',
'-c', config_name,
], cwd=OMNETPP_BASE_DIRECTORY, capture_output=True)
if ret.returncode != 0:
print(ret)
exit(1)
end = time.time()
duration = end - start
print(format_time(time.time()), '| %s duration |' % str(datetime.timedelta(seconds=duration)), 'Simulation run %s... done.' % config_name)
def export_to_csv(config_name):
ret = subprocess.run([
'scavetool', 'x', '%s-#0.vec' % config_name, '%s-#0.sca' % config_name, '-o', '%s.csv' % config_name,
], cwd=OMNETPP_RESULTS_DIRECTORY, capture_output=True)
if ret.returncode != 0:
print(ret)
exit(1)
def parse_if_number(s):
try: return float(s)
except: return True if s=="true" else False if s=="false" else s if s else None
def parse_ndarray(s):
return np.fromstring(s, sep=' ') if s else None
def parse_omnetpp_csv(config_name):
path = os.path.join(OMNETPP_RESULTS_DIRECTORY, '%s.csv' % config_name)
return pd.read_csv(path, converters={
'value': parse_if_number,
'attrvalue': parse_if_number,
'binedges': parse_ndarray,
'binvalues': parse_ndarray,
'vectime': parse_ndarray,
'vecvalue': parse_ndarray,
})
def glob_csv_files(config_name, type):
path = os.path.join(OMNETPP_RESULTS_DIRECTORY, '%s-%s_*.csv' % (config_name, type))
return sorted(glob.glob(path))
def save_simulation_state(path):
ret = subprocess.run([
'tar', '-czvf', os.path.join(path, 'simulation.tar.gz'), OMNETPP_BASE_DIRECTORY,
], capture_output=True)
if ret.returncode != 0:
print(ret)
exit(1)
def save_to_csv(df, path, name):
df.to_csv(os.path.join(path, name + '.csv'))
def format_time(t):
return time.strftime('%Y-%m-%d %H:%M:%S GMT', time.gmtime(t))
``` |
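A quick illustration of the CSV converters above (values mirror what scavetool exports; the import path is assumed):
```python
import numpy as np
from utils import parse_if_number, parse_ndarray

assert parse_if_number("3.5") == 3.5    # numeric strings become floats
assert parse_if_number("true") is True  # boolean strings are mapped to bool
assert parse_if_number("") is None      # empty cells become None
assert np.allclose(parse_ndarray("1 2 3"), np.array([1.0, 2.0, 3.0]))
```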
{
"source": "JonasThorsell/AoC",
"score": 3
} |
#### File: 2020/13/aoc2013b.py
```python
import sys
from functools import reduce
# Chinese Remainder Theorem:
# https://fangya.medium.com/chinese-remainder-theorem-with-python-a483de81fbb8
def chinese_remainder(n, a):
sum = 0
prod = reduce(lambda a, b: a*b, n)
for n_i, a_i in zip(n, a):
p = prod // n_i
sum += a_i * mul_inv(p, n_i) * p
return sum % prod
def mul_inv(a, b):
b0 = b
x0, x1 = 0, 1
if b == 1: return 1
while a > 1:
q = a // b
a, b = b, a%b
x0, x1 = x1 - q * x0, x0
if x1 < 0: x1 += b0
return x1
t = int(sys.stdin.readline())
b = sys.stdin.readline().split(',')
n = []
a = []
for i in range(len(b)):
if not b[i] == 'x':
n.append(int(b[i]))
a.append(-i)
print(chinese_remainder(n, a))
```
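A worked check of the helpers above using the example schedule "7,13,x,x,59,x,31,19": a bus at offset i must depart at t + i, i.e. t ≡ -i (mod bus_id), and the smallest timestamp satisfying all congruences is 1068781 (a sketch, assuming `chinese_remainder` from above is in scope):
```python
n = [7, 13, 59, 31, 19]   # bus ids (pairwise coprime moduli)
a = [0, -1, -4, -6, -7]   # negative offsets (residues)
assert chinese_remainder(n, a) == 1068781
```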
#### File: 2020/16/aoc2016b.py
```python
import sys
import re
import functools
def chktf(tf, tfr):
for r in tfr:
if (r[1] <= tf <= r[2] or r[3] <= tf <= r[4]):
return True
return False
def matchtfl(tfl, r):
for f in tfl:
if not (r[1] <= f <= r[2] or r[3] <= f <= r[4]):
return False
return True
p = re.compile(r'^([a-z ]+): (\d+)-(\d+) or (\d+)-(\d+)')
tfr=[]
for l in sys.stdin:
if not l.strip():
break
tfr.append([int(x) if x.isdigit() else x for x in p.match(l).groups()])
sys.stdin.readline()
mt = [int(x) for x in sys.stdin.readline().split(',')]
sys.stdin.readline()
sys.stdin.readline()
nt = []
for l in sys.stdin:
nt.append([int(x) for x in l.split(',')])
nvt = [t for t in nt if sum([chktf(f, tfr) for f in t]) == len(t)]
gbf = []
for i in range(len(nvt[0])):
gbf.append([t[i] for t in nvt])
vr = [[] for i in range(len(gbf))]
for r in tfr:
for i, f in enumerate(gbf):
if matchtfl(f, r):
vr[i].append(r[0])
om = [x[0] for x in vr if len(x) == 1]
while (len(om) < len(tfr)):
for m in om:
for f in vr:
if len(f) > 1 and m in f:
f.remove(m)
om = [x[0] for x in vr if len(x) == 1]
print(functools.reduce(lambda a,b : a*b, [x[1] for x in zip(om,mt) if x[0][:9] == 'departure']))
```
#### File: 2020/22/aoc2022b.py
```python
import sys
d=[[],[]]
for p in range(2):
sys.stdin.readline()
for l in sys.stdin:
if not l.strip():
break
d[p].append(int(l))
def score(d):
return sum([(i+1)*c for i,c in enumerate(d[::-1])])
def rc(d0, d1, g):
h = {}
while len(d0) and len(d1):
w = 0
if str((d0, d1)) in h:
return (0, score(d0))
h[str((d0, d1))] = True
c0 = d0.pop(0)
c1 = d1.pop(0)
if len(d0) >= c0 and len(d1) >= c1:
w, _ = rc(d0[:c0].copy(), d1[:c1].copy(), g + 1)
else:
w = 0 if c0 > c1 else 1
if w == 0:
d0.append(c0)
d0.append(c1)
else:
d1.append(c1)
d1.append(c0)
return (0, score(d0)) if w == 0 else (1, score(d1))
w, s = rc(d[0], d[1], 1)
print(f'W {w+1} score: {s}')
```
#### File: 2020/25/aoc2025a.py
```python
import sys
def cls(pk, sn=7):
ls = 0
v = 1
while not v == pk:
ls += 1
v = (v * sn) % 20201227
return ls
def cek(ls, sn):
v = 1
for i in range(ls):
v = (v * sn) % 20201227
return v
card_pk = int(sys.stdin.readline())
door_pk = int(sys.stdin.readline())
card_ls = cls(card_pk)
door_ls = cls(door_pk)
print(f'card pk: {card_pk} ls: {card_ls}')
print(f'door pk: {door_pk} ls: {door_ls}')
print(cek(card_ls, door_pk))
print(cek(door_ls, card_pk))
```
#### File: 2020/4/aoc2004b.py
```python
import sys
import re
pl=[{}]
for l in sys.stdin:
if l.strip():
pl[-1].update({kv.split(':')[0]: kv.split(':')[1] for kv in l.split()})
else:
pl.append({})
def chkp(p):
fl = ('byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid')
ecl = ('amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth')
if not all(f in p for f in fl):
return False
if (not p['byr'].isdigit() or int(p['byr']) < 1920 or int(p['byr']) > 2002):
return False
if (not p['iyr'].isdigit() or int(p['iyr']) < 2010 or int(p['iyr']) > 2020):
return False
if (not p['eyr'].isdigit() or int(p['eyr']) < 2020 or int(p['eyr']) > 2030):
return False
hgtu = p['hgt'][-2:]
hgtv = p['hgt'][:-2]
if (not hgtu in ['cm', 'in'] or not hgtv.isdigit()):
return False
if (hgtu == 'cm' and (int(hgtv) < 150 or int(hgtv) > 193)):
return False
if (hgtu == 'in' and (int(hgtv) < 59 or int(hgtv) > 76)):
return False
if (not re.search("^#[0-9a-f]{6}$", p['hcl'])):
return False
if (not p['ecl'] in ecl):
return False
if (not p['pid'].isdigit() or len(p['pid']) != 9):
return False
return True
vpl = list(filter(chkp, pl))
print(len(vpl))
```
#### File: 2021/16/aoc2116a.py
```python
import sys
gversum = 0
def decode(s, npkg=-1):
global gversum
i = 0
while len(s) - i >= 6 and npkg != 0:
version = int(s[i:i+3],2)
gversum += version
typeid = int(s[i+3:i+6],2)
i += 6
if npkg > 0: npkg -= 1
#print(version, typeid)
if typeid == 4: # literal value
literals = ''
literal = 0
group = 1
while group:
group = int(s[i])
i += 1
literals += s[i:i+4]
i+=4
literal = int(literals,2)
print(version, typeid, literal)
else: # operator
lengthtype = int(s[i])
i += 1
if lengthtype == 0: # total length in bits
sublength = int(s[i:i+15],2)
print(version, typeid, lengthtype, sublength)
i+=15
decode(s[i:i+sublength])
i+=sublength
else:
subpkg = int(s[i:i+11],2)
print(version, typeid, lengthtype, subpkg)
i+=11
i+=decode(s[i:], subpkg)
return i
for l in sys.stdin:
bits =''
gversum = 0
for c in l.strip():
bits += '{:04b}'.format(int(c,16))
print(l.strip())
print(bits)
decode(bits,1)
print(gversum)
i=0
```
#### File: 2021/5/aoc2105a.py
```python
import sys
import re
import numpy as np
def line(c,x1,y1,x2,y2):
for x in range(min(x1,x2),max(x1,x2)+1):
for y in range(min(y1,y2),max(y1,y2)+1):
c[x,y] = c[x,y]+1
p = re.compile('(\d+),(\d+) -> (\d+),(\d+)')
ln = []
for l in sys.stdin:
m = [int(x) for x in p.search(l).groups()]
if m[0] == m[2] or m[1] == m[3]:
ln.append(m)
ln = np.array(ln)
m = ln.max(axis=0)
xm = max(m[0],m[2])
ym = max(m[1],m[3])
c = np.zeros((xm+1,ym+1), dtype=np.uint)
for l in ln:
line(c,l[0],l[1],l[2],l[3])
print(c.T)
print(np.sum(c >= 2))
``` |
{
"source": "JonasToth/depth-conversions",
"score": 2
} |
#### File: scripts/analysis/transformation_step.py
```python
import argparse
import configparser
import glob
import logging as log
from os.path import abspath, basename, dirname, join, exists
from pathlib import Path
import subprocess
import sys
__l = log.getLogger(__file__)
__cmd_prefix = ['']
__cmds = {
'filter': 'depth_filter',
'converter': 'depth2x',
'extractor': 'feature_extractor',
'plotter': 'keypoint_plotter',
'distribution': 'feature_performance',
'matching': 'feature_performance',
'recognition': 'feature_performance',
}
def arguments():
parser = argparse.ArgumentParser(
description='Run the transformation from depth image to feature image')
parser.add_argument('-c', '--config',
help='Path to configuration file that describes the'
' pipeline')
parser.add_argument('--command-prefix',
help='Define a prefix that is prepended to every'
' command. This can be used to run the commands'
' with \'docker run\'',
default='')
parser.add_argument('--force',
help='Force execution of the transformation, even'
' if the files already exist.',
action='store_true')
return parser.parse_args()
def command_invocer(invocation, config_args, source_info):
filtered = [s for s in invocation if len(s) > 0]
__l.debug('Final invocation: %s' % filtered)
__l.info('Running the following command:')
__l.info(' '.join(filtered))
return_code = subprocess.run(filtered, shell=False, stdin=None,
stdout=sys.stdout, stderr=sys.stderr)
if return_code.returncode < 0:
__l.error("Command got terminated by signal %d" %
-return_code.returncode)
__l.error("Invocation:\n%s" % '\n'.join(filtered))
elif return_code.returncode > 0:
__l.warning("Command terminated with error code!")
__l.warning("Invocation:\n%s" % '\n'.join(filtered))
# Because filtering is a way to create a new base-dataset, a configuration
# file must be written for the new dataset.
new_cfg = configparser.ConfigParser()
new_cfg['data'] = source_info
new_cfg['data']['pattern'] = basename(config_args['target'])
new_cfg.write(open(join(dirname(config_args['target']),
config_args.get('config', 'dataset.config')), 'w'))
def path_adjustment(dictionary, keys, path):
"""
    Iterates :param: keys in the :param: dictionary and replaces each value
    with 'abspath(join(dirname(path), dictionary[key]))'.
"""
for key in keys:
if key not in dictionary:
continue
dictionary[key] = abspath(join(dirname(path), dictionary[key]))
def run_filter(config_args, source_info):
__l.debug('Filter command: %s' % config_args['filter'])
__l.debug('Filter arguments: %s' % config_args['arguments'])
invocation = __cmd_prefix
invocation.append(__cmds['filter'])
invocation.extend(['--input', source_info['pattern']])
invocation.extend(['--output', config_args['target']])
invocation.extend(['--start', source_info['start']])
invocation.extend(['--end', source_info['end']])
invocation.append(config_args['filter'])
invocation.extend(config_args['arguments'].split(' '))
command_invocer(invocation, config_args, source_info)
def run_converter(config_args, source_info):
__l.debug('Converter type: %s' % config_args['type'])
invocation = __cmd_prefix
invocation.append(__cmds['converter'])
invocation.extend(['--calibration', source_info['intrinsic']])
invocation.extend(['--model', source_info['model']])
invocation.extend(['--type', source_info['type']])
invocation.extend(['--input', source_info['pattern']])
invocation.extend(['--start', source_info['start']])
invocation.extend(['--end', source_info['end']])
invocation.append(config_args['type'])
if config_args['type'] == 'bearing':
add_args = config_args.get('arguments', '').split(' ')
        assert len(add_args) == 1, "Too many arguments for bearing angle"
invocation.extend(add_args)
invocation.append(config_args['target'])
else:
invocation.extend(['--output', config_args['target']])
invocation.extend(config_args.get('arguments', '').split(' '))
command_invocer(invocation, config_args, source_info)
def run_extraction(config_args, source_info):
__l.debug('Detector: %s' % config_args['detector'])
__l.debug('Descriptor: %s' % config_args['descriptor'])
invocation = __cmd_prefix
invocation.append(__cmds['extractor'])
invocation.extend(['--input', source_info['pattern']])
invocation.extend(['--output', config_args['target']])
invocation.extend(['--start', source_info['start']])
invocation.extend(['--end', source_info['end']])
invocation.append('detector')
det_args = config_args.get('detector_filter', '').split(' ')
invocation.extend(det_args)
invocation.append(config_args['detector'])
add_args = config_args.get('detector_args', '').split(' ')
invocation.extend(add_args)
invocation.extend(['descriptor', config_args['descriptor']])
add_args = config_args.get('descriptor_args', '').split(' ')
invocation.extend(add_args)
command_invocer(invocation, config_args, source_info)
def run_plotting(config_args, source_info):
invocation = __cmd_prefix
invocation.append(__cmds['plotter'])
invocation.extend(['--input', source_info['pattern']])
invocation.extend(['--output', config_args['target']])
invocation.extend(['--start', source_info['start']])
invocation.extend(['--end', source_info['end']])
invocation.extend(['--color', config_args.get('color', 'all')])
command_invocer(invocation, config_args, source_info)
def run_distribution(config_args, source_info):
invocation = __cmd_prefix
invocation.append(__cmds['distribution'])
invocation.extend(['--input', source_info['pattern']])
invocation.extend(['--output', config_args['target']])
invocation.extend(['--start', source_info['start']])
invocation.extend(['--end', source_info['end']])
invocation.append('keypoint-distribution')
invocation.extend(['--image-width', config_args['width']])
invocation.extend(['--image-height', config_args['height']])
invocation.extend(['--response-histo', config_args['response']])
invocation.extend(['--size-histo', config_args['size']])
invocation.extend(['--kp-distance-histo', config_args['kp_distance']])
invocation.extend(['--kp-distribution-histo',
config_args['kp_distribution']])
command_invocer(invocation, config_args, source_info)
def run_matching(config_args, source_info):
invocation = __cmd_prefix
invocation.append(__cmds['distribution'])
invocation.extend(['--input', source_info['pattern']])
invocation.extend(['--output', config_args['target']])
invocation.extend(['--start', source_info['start']])
invocation.extend(['--end', source_info['end']])
invocation.append('matching')
invocation.extend(['--distance-norm', config_args['norm']])
invocation.extend(['--match-output', config_args['match_output']])
invocation.extend(['--original-images', config_args['original_images']])
invocation.extend(['--matched-distance-histo',
config_args['match_distances']])
command_invocer(invocation, config_args, source_info)
def run_recognition(config_args, source_info):
invocation = __cmd_prefix
invocation.append(__cmds['recognition'])
invocation.extend(['--input', source_info['pattern']])
invocation.extend(['--output', config_args['target']])
invocation.extend(['--start', source_info['start']])
invocation.extend(['--end', source_info['end']])
invocation.append('recognition-performance')
invocation.extend(['--depth-image', config_args['depth_images']])
invocation.extend(['--pose-file', source_info['pose']])
invocation.extend(['--intrinsic', source_info['intrinsic']])
if 'mask' in source_info:
invocation.extend(['--mask', source_info['mask']])
invocation.extend(['--match-norm', config_args['norm']])
invocation.extend(['--keypoint-distance-threshold',
config_args['distance_threshold']])
if 'backprojection' in config_args:
invocation.extend(['--backprojection', config_args['backprojection']])
invocation.extend(['--orig-images', config_args['original_images']])
if 'true_positive_strength' in config_args:
invocation.extend(['--true-positive-strength',
config_args['true_positive_strength']])
if 'false_negative_strength' in config_args:
invocation.extend(['--false-negative-strength',
config_args['false_negative_strength']])
if 'false_positive_strength' in config_args:
invocation.extend(['--false-positive-strength',
config_args['false_positive_strength']])
invocation.extend(['--backprojection-selected-histo',
config_args['backprojection_selected_histo']])
invocation.extend(['--relevant-elements-histo',
config_args['relevant_elements_histo']])
invocation.extend(['--true-positive-histo',
config_args['true_positive_histo']])
invocation.extend(['--false-positive-histo',
config_args['false_positive_histo']])
invocation.extend(['--true-positive-distance-histo',
config_args['true_positive_distance_histo']])
invocation.extend(['--false-positive-distance-histo',
config_args['false_positive_distance_histo']])
command_invocer(invocation, config_args, source_info)
def create_video(config_args):
__l.debug('Creating video \'%s\'' % config_args['output'])
# Adjusted from
# https://stackoverflow.com/questions/24961127/how-to-create-a-video-from-images-with-ffmpeg
ffmpeg_call = ['ffmpeg', '-r', config_args['rate'], '-i',
config_args['source'], '-c:v', 'libx264',
'-y', # overwrite the output-file automatically
'-vf', 'fps=25', '-pix_fmt', 'yuv420p',
config_args['output']]
return_code = subprocess.run(ffmpeg_call, shell=False, stdin=None,
stdout=sys.stdout, stderr=sys.stderr)
return_code.check_returncode()
def main():
args = arguments()
global __cmd_prefix
__cmd_prefix = args.command_prefix.split(' ')
__l.debug('Command prefix: %s' % __cmd_prefix)
if not exists(args.config):
__l.error('Configuration path \'%s\' does not exist!' % args.config)
sys.exit(1)
toplevel_cfg = configparser.ConfigParser()
__l.debug('Reading configuration from file: %s' % args.config)
toplevel_cfg.read(args.config, encoding='utf-8')
# Substitute paths in the configurations with absolute paths.
path_adjustment(toplevel_cfg['data'], ['target', 'test_glob', 'source'],
args.config)
__l.debug('Source Data configuration expected here: %s' %
toplevel_cfg['data']['source'])
if not exists(toplevel_cfg['data']['source']):
__l.error('Configuration path \'%s\' does not exist!' %
toplevel_cfg['data']['source'])
sys.exit(1)
source_data_cfg = configparser.ConfigParser()
source_data_cfg.read(toplevel_cfg['data']['source'], encoding='utf-8')
# Substitute paths in source-configuration as well.
path_adjustment(source_data_cfg['data'],
['pattern', 'pose', 'mask', 'intrinsic'],
toplevel_cfg['data']['source'])
# Create directories for the output, if they do not exist.
dirs_to_output = dirname(toplevel_cfg['data']['target'])
Path(dirs_to_output).mkdir(parents=True, exist_ok=True)
if not args.force:
n_elements = 0
provided_count = int(toplevel_cfg['data'].get('expected_elements', -1))
if provided_count != -1:
n_elements = provided_count
else:
# Check if any work has to be done, if yes, do it.
# Otherwise just return.
start_idx = source_data_cfg.getint('data', 'start')
end_idx = source_data_cfg.getint('data', 'end')
# Counting is inclusive in the processing tools.
n_elements = end_idx - start_idx + 1
__l.debug('Expect %d elements' % n_elements)
globbed = glob.glob(toplevel_cfg['data']['test_glob'])
__l.debug('Glob detects %d elements' % len(globbed))
if len(globbed) == n_elements:
__l.info('Detected that the output files already exist.'
' Skipping processing of %s!' % args.config)
return
if len(globbed) > n_elements:
            __l.error('Test expression resulted in more files than the'
                      ' original dataset has. Check your configuration!'
                      ' No processing!')
return
if 'filter' in toplevel_cfg:
toplevel_cfg['filter']['target'] = toplevel_cfg['data']['target']
run_filter(toplevel_cfg['filter'], source_data_cfg['data'])
elif 'converter' in toplevel_cfg:
toplevel_cfg['converter']['target'] = toplevel_cfg['data']['target']
run_converter(toplevel_cfg['converter'], source_data_cfg['data'])
elif 'extract' in toplevel_cfg:
toplevel_cfg['extract']['target'] = toplevel_cfg['data']['target']
run_extraction(toplevel_cfg['extract'], source_data_cfg['data'])
elif 'plot' in toplevel_cfg:
toplevel_cfg['plot']['target'] = toplevel_cfg['data']['target']
run_plotting(toplevel_cfg['plot'], source_data_cfg['data'])
elif 'distribution' in toplevel_cfg:
toplevel_cfg['distribution']['target'] = toplevel_cfg['data']['target']
path_adjustment(toplevel_cfg['distribution'],
['response', 'size', 'kp_distance', 'kp_distribution'],
args.config)
run_distribution(toplevel_cfg['distribution'], source_data_cfg['data'])
elif 'matching' in toplevel_cfg:
toplevel_cfg['matching']['target'] = toplevel_cfg['data']['target']
path_adjustment(toplevel_cfg['matching'],
['match_output', 'original_images', 'match_distances'],
args.config)
run_matching(toplevel_cfg['matching'], source_data_cfg['data'])
elif 'recognition' in toplevel_cfg:
toplevel_cfg['recognition']['target'] = toplevel_cfg['data']['target']
path_adjustment(toplevel_cfg['recognition'],
['depth_images', 'backprojection', 'original_images',
'backprojection_selected_histo',
'relevant_elements_histo', 'true_positive_histo',
'false_positive_histo',
'true_positive_distance_histo',
'false_positive_distance_histo'],
args.config)
depth_image_cfg = configparser.ConfigParser()
depth_image_cfg.read(toplevel_cfg['recognition']['depth_images'],
encoding='utf-8')
toplevel_cfg['recognition']['depth_images'] = \
join(dirname(toplevel_cfg['recognition']['depth_images']),
depth_image_cfg['data']['pattern'])
run_recognition(toplevel_cfg['recognition'], source_data_cfg['data'])
# Creating a video from the frames helps with visualization.
if 'video' in toplevel_cfg:
        # Replace '%' with '%%%%' so the value survives the two ConfigParser
        # interpolation passes (one in 'path_adjustment', one when 'create_video'
        # reads the value). ffmpeg uses '%' as placeholder for indices in
        # filenames (printf-syntax).
toplevel_cfg['video']['source'] = \
toplevel_cfg['video']['source'].replace('%', '%%%%')
path_adjustment(toplevel_cfg['video'], ['output', 'source'],
args.config)
create_video(toplevel_cfg['video'])
if __name__ == '__main__':
log.basicConfig(format='%(asctime)s, %(levelname)s: %(message)s',
level=log.WARN)
main()
```
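The script above is driven entirely by INI-style files read with `configparser`. As a hedged sketch (the section and key names mirror what the script reads, while the paths, pattern and filter settings are placeholder assumptions), a step configuration could be generated like this:

```python
import configparser

# Hypothetical example only: the [data] and [filter] keys mirror what
# transformation_step.py reads; all values are placeholders.
step_cfg = configparser.ConfigParser()
step_cfg['data'] = {
    'source': 'base_dataset/dataset.config',        # config describing the input dataset
    'target': 'derived/depth-filtered-0000.png',    # output pattern (placeholder; format depends on the tools)
    'test_glob': 'derived/depth-filtered-*.png',    # used to detect already existing output
}
step_cfg['filter'] = {
    'filter': 'median',              # sub-command of 'depth_filter' (placeholder)
    'arguments': '--kernel-size 5',  # extra arguments for the chosen filter (placeholder)
}
with open('filter_step.config', 'w') as cfg_file:
    step_cfg.write(cfg_file)
```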
{
"source": "JonasTrampe/hucon",
"score": 3
} |
#### File: python_lib/hucon/PCA9685.py
```python
import time
from OmegaExpansion import onionI2C
# Set of all addresses which are successfully initialized.
_INITIALIZED_DRIVER = set()
class PCA9685(object):
""" Class to to handle multiple initialization from the PCA9685 at the same time.
This class will setup the PCA9685 once the first 'init' function is called.
After that, the setup routine will not called any more.
"""
_address = None
_i2c = None
_MODE1 = 0x00
_MODE2 = 0x01
_PRESCALE = 0xFE
_LED0_ON_L = 0x06
_ALL_LED_ON_L = 0xFA
def __init__(self, address):
""" Initialize the module and the PCS9685 driver if this is not initialized at the moment.
"""
global _INITIALIZED_DRIVER
self._address = address
self._i2c = onionI2C.OnionI2C()
# Initialize the PCA9685 for the first time.
if self._address not in _INITIALIZED_DRIVER:
self._setup()
_INITIALIZED_DRIVER.add(self._address)
def _setup(self):
""" Set the PCA into a working state.
"""
self.set_all_channel(0)
# Set default Mode2 register
self._i2c.writeByte(self._address, self._MODE2, 0x04)
# Set sleep on.
self._i2c.writeByte(self._address, self._MODE1, 0x11)
# Set a frequency of 50 Hz
        # prescale = round(25 MHz / (4096 * 50 Hz)) - 1 = 121
self._i2c.writeByte(self._address, self._PRESCALE, 121)
# Release sleep.
self._i2c.writeByte(self._address, self._MODE1, 0x01)
time.sleep(0.005)
# Reset device.
self._i2c.writeByte(self._address, self._MODE1, 0x81)
def set_channel(self, channel, value):
""" Write the specific channel
"""
if channel not in range(16):
raise Exception('The channel must be in range from 0 to 15!')
if value not in range(4096):
raise Exception('The value must be in range from 0 to 4095!')
register_address = self._LED0_ON_L + channel * 4
# Set the on always to 0 and the off value will set the duration of the on state.
self._i2c.writeByte(self._address, register_address + 0, 0)
self._i2c.writeByte(self._address, register_address + 1, 0)
self._i2c.writeByte(self._address, register_address + 2, value & 0xFF)
self._i2c.writeByte(self._address, register_address + 3, value >> 8)
def set_all_channel(self, value):
""" Write to all channels with the write ALL_LED register.
"""
if value not in range(4096):
raise Exception('The value must be in range from 0 to 4095!')
# Set the on always to 0 and the off value will set the duration of the on state.
self._i2c.writeByte(self._address, self._ALL_LED_ON_L + 0, 0)
self._i2c.writeByte(self._address, self._ALL_LED_ON_L + 1, 0)
self._i2c.writeByte(self._address, self._ALL_LED_ON_L + 2, value & 0xFF)
self._i2c.writeByte(self._address, self._ALL_LED_ON_L + 3, value >> 8)
if __name__ == '__main__':
pwm = PCA9685(0x4A)
for cha in range(16):
print('Channel %d\n' % cha)
pwm.set_channel(cha, 255)
time.sleep(1)
pwm.set_all_channel(0)
```
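With the 50 Hz setup programmed in `_setup`, one of the 4096 counts corresponds to roughly 20 ms / 4096 ≈ 4.9 µs. A small hedged sketch (the helper and the servo pulse widths are illustrative assumptions, not part of the driver):

```python
# Hypothetical helper: convert a pulse width in microseconds to a 12-bit
# PCA9685 count, assuming the 50 Hz (20 ms) configuration used in _setup().
def pulse_us_to_count(pulse_us):
    period_us = 20000.0  # 1 / 50 Hz
    count = int(round(pulse_us / period_us * 4096))
    return max(0, min(4095, count))

# e.g. a typical hobby-servo range of 1.0 ms .. 2.0 ms maps to roughly 205 .. 410
print(pulse_us_to_count(1000), pulse_us_to_count(2000))
```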
{
"source": "JonasTriki/masters-thesis-ml",
"score": 3
} |
#### File: masters-thesis-ml/code/approx_nn.py
```python
from os import makedirs
from typing import Optional, Tuple, Union
import annoy
import numpy as np
import scann
from tqdm import tqdm
from typing_extensions import Literal
rng_seed = 399
np.random.seed(rng_seed)
class ApproxNN:
"""
Approximate nearest neighbour class; using either ScaNN method [1] or Annoy index [2].
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2020).
Accelerating Large-Scale Inference with Anisotropic Vector Quantization.
In International Conference on Machine Learning.
.. [2] <NAME>. (2018). Annoy: Approximate Nearest Neighbors in C++/Python.
Url: https://github.com/spotify/annoy.
"""
def __init__(self, ann_alg: Literal["scann", "annoy"] = "scann") -> None:
"""
Initializes the approximate nearest neighbour class.
Parameters
----------
ann_alg : str, "scann" or "annoy"
Approximate nearest neighbour algorithm/method (defaults to "scann").
"""
self._ann_alg = ann_alg
self._ann_index: Optional[
Union[scann.scann_ops_pybind.ScannSearcher, annoy.AnnoyIndex]
] = None
def build(
self,
data: np.ndarray,
distance_measure: Optional[str] = None,
scann_num_leaves_scaling: float = 2.5,
scann_default_num_neighbours: int = 100,
annoy_n_trees: int = 250,
verbose: int = 1,
) -> None:
"""
Builds the approximate nearest neighbour (ANN) index.
Parameters
----------
data : np.ndarray
Data to build the ANN index on.
distance_measure : str, optional
Name of the distance measure (or metric). If ann_alg is set to "scann", then
choose from ["dot_product", "squared_l2"]. Otherwise, choose one of the metrics
from https://github.com/spotify/annoy. Defaults to "dot_product" if ann_alg is
set to "scann" and "euclidean" otherwise.
scann_num_leaves_scaling : float, optional
Scaling to use when computing the number of leaves for building ScaNN (defaults
to 2.5). Only has an effect if ann_alg is set to "scann".
scann_default_num_neighbours : int, optional
            Default number of neighbours to use for building ScaNN (defaults to 100). Only has
an effect if ann_alg is set to "scann".
annoy_n_trees : int, optional
Number of trees to use for building Annoy index (defaults to 250). Only has an
effect if ann_alg is set to "annoy".
verbose : int, optional
Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose). Defaults to 1 (verbose).
"""
n, d = data.shape
if verbose == 1:
print(f"Building ANN index using {self._ann_alg}...")
if self._ann_alg == "scann":
if distance_measure is None:
distance_measure = "dot_product"
# Compute number of leaves to use when building ScaNN
scann_num_leaves_order_of_magnitude = int(np.log10(np.sqrt(n)))
scann_num_leaves_num = 10 ** scann_num_leaves_order_of_magnitude
scann_num_leaves_scaled = int(
scann_num_leaves_scaling * scann_num_leaves_num
)
# Create and build index
self._ann_index = (
scann.scann_ops_pybind.builder(
db=data,
num_neighbors=scann_default_num_neighbours,
distance_measure=distance_measure,
)
.tree(
num_leaves=scann_num_leaves_scaled,
num_leaves_to_search=int(scann_num_leaves_scaled / 10),
training_sample_size=250000, # TODO: How to select this number?
)
.score_ah(
dimensions_per_block=2, anisotropic_quantization_threshold=0.2
)
.reorder(
reordering_num_neighbors=250
) # TODO: How to select this number?
.build()
)
elif self._ann_alg == "annoy":
if distance_measure is None:
distance_measure = "euclidean"
# Add data to index and build it
self._ann_index = annoy.AnnoyIndex(f=d, metric=distance_measure)
self._ann_index.set_seed(rng_seed)
if verbose == 1:
print("Adding items to index...")
for i in tqdm(range(n)):
self._ann_index.add_item(i, data[i])
if verbose == 1:
print("Building index...")
self._ann_index.build(n_trees=annoy_n_trees, n_jobs=-1)
if verbose == 1:
print("Done!")
def save(self, output_path: str) -> None:
"""
Saves the approximate nearest neighbour instance to disk.
Parameters
----------
output_path : str
Output path (directory if ann_alg is "scann", filepath otherwise).
"""
if self._ann_alg == "scann":
makedirs(output_path, exist_ok=True)
self._ann_index.serialize(output_path)
elif self._ann_alg == "annoy":
self._ann_index.save(output_path)
def load(
self,
ann_path: str,
annoy_data_dimensionality: Optional[int] = None,
annoy_mertic: Optional[str] = None,
annoy_prefault: bool = False,
) -> None:
"""
Loads an approximate nearest neighbour (ANN) instance from disk.
Parameters
----------
ann_path : str
Path of saved ANN instance (directory if ann_alg is "scann", filepath otherwise).
annoy_data_dimensionality : int, optional
Dimensionality of data (required if ann_alg is set to "annoy").
annoy_mertic : str, optional
Distance metric (required if ann_alg is set to "annoy").
annoy_prefault : bool, optional
Whether or not to enable the `prefault` option when loading Annoy index
(defaults to False).
"""
if self._ann_alg == "scann":
self._ann_index = scann.scann_ops_pybind.load_searcher(ann_path)
elif self._ann_alg == "annoy":
self._ann_index = annoy.AnnoyIndex(
f=annoy_data_dimensionality, metric=annoy_mertic
)
self._ann_index.load(fn=ann_path, prefault=annoy_prefault)
def search(
self,
query_vector: np.ndarray,
k_neighbours: int,
excluded_neighbour_indices: list = [],
scann_pre_reorder_num_neighbors: Optional[int] = None,
scann_leaves_to_search: Optional[int] = None,
return_distances: bool = False,
) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:
"""
Searches for the nearest neighbour of given query vector using approximate nearest
neighbour instance.
Parameters
----------
query_vector : np.ndarray
Vector to query.
k_neighbours : int
Number of neighbours to find.
excluded_neighbour_indices : list, optional
List of neighbour indices to exclude (defaults to []).
scann_pre_reorder_num_neighbors : int, optional
            `pre_reorder_num_neighbors` argument sent to ScaNN's search method (defaults to None).
        scann_leaves_to_search : int, optional
            `scann_leaves_to_search` argument sent to ScaNN's search method (defaults to None).
return_distances : bool, optional
Whether or not to return distances, in addition to neighbour indices (defaults to False).
Returns
-------
neighbours : np.ndarray
Nearest neighbouring indices.
distances : np.ndarray, optional
Distances to nearest neighbouring data points.
(Only returned if return_distances is set to True).
"""
num_excluded_indices = len(excluded_neighbour_indices)
k_neighbours_search = k_neighbours + num_excluded_indices
if self._ann_alg == "scann":
neighbours, distances = self._ann_index.search(
q=query_vector,
final_num_neighbors=k_neighbours_search,
pre_reorder_num_neighbors=scann_pre_reorder_num_neighbors,
leaves_to_search=scann_leaves_to_search,
)
elif self._ann_alg == "annoy":
annoy_result = self._ann_index.get_nns_by_vector(
vector=query_vector,
n=k_neighbours_search,
include_distances=return_distances,
)
if return_distances:
neighbours, distances = annoy_result
distances = np.array(distances)
else:
neighbours = annoy_result
neighbours = np.array(neighbours)
if num_excluded_indices > 0:
accepted_indices_filter = np.array(
[idx not in excluded_neighbour_indices for idx in neighbours]
)
neighbours = neighbours[accepted_indices_filter][:k_neighbours]
if return_distances:
distances = distances[accepted_indices_filter][:k_neighbours]
if return_distances:
return neighbours, distances
else:
return neighbours
def get_distance(self, i: int, j: int) -> float:
"""
Gets distance between items i and j.
Parameters
----------
i : int
Index of first item.
j : int
Index of second item.
Returns
-------
i_j_dist : float
Distance between items i and j.
"""
if self._ann_alg == "annoy":
return self._ann_index.get_distance(i, j)
else:
raise ValueError(
"get_distance() method is only available if ANN algorithm is set to 'annoy'."
)
```
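A minimal usage sketch for the class above, building an Annoy-backed index on random vectors and querying it (data, paths and parameters are illustrative; the module is assumed importable as `approx_nn`, as done elsewhere in this repository):

```python
import numpy as np

from approx_nn import ApproxNN

data = np.random.rand(1000, 64).astype(np.float32)

# Build an Annoy index on the data.
index = ApproxNN(ann_alg="annoy")
index.build(data=data, distance_measure="euclidean", annoy_n_trees=10, verbose=0)

# Query the 5 nearest neighbours of the first vector (itself included).
neighbours, distances = index.search(
    query_vector=data[0],
    k_neighbours=5,
    return_distances=True,
)
print(neighbours, distances)

# Persist the index and load it back (keyword spelling follows the signature above).
index.save("ann_index.annoy")
restored = ApproxNN(ann_alg="annoy")
restored.load(
    ann_path="ann_index.annoy",
    annoy_data_dimensionality=64,
    annoy_mertic="euclidean",
)
```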
#### File: masters-thesis-ml/code/text_preprocessing_utils.py
```python
import re
from string import digits
from typing import Dict, List, Match
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
from num2words import num2words
from contractions_utils import contractions_dict
# Download NLTK data files (tokenizer models and stop word lists)
nltk.download("punkt")
nltk.download("stopwords")
# Convert keys to lowercase
contractions_dict_lower: Dict[str, str] = {
key.lower(): val for key, val in contractions_dict.items()
}
def remove_urls(text: str) -> str:
"""
Remove URLs from a text.
Parameters
----------
text : str
Text to remove URLs from.
Returns
-------
new_text : str
New text without URLs.
"""
url_regex = r"(?:(?:http|ftp)s?:\/\/|www\.)[\n\S]+"
return re.sub(url_regex, "", text)
def remove_stopwords(words: list, language: str) -> list:
"""
Removes stop words from list of tokenized words.
Parameters
----------
words : list
List of tokenized words.
language : str
Language of words
Returns
-------
new_words : list
List of words without stop words.
"""
new_words = []
for word in words:
if word not in stopwords.words(language):
new_words.append(word)
return new_words
def replace_contractions(text: str) -> str:
"""
Replace contractions in string of text.
Parameters
----------
text : str
Text to replace contractions from.
Example replacements:
- isn't --> is not
- don't --> do not
- I'll --> I will
Returns
-------
new_text : str
New text without contractions.
"""
def replace_contraction_matches(contraction_match: Match) -> str:
"""
Replaces contraction matches (used as argument to re.sub).
Parameters
----------
contraction_match : re.Match
Contraction regex match.
Returns
-------
match_result : str
Fixed string (mapping from contraction match).
"""
match = contraction_match.group(0).lower()
return contractions_dict_lower.get(match)
# Create regex for matching contraction keys
contractions_keys_re = "|".join(contractions_dict.keys())
contractions_re = re.compile(
f"({contractions_keys_re})",
flags=re.IGNORECASE | re.DOTALL,
)
# Replace all contraction occurrences.
new_text = contractions_re.sub(replace_contraction_matches, text)
return new_text
def to_lowercase(words: list) -> list:
"""
Convert all characters to lowercase from list of tokenized words.
Parameters
----------
words : list
List of tokenized words.
Returns
-------
new_words : list
List of words in lowercase.
"""
new_words = []
for word in words:
new_word = word.lower()
new_words.append(new_word)
return new_words
def remove_punctuation(words: list) -> list:
"""
Remove punctuation from list of tokenized words.
Parameters
----------
words : list
List of tokenized words.
Returns
-------
new_words : list
List of new words without punctuations.
"""
new_words = []
for word in words:
new_word = re.sub(r"[^\w\s]|_", " ", word)
# Splitting new word on punctuation
# and adding them separately
# e.g. out-of-the-box --> out, of, the, box
for new_word in new_word.split():
new_words.append(new_word)
return new_words
def remove_digits(words: list) -> list:
"""
Removes digits from list of tokenized words.
Parameters
----------
words : list
List of tokenized words.
Returns
-------
new_words : list
List of words without digits.
"""
new_words = []
remove_digits_trans = str.maketrans("", "", digits)
for word in words:
new_word = word.translate(remove_digits_trans)
new_words.append(new_word)
return new_words
def replace_numbers(words: list, lang: str, ordinal: bool = False) -> list:
"""
Replaces (ordinal) numbers with its textual representation.
Parameters
----------
words : list
List of words.
lang: str
        Two-letter language code passed to num2words (e.g. "en").
ordinal : bool, optional
Whether or not to use ordinal textual representation.
Returns
-------
new_words : list
List of new words with textual representation of numbers.
"""
new_words = []
for word in words:
if ordinal:
re_results = re.findall(r"(\d+)(?:st|nd|rd|th)", word)
else:
re_results = re.findall(r"\d+", word)
if len(re_results) > 0:
number = int(re_results[0])
number_words = num2words(number, lang=lang, ordinal=ordinal)
# Remove commas
number_words = number_words.replace(",", "")
# Splitting number word on space
# and adding them separately
# e.g. one hundred and sixteenth
# --> one, hundred, and, sixteenth
for new_word in number_words.split():
new_words.append(new_word)
else:
new_words.append(word)
return new_words
def replace_all_numbers(words: list, language: str) -> list:
"""
Replaces normal and ordinal numbers with its textual representation.
Parameters
----------
words : list
List of words.
language : str
Language of words
Returns
-------
new_words : list
List of new words with textual representation of numbers.
"""
# Add exception for Danish
if language == "danish":
lang = "dk"
elif language == "swedish":
lang = "sv"
else:
lang = language[:2] # Extract first two characters (e.g. english --> en)
words = replace_numbers(words, lang, ordinal=True)
words = replace_numbers(words, lang)
return words
def text_to_words(
text: str, should_replace_contractions: bool = True, language: str = "english"
) -> List[str]:
"""
    Converts text into a list of words. Optionally replaces contractions in the
    original text before tokenizing into words (URL removal is currently disabled).
Parameters
----------
text : str
Text to process.
should_replace_contractions: bool
Whether or not to replace contractions (defaults to True).
language : str
Language (defaults to "english").
Returns
-------
words : list
List of words from the original text.
"""
# text = remove_urls(text)
if should_replace_contractions and language == "english":
text = replace_contractions(text)
# Tokenize text (convert into words)
words = word_tokenize(text, language)
return words
def preprocess_words(
words: List[str],
language: str = "english",
should_remove_digits: bool = False,
should_replace_numbers: bool = True,
should_remove_stopwords: bool = False,
) -> list:
"""
Preprocesses list of words using a series of techniques:
- Converts to lower-case
- Removes punctuation
- Replaces numbers with textual representation
Parameters
----------
words : list of str
List of words to preprocess.
language : str
Language (defaults to "english")
should_remove_digits : bool
Whether or not to remove digits from text (defaults to False).
should_replace_numbers : bool
Whether or not to replace numbers with textual representation
(defaults to True). Has no effect if should_remove_digits is set to True.
should_remove_stopwords : bool
Whether or not to remove stop words (defaults to False).
Returns
-------
words : list of str
Preprocessed list of words.
"""
# Apply a series of techniques to the words
words = to_lowercase(words)
words = remove_punctuation(words)
if should_remove_digits:
words = remove_digits(words)
elif should_replace_numbers:
words = replace_all_numbers(words, language)
if should_remove_stopwords:
words = remove_stopwords(words, language)
return words
def preprocess_text(
text: str,
language: str = "english",
should_replace_contractions: bool = True,
should_remove_digits: bool = False,
should_replace_numbers: bool = True,
should_remove_stopwords: bool = False,
) -> List[str]:
"""
Preprocesses text using a series of techniques:
    - Removes URLs (currently disabled in `text_to_words`)
    - Replaces contractions
    - Tokenizes text
- Converts to lower-case
- Removes punctuation
- Replaces numbers with textual representation
- Removes stop words
Parameters
----------
text : str
Text to preprocess.
language : str
Language (defaults to "english")
should_replace_contractions : bool
Whether or not to replace contractions (defaults to True).
should_remove_digits : bool
Whether or not to remove digits from text (defaults to False).
should_replace_numbers : bool
Whether or not to replace numbers with textual representation
(defaults to True). Has no effect if should_remove_digits is set to True.
should_remove_stopwords : bool
Whether or not to remove stop words (defaults to False).
Returns
-------
words : list of str
Preprocessed text split into a list of words.
"""
# Convert to list of words
words = text_to_words(
text=text,
language=language,
should_replace_contractions=should_replace_contractions,
)
# Process words
words = preprocess_words(
words=words,
language=language,
should_remove_digits=should_remove_digits,
should_replace_numbers=should_replace_numbers,
should_remove_stopwords=should_remove_stopwords,
)
return words
```
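A short usage sketch (the exact token list depends on the installed NLTK data and on `contractions_utils`, so the output shown is only indicative):

```python
from text_preprocessing_utils import preprocess_text

tokens = preprocess_text("The 2nd release doesn't cost 100 dollars.")
print(tokens)
# Indicative output (may differ slightly depending on NLTK/contractions data):
# ['the', 'second', 'release', 'does', 'not', 'cost', 'one', 'hundred', 'dollars']
```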
#### File: code/topological_data_analysis/topological_polysemy_pipeline.py
```python
import argparse
import sys
from os import makedirs
from os.path import isfile, join
from typing import Optional
import joblib
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from nltk.corpus import wordnet as wn
from scipy.stats import pearsonr
from sklearn.metrics.pairwise import euclidean_distances
from tqdm import tqdm
sys.path.append("..")
from approx_nn import ApproxNN # noqa: E402
from topological_data_analysis.topological_polysemy import ( # noqa: E402
tps_multiple,
tps_point_cloud,
)
from word_embeddings.word2vec import load_model_training_output # noqa: E402
def parse_args() -> argparse.Namespace:
"""
Parses arguments sent to the python script.
Returns
-------
parsed_args : argparse.Namespace
Parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--semeval_word_senses_filepath",
type=str,
default="",
help="Filepath of the SemEval-2010 task 14 word senses",
)
parser.add_argument(
"--word2vec_semeval_model_dir",
type=str,
default="",
help="Directory of the SemEval-2010 task 14 word2vec model",
)
parser.add_argument(
"--word2vec_enwiki_model_dir",
type=str,
default="",
help="Directory of the enwiki word2vec model",
)
parser.add_argument(
"--word2vec_google_news_model_dir",
type=str,
default="",
help="Directory of the Google News 3M word2vec model",
)
parser.add_argument(
"--glove_model_dir",
type=str,
default="",
help="Directory of the GloVe model",
)
parser.add_argument(
"--fasttext_model_dir",
type=str,
default="",
help="Directory of the fastText model",
)
parser.add_argument(
"--fasttext_tps_model_dir",
type=str,
default="",
help="Directory of the TPS fastText model",
)
parser.add_argument(
"--tps_neighbourhood_sizes",
nargs="+",
help="Neighbourhood sizes to use when computing TPS (e.g. 50, 60)",
)
parser.add_argument(
"--num_top_k_words_frequencies",
type=int,
help="Number of top words to use when computing TPS scores vs. word frequencies",
)
parser.add_argument(
"--cyclo_octane_data_filepath",
type=str,
default="",
help="Filepath of the cyclo-octane dataset",
)
parser.add_argument(
"--henneberg_data_filepath",
type=str,
default="",
help="Filepath of the Henneberg dataset",
)
parser.add_argument(
"--custom_point_cloud_neighbourhood_size",
type=int,
help="Neighbourhood size to use when computing TPS for custom point clouds",
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Output directory to save results",
)
return parser.parse_args()
def tps_word_embeddings_correlation_plot(
tps_scores: np.ndarray,
y_values: np.ndarray,
y_label: str,
tps_vs_y_correlation: float,
output_plot_filepath: str,
neighbourhood_size: int,
) -> None:
"""
Saves a correlation plot between TPS scores and some y values.
Parameters
----------
tps_scores : np.ndarray
TPS scores.
y_values : np.ndarray
Y-values to plot against TPS scores.
y_label : str
Y-axis label.
tps_vs_y_correlation : float
Correlation between TPS scores and y values.
output_plot_filepath : str
Output plot filepath.
neighbourhood_size : int
Neighbourhood size used to compute TPS scores (appended to output filepath).
"""
# Plot TPS scores to GS
fig, ax = plt.subplots(figsize=(10, 5))
scatter_h = ax.scatter(x=tps_scores, y=y_values)
if len(tps_scores) > 1000:
scatter_h.set_rasterized(True)
ax.set_xlabel(f"TPS_{neighbourhood_size}")
ax.set_ylabel(y_label)
ax.set_title(f"Correlation: {tps_vs_y_correlation:.5f}")
plt.tight_layout()
plt.savefig(
output_plot_filepath,
backend="pgf",
)
plt.close(fig)
def tps_word_embeddings(
word_embeddings_name: str,
neighbourhood_sizes: list,
semeval_target_words: np.ndarray,
semeval_target_words_gs_clusters: np.ndarray,
word_embeddings_normalized: np.ndarray,
word_to_int: dict,
word_vocabulary: list,
num_top_k_words_frequencies: int,
output_dir: str,
word_counts: Optional[list] = None,
ann_instance: ApproxNN = None,
) -> None:
"""
Computes TPS for word embeddings and saves correlation plots.
Parameters
----------
word_embeddings_name : str
Name of the word embeddings.
neighbourhood_sizes : list
Neighbourhood sizes to compute TPS scores of.
semeval_target_words : np.ndarray
SemEval-2010 task 14 target words.
semeval_target_words_gs_clusters : np.ndarray
SemEval-2010 task 14 GS clusters.
word_embeddings_normalized : np.ndarray
Normalized word embeddings.
word_to_int : dict
Dictionary for mapping a word to its integer representation.
word_vocabulary : list
List of words/word ints to use for the vocabulary.
    num_top_k_words_frequencies : int
Number of top words to use when computing TPS scores vs. word frequencies.
output_dir : str
Output directory.
word_counts : list
List containing word counts
ann_instance : ApproxNN
ApproxNN instance to use for computing TPS scores.
"""
# Ensure output directory exists
output_dir_plots = join(output_dir, word_embeddings_name)
makedirs(output_dir_plots, exist_ok=True)
# Only use the SemEval-2010 task 14 words in vocabulary
semeval_target_words_in_vocab_filter = [
i for i, word in enumerate(semeval_target_words) if word in word_to_int
]
semeval_target_words_in_vocab = semeval_target_words[
semeval_target_words_in_vocab_filter
]
semeval_target_words_gs_clusters_in_vocab = semeval_target_words_gs_clusters[
semeval_target_words_in_vocab_filter
]
tps_vs_gs_key = "TPS_n vs. GS"
tps_vs_synsets_key = "TPS_n vs. synsets"
tps_vs_frequency_key = "TPS_n vs. frequency"
result_dict: dict = {
"n": neighbourhood_sizes,
tps_vs_gs_key: [],
tps_vs_synsets_key: [],
}
has_word_counts = word_counts is not None
if has_word_counts:
result_dict[tps_vs_frequency_key] = []
for neighbourhood_size in neighbourhood_sizes:
print(f"-- Neighbourhood size: {neighbourhood_size} --")
# -- Compute TPS scores and correlation vs GS words --
output_plot_filepath = join(
output_dir_plots,
f"tps_{neighbourhood_size}_vs_gs.pdf",
)
output_tps_filepath = join(
output_dir_plots,
f"tps_{neighbourhood_size}_vs_gs.npy",
)
if not isfile(output_plot_filepath):
print("Computing TPS scores for GS words")
tps_scores_semeval = tps_multiple(
target_words=semeval_target_words_in_vocab,
word_to_int=word_to_int,
neighbourhood_size=neighbourhood_size,
word_embeddings_normalized=word_embeddings_normalized,
ann_instance=ann_instance,
n_jobs=-1,
progressbar_enabled=True,
)
# Compute correlation vs GS word meanings
tps_score_vs_gs_correlation, _ = pearsonr(
x=tps_scores_semeval, y=semeval_target_words_gs_clusters_in_vocab
)
result_dict[tps_vs_gs_key].append(tps_score_vs_gs_correlation)
# Save plot of TPS scores vs. GS
tps_word_embeddings_correlation_plot(
tps_scores=tps_scores_semeval,
y_values=semeval_target_words_gs_clusters_in_vocab,
y_label="Clusters in GS",
tps_vs_y_correlation=tps_score_vs_gs_correlation,
output_plot_filepath=output_plot_filepath,
neighbourhood_size=neighbourhood_size,
)
# Save TPS scores to file
np.save(output_tps_filepath, tps_scores_semeval)
# -- Compute TPS scores and correlation vs Wordnet synsets words --
output_plot_filepath = join(
output_dir_plots,
f"tps_{neighbourhood_size}_vs_synsets.pdf",
)
output_tps_filepath = join(
output_dir_plots,
f"tps_{neighbourhood_size}_vs_synsets.npy",
)
if not isfile(output_plot_filepath):
# Find words in vocabulary that have synsets in Wordnet
tps_scores_wordnet_synsets = []
wordnet_synsets_words_in_vocab = []
wordnet_synsets_words_in_vocab_meanings = []
print("Computing TPS scores for words in vocabulary with Wordnet synsets")
for word in tqdm(word_vocabulary):
num_synsets_word = len(wn.synsets(word))
if num_synsets_word > 0:
wordnet_synsets_words_in_vocab.append(word)
wordnet_synsets_words_in_vocab_meanings.append(num_synsets_word)
wordnet_synsets_words_in_vocab = np.array(wordnet_synsets_words_in_vocab)
tps_scores_wordnet_synsets = tps_multiple(
target_words=wordnet_synsets_words_in_vocab,
word_to_int=word_to_int,
neighbourhood_size=neighbourhood_size,
word_embeddings_normalized=word_embeddings_normalized,
ann_instance=ann_instance,
n_jobs=-1,
progressbar_enabled=True,
)
# Compute correlation vs Wordnet synsets
tps_score_vs_wordnet_synsets_correlation, _ = pearsonr(
x=tps_scores_wordnet_synsets, y=wordnet_synsets_words_in_vocab_meanings
)
result_dict[tps_vs_synsets_key].append(
tps_score_vs_wordnet_synsets_correlation
)
# Save plot of TPS scores vs. Wordnet synsets
tps_word_embeddings_correlation_plot(
tps_scores=tps_scores_wordnet_synsets,
y_values=wordnet_synsets_words_in_vocab_meanings,
y_label="Synsets in WordNet",
tps_vs_y_correlation=tps_score_vs_wordnet_synsets_correlation,
output_plot_filepath=output_plot_filepath,
neighbourhood_size=neighbourhood_size,
)
# Save TPS scores to file
np.save(output_tps_filepath, tps_scores_wordnet_synsets)
        # -- Compute TPS scores and correlation vs word frequencies --
output_plot_filepath = join(
output_dir_plots,
f"tps_{neighbourhood_size}_vs_frequency.pdf",
)
output_tps_filepath = join(
output_dir_plots,
f"tps_{neighbourhood_size}_vs_frequency.npy",
)
if has_word_counts and not isfile(output_plot_filepath):
print(
f"Computing TPS scores for top {num_top_k_words_frequencies} words vs. word frequencies"
)
tps_score_word_frequencies = tps_multiple(
target_words=word_vocabulary[:num_top_k_words_frequencies],
word_to_int=word_to_int,
neighbourhood_size=neighbourhood_size,
word_embeddings_normalized=word_embeddings_normalized,
ann_instance=ann_instance,
n_jobs=-1,
progressbar_enabled=True,
)
            # Compute correlation vs word frequencies
tps_score_vs_word_frequency_correlation, _ = pearsonr(
x=tps_score_word_frequencies,
y=word_counts[:num_top_k_words_frequencies],
)
result_dict[tps_vs_frequency_key].append(
tps_score_vs_word_frequency_correlation
)
# Save plot of TPS scores vs. word frequencies
tps_word_embeddings_correlation_plot(
tps_scores=tps_score_word_frequencies,
y_values=word_counts[:num_top_k_words_frequencies],
y_label="Word frequency",
tps_vs_y_correlation=tps_score_vs_word_frequency_correlation,
output_plot_filepath=output_plot_filepath,
neighbourhood_size=neighbourhood_size,
)
# Save TPS scores to file
np.save(output_tps_filepath, tps_score_word_frequencies)
def topological_polysemy_pipeline(
semeval_word_senses_filepath: str,
word2vec_semeval_model_dir: str,
word2vec_enwiki_model_dir: str,
word2vec_google_news_model_dir: str,
glove_model_dir: str,
fasttext_model_dir: str,
fasttext_tps_model_dir: str,
    tps_neighbourhood_sizes: list,
num_top_k_words_frequencies: int,
cyclo_octane_data_filepath: str,
henneberg_data_filepath: str,
custom_point_cloud_neighbourhood_size: int,
output_dir: str,
) -> None:
"""
Computes the topological polysemy of various word embeddings and data sets.
Saves results in output dir with some additional plots.
Parameters
----------
semeval_word_senses_filepath : str
Filepath of the SemEval-2010 task 14 word senses
word2vec_semeval_model_dir : str
Directory of the SemEval-2010 task 14 word2vec model.
word2vec_enwiki_model_dir : str
Directory of the enwiki word2vec model.
word2vec_google_news_model_dir : str
Directory of the Google News 3M word2vec model
glove_model_dir : str
Directory of the GloVe model.
fasttext_model_dir : str
Directory of the fastText model.
fasttext_tps_model_dir : str
Directory of the TPS fastText model.
    tps_neighbourhood_sizes : list
Neighbourhood sizes to use when computing TPS (e.g. 50, 60).
num_top_k_words_frequencies : int
Number of top words to use when computing TPS scores vs. word frequencies.
cyclo_octane_data_filepath : str
Filepath of the cyclo-octane dataset.
henneberg_data_filepath : str
Filepath of the Henneberg dataset.
custom_point_cloud_neighbourhood_size : int
Neighbourhood size to use when computing TPS for custom point clouds.
output_dir : str
Output directory to save results.
"""
# Ensure output directory exists
makedirs(output_dir, exist_ok=True)
# Load SemEval-2010 task 14 word senses
semeval_word_senses: dict = joblib.load(semeval_word_senses_filepath)
semeval_target_words = np.array(list(semeval_word_senses["all"].keys()))
semeval_target_word_gs_clusters = np.array(
list(semeval_word_senses["all"].values())
)
# Parse strings into int
tps_neighbourhood_sizes = [int(n_size) for n_size in tps_neighbourhood_sizes]
# -- Compute TPS for word embeddings (SemEval and enwiki) --
for dataset_name, model_dir in zip(
["semeval_2010_task_14", "enwiki"],
[word2vec_semeval_model_dir, word2vec_enwiki_model_dir],
):
# Load word embeddings
print(f"Loading {dataset_name} word embeddings...")
w2v_training_output = load_model_training_output(
model_training_output_dir=model_dir,
model_name="word2vec",
dataset_name=dataset_name,
return_normalized_embeddings=True,
return_scann_instance=True,
)
last_embedding_weights_normalized = w2v_training_output[
"last_embedding_weights_normalized"
]
last_embedding_weights_scann_instance = w2v_training_output[
"last_embedding_weights_scann_instance"
]
words = w2v_training_output["words"]
word_to_int = w2v_training_output["word_to_int"]
word_counts = w2v_training_output["word_counts"]
print("Done!")
print("Computing TPS for word embeddings...")
tps_word_embeddings(
word_embeddings_name=dataset_name,
neighbourhood_sizes=tps_neighbourhood_sizes,
semeval_target_words=semeval_target_words,
semeval_target_words_gs_clusters=semeval_target_word_gs_clusters,
word_embeddings_normalized=last_embedding_weights_normalized,
word_to_int=word_to_int,
word_vocabulary=words,
num_top_k_words_frequencies=num_top_k_words_frequencies,
output_dir=output_dir,
word_counts=word_counts,
ann_instance=last_embedding_weights_scann_instance,
)
del last_embedding_weights_scann_instance
print("Done!")
# -- Compute TPS for external word embeddings --
# Prepare constants
external_word_embeddings = [
(
"google_news_3m",
"GoogleNews-vectors-negative300",
word2vec_google_news_model_dir,
),
(
"glove_cc_840b_300d",
"glove.840B.300d",
glove_model_dir,
),
(
"fasttext_cc_300d",
"cc.en.300.vec",
fasttext_model_dir,
),
(
"fasttext_tps_300d",
"fastText.TPS.300d",
fasttext_tps_model_dir,
),
]
# Compute TPS for each external word embeddings
for word_embeddings_name, model_name, model_dir in external_word_embeddings:
# Prepare filepaths
model_normalized_weights_filepath = join(
model_dir, f"{model_name}_normalized.npy"
)
model_words_filepath = join(model_dir, f"{model_name}_words.txt")
model_scann_artifacts_dir = join(model_dir, f"{model_name}_scann_artifacts")
# Load data
print(f"Loading {model_name} data...")
model_weights_normalized = np.load(
model_normalized_weights_filepath, mmap_mode="r"
)
with open(model_words_filepath, "r") as words_file:
model_words = np.array(words_file.read().split("\n"))
model_approx_nn = ApproxNN(ann_alg="scann")
model_approx_nn.load(ann_path=model_scann_artifacts_dir)
print("Done!")
print(f"Computing TPS for {model_name} word embeddings...")
tps_word_embeddings(
word_embeddings_name=word_embeddings_name,
neighbourhood_sizes=tps_neighbourhood_sizes,
semeval_target_words=semeval_target_words,
semeval_target_words_gs_clusters=semeval_target_word_gs_clusters,
word_embeddings_normalized=model_weights_normalized,
word_to_int={word: i for i, word in enumerate(model_words)},
word_vocabulary=model_words,
num_top_k_words_frequencies=num_top_k_words_frequencies,
output_dir=output_dir,
ann_instance=model_approx_nn,
)
del model_approx_nn
print("Done!")
# -- Compute TPS for custom point clouds --
for point_cloud_name, point_cloud_filepath in zip(
["cyclo_octane", "henneberg"],
[cyclo_octane_data_filepath, henneberg_data_filepath],
):
# Load and prepare data for TPS
point_cloud = pd.read_csv(point_cloud_filepath, header=None).values
point_cloud_normalized = point_cloud / np.linalg.norm(
point_cloud, axis=1
).reshape(-1, 1)
point_cloud_pairwise_dists = euclidean_distances(point_cloud)
# Compute TPS scores
num_points = len(point_cloud)
tps_scores = np.zeros(num_points)
print(f"Computing TPS scores for {point_cloud_name}...")
for point_index in tqdm(range(num_points)):
tps_score = tps_point_cloud(
point_index=point_index,
neighbourhood_size=custom_point_cloud_neighbourhood_size,
point_cloud_normalized=point_cloud_normalized,
point_cloud_pairwise_dists=point_cloud_pairwise_dists,
)
tps_scores[point_index] = tps_score
# Save result
point_cloud_output_dir = join(output_dir, point_cloud_name)
makedirs(point_cloud_output_dir, exist_ok=True)
np.save(
join(
point_cloud_output_dir,
f"tps_scores_{custom_point_cloud_neighbourhood_size}.npy",
),
tps_scores,
)
if __name__ == "__main__":
args = parse_args()
topological_polysemy_pipeline(
semeval_word_senses_filepath=args.semeval_word_senses_filepath,
word2vec_semeval_model_dir=args.word2vec_semeval_model_dir,
word2vec_enwiki_model_dir=args.word2vec_enwiki_model_dir,
word2vec_google_news_model_dir=args.word2vec_google_news_model_dir,
glove_model_dir=args.glove_model_dir,
fasttext_model_dir=args.fasttext_model_dir,
fasttext_tps_model_dir=args.fasttext_tps_model_dir,
tps_neighbourhood_sizes=args.tps_neighbourhood_sizes,
num_top_k_words_frequencies=args.num_top_k_words_frequencies,
cyclo_octane_data_filepath=args.cyclo_octane_data_filepath,
henneberg_data_filepath=args.henneberg_data_filepath,
custom_point_cloud_neighbourhood_size=args.custom_point_cloud_neighbourhood_size,
output_dir=args.output_dir,
)
```
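The quantitative core of `tps_word_embeddings` above is a Pearson correlation between per-word TPS scores and a reference quantity (GS cluster counts, WordNet synset counts, or word frequency). In isolation, and with made-up numbers, that step looks like this:

```python
import numpy as np
from scipy.stats import pearsonr

# Toy stand-ins for the TPS scores and synset counts of five words.
tps_scores = np.array([0.10, 0.40, 0.35, 0.80, 0.55])
synset_counts = np.array([1, 3, 2, 7, 4])

correlation, p_value = pearsonr(x=tps_scores, y=synset_counts)
print(f"Correlation: {correlation:.5f} (p={p_value:.3f})")
```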
#### File: code/topological_data_analysis/tps_spheres_experiment_data.py
```python
import argparse
import sys
from os import makedirs
from os.path import isfile, join
import numpy as np
from tqdm import tqdm
rng_seed = 399
np.random.seed(rng_seed)
sys.path.append("..")
from topological_data_analysis.tda_utils import generate_points_in_spheres # noqa: E402
from topological_data_analysis.topological_polysemy import ( # noqa: E402
tps_multiple_point_cloud,
)
def parse_args() -> argparse.Namespace:
"""
Parses arguments sent to the python script.
Returns
-------
parsed_args : argparse.Namespace
Parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--tps_neighbourhood_size",
type=int,
default="",
help="TPS neighbourhood size",
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Output directory where processed files will be saved to",
)
return parser.parse_args()
def prepare_spheres_data(noisy_spheres: bool, output_dir: str) -> list:
"""
Prepares spheres data.
Parameters
----------
noisy_spheres : bool
Whether or not to create noisy sphere data
output_dir : str
Output directory where processed files will be saved to.
Returns
-------
sphere_data_filepaths : list
List of sphere data filepaths.
"""
# Generate sphere data
sphere_point_shift = 2
space_dimensionality = 300
sphere_dimensionalities = [2, 3, 4, 5, 10, 20, 50, 300]
point_in_each_sphere_gen = 1000000
sphere_sample_num_intervals = 20
sphere_sample_size = 1000
sphere_points_data_filepaths = []
sphere_noisy_str = "_noisy" if noisy_spheres else ""
for sphere_dimensionality in sphere_dimensionalities:
print(f"Sphere dimensionality: {sphere_dimensionality}")
sphere_points_data_filepath = join(
output_dir,
f"sphere_points_data_{sphere_dimensionality}{sphere_noisy_str}.npy",
)
sampled_sphere_points_data_filepath = join(
output_dir,
f"sampled_sphere_points_data_{sphere_dimensionality}{sphere_noisy_str}.npy",
)
sphere_points_data_filepaths.append(
(
sphere_dimensionality,
sphere_points_data_filepath,
sampled_sphere_points_data_filepath,
)
)
if isfile(sphere_points_data_filepath) and isfile(
sampled_sphere_points_data_filepath
):
continue
print("Generating points...")
sphere_points, sphere_point_labels = generate_points_in_spheres(
num_points=point_in_each_sphere_gen,
sphere_dimensionality=sphere_dimensionality,
space_dimensionality=space_dimensionality,
create_intersection_point=True,
noisy_spheres=noisy_spheres,
random_state=rng_seed,
)
sphere_point_shift_arr = np.repeat(sphere_point_shift, space_dimensionality)
sphere_points += sphere_point_shift_arr
        sphere_points_intersection = sphere_point_shift_arr
distances_to_intersection_point = np.zeros(sphere_points.shape[0])
print("Computing distances...")
for i, sphere_point in enumerate(tqdm(sphere_points)):
distances_to_intersection_point[i] = np.linalg.norm(
                sphere_point - sphere_points_intersection
)
distances_to_intersection_point_sorted_indices = np.argsort(
distances_to_intersection_point
)
# Sample sphere points from intervals, sorted by distance to intersection point
sampled_sphere_point_indices = [
distances_to_intersection_point_sorted_indices[0] # <-- Intersection point
]
interval_width = (sphere_points.shape[0] - 1) // sphere_sample_num_intervals
for i in range(sphere_sample_num_intervals):
min_interval_idx = max(i * interval_width, 1)
max_interval_idx = (i + 1) * interval_width
interval_indices = distances_to_intersection_point_sorted_indices[
np.arange(min_interval_idx, max_interval_idx)
]
sampled_indices = np.random.choice(
interval_indices, size=sphere_sample_size, replace=False
)
sampled_sphere_point_indices.extend(sampled_indices)
sampled_sphere_point_indices = np.array(sampled_sphere_point_indices)
sphere_points_data = np.column_stack(
(
sphere_points,
sphere_point_labels,
distances_to_intersection_point,
)
)
sampled_sphere_points_data = np.column_stack(
(
sphere_points[sampled_sphere_point_indices],
sphere_point_labels[sampled_sphere_point_indices],
distances_to_intersection_point[sampled_sphere_point_indices],
sampled_sphere_point_indices,
)
)
# Save data
print("Saving data...")
np.save(sphere_points_data_filepath, sphere_points_data)
np.save(sampled_sphere_points_data_filepath, sampled_sphere_points_data)
# Free resources
del sphere_points_data
del sphere_points
del sphere_point_labels
del distances_to_intersection_point
del sampled_sphere_point_indices
del sampled_sphere_points_data
return sphere_points_data_filepaths
def compute_tps_scores(
sphere_data_filepaths: list, tps_neighbourhood_size: int, output_dir: str
) -> None:
"""
Computes TPS scores of sphere data.
Parameters
----------
sphere_data_filepaths : list
List of sphere dimensionalities and data filepaths.
tps_neighbourhood_size : int
TPS neighbourhood size.
output_dir : str
Output directory where processed files will be saved to.
"""
for (
sphere_dimensionality,
sphere_points_filepath,
sphere_point_indices_filepath,
) in sphere_data_filepaths:
# Check if TPS scores are computed already
tps_scores_filepath = join(
output_dir,
f"sphere_points_data_{sphere_dimensionality}_tps_{tps_neighbourhood_size}_scores.npy",
)
if isfile(tps_scores_filepath):
continue
print(f"Sphere dimensionality: {sphere_dimensionality}")
print("Loading data...")
sphere_points_data = np.load(sphere_points_filepath)
sphere_points = sphere_points_data[:, :-2]
sphere_points_normalized = sphere_points / np.linalg.norm(
sphere_points, axis=1
).reshape(-1, 1)
sampled_sphere_points_data = np.load(sphere_point_indices_filepath)
sampled_sphere_point_indices = sampled_sphere_points_data[:, -1].astype(int)
print("Done!")
# Compute TPS scores
print("Computing TPS...")
tps_scores_point_in_spheres = tps_multiple_point_cloud(
point_indices=sampled_sphere_point_indices,
neighbourhood_size=tps_neighbourhood_size,
point_cloud_normalized=sphere_points_normalized,
return_persistence_diagram=False,
n_jobs=-1,
progressbar_enabled=True,
)
np.save(tps_scores_filepath, tps_scores_point_in_spheres)
# Free resources
del sphere_points_data
del sampled_sphere_points_data
del sampled_sphere_point_indices
del sphere_points
del sphere_points_normalized
del tps_scores_point_in_spheres
def tps_spheres_experiment_data_preprocessing(
tps_neighbourhood_size: int, output_dir: str
) -> None:
"""
Preprocesses data for the TPS spheres experiment.
Parameters
----------
tps_neighbourhood_size : int
TPS neighbourhood size.
output_dir : str
Output directory where processed files will be saved to.
"""
for is_noisy in [False, True]:
print(f"Noisy: {is_noisy}")
noisy_str = "_noisy" if is_noisy else ""
experiment_output_dir = join(output_dir, f"tps_spheres_experiment{noisy_str}")
makedirs(experiment_output_dir, exist_ok=True)
print("Preparing spheres data...")
sphere_data_filepaths = prepare_spheres_data(noisy_spheres=is_noisy, output_dir=experiment_output_dir)
print("Computing TPS scores...")
compute_tps_scores(
tps_neighbourhood_size=tps_neighbourhood_size,
sphere_data_filepaths=sphere_data_filepaths,
output_dir=experiment_output_dir,
)
if __name__ == "__main__":
args = parse_args()
tps_spheres_experiment_data_preprocessing(
tps_neighbourhood_size=args.tps_neighbourhood_size, output_dir=args.output_dir
)
```
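The sampling in `prepare_spheres_data` is stratified by distance to the intersection point: indices are sorted by distance, the sorted range is cut into equally wide intervals, and a fixed number of points is drawn from each interval. A toy illustration of that scheme on its own:

```python
import numpy as np

rng = np.random.default_rng(399)

# Distances of 101 toy points to a reference point.
distances = rng.random(101)
sorted_indices = np.argsort(distances)

num_intervals = 5
sample_size = 3
interval_width = (len(distances) - 1) // num_intervals

sampled = [sorted_indices[0]]  # always keep the closest point (the "intersection")
for i in range(num_intervals):
    lower = max(i * interval_width, 1)
    upper = (i + 1) * interval_width
    interval = sorted_indices[np.arange(lower, upper)]
    sampled.extend(rng.choice(interval, size=sample_size, replace=False))
print(np.array(sampled))
```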
#### File: code/word_embeddings/eval_utils.py
```python
import sys
from typing import Dict, List, Optional, Tuple, Union
import joblib
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from fastdist import fastdist
from sklearn.base import ClusterMixin, TransformerMixin
from sklearn.manifold import TSNE
from tqdm.auto import tqdm
from umap import UMAP
sys.path.append("..")
from approx_nn import ApproxNN # noqa: E402
def get_word_vec(
target_word: str, word_to_int: Dict[str, int], weights: np.ndarray
) -> np.ndarray:
"""
Gets the word vector of a word.
Parameters
----------
target_word : str
Target word to find word vector of.
word_to_int : dict of str and int
Dictionary mapping from word to its integer representation.
weights : np.ndarray
Numpy matrix (vocabulary size, embedding dim) containing word vectors.
Returns
-------
word_vec : np.ndarray
Word vector of a word
"""
return weights[word_to_int[target_word]]
def similar_words(
weights: np.ndarray,
word_to_int: Dict[str, int],
words: np.ndarray,
ann_instance: ApproxNN = None,
top_n: int = 10,
positive_words: Optional[List[str]] = None,
negative_words: Optional[List[str]] = None,
vocab_size: int = -1,
return_similarity_score: bool = True,
) -> List[Union[Tuple, str]]:
"""
Finds the most similar words of a linear combination of positively and negatively
contributing words.
Parameters
----------
weights : np.ndarray
Numpy matrix (vocabulary size, embedding dim) containing word vectors.
word_to_int : dict of str and int
Dictionary mapping from word to its integer representation.
words : np.ndarray
Numpy array containing words from the vocabulary.
ann_instance : ApproxNN, optional
ApproxNN instance, built on word embeddings (defaults to None).
top_n : int, optional
Number of similar words (defaults to 10).
positive_words : list of str, optional
        List of words contributing positively (defaults to an empty list).
    negative_words : list of str, optional
        List of words contributing negatively (defaults to an empty list).
vocab_size : int, optional
Vocabulary size to use, e.g., only most common `vocab_size` words to taken
into account (defaults to -1 meaning all words).
return_similarity_score : bool, optional
Whether or not to return the cosine similarity score (`ann_instance`
must be set to None to have an effect).
Returns
-------
If return_similarity_score is True, then
        pairs : list of tuples of str and float
List of `top_n` similar words and their cosine similarities.
else:
closest_words : list of str
List of `top_n` similar words.
"""
# Default values
if positive_words is None:
positive_words = []
if negative_words is None:
negative_words = []
# Restrict vocabulary
if vocab_size > 0:
weights = weights[:vocab_size]
words = words[:vocab_size]
# Create query word vector
query_word_vec = np.zeros((weights.shape[1],), dtype=np.float64)
query_word_vec += np.array(
[get_word_vec(pos_word, word_to_int, weights) for pos_word in positive_words]
).sum(axis=0)
query_word_vec -= np.array(
[get_word_vec(neg_word, word_to_int, weights) for neg_word in negative_words]
).sum(axis=0)
# Create indices list of query words to exclude from search
exclude_words_indices = [
word_to_int[word] for word in positive_words + negative_words
]
# Find closest words
if ann_instance is None:
# Use cosine similarity to find similar words
cos_sims = fastdist.cosine_vector_to_matrix(query_word_vec, weights)
sorted_indices = cos_sims.argsort()[::-1]
sorted_indices = [
idx for idx in sorted_indices if idx not in exclude_words_indices
]
else:
query_word_vec_norm = query_word_vec / np.linalg.norm(query_word_vec)
sorted_indices = ann_instance.search(
query_vector=query_word_vec_norm,
k_neighbours=top_n,
excluded_neighbour_indices=exclude_words_indices,
)
# Filter top words/similarities
top_words = words[sorted_indices][:top_n]
# Create word similarity pairs
if return_similarity_score and ann_instance is None:
top_sims = cos_sims[sorted_indices][:top_n]
result = list(zip(top_words, top_sims))
else:
result = top_words
return result
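# Illustrative usage sketch (the variable names and the "king - man + woman" query
# are assumptions, not part of this module):
# pairs = similar_words(
#     weights=embedding_weights,      # (vocab_size, embedding_dim) matrix
#     word_to_int=word_to_int,
#     words=words,
#     positive_words=["king", "woman"],
#     negative_words=["man"],
#     top_n=5,
# )
# # -> e.g. [("queen", 0.71), ...] since return_similarity_score defaults to True.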
def create_embeddings_of_train_weight_checkpoints(
model_weights_filepaths: list,
vocab_size: int,
embedding_dim: int,
clusterer: ClusterMixin,
transformer: Union[UMAP, TSNE, TransformerMixin],
) -> Tuple[np.ndarray, np.ndarray]:
"""
Creates embeddings using a transformer over the course of multiple word2vec training
checkpoints.
Parameters
----------
model_weights_filepaths : list
List filepaths to model weights.
vocab_size : int
Vocabulary size to use.
embedding_dim : int
Embedding dimension used in models.
    clusterer : ClusterMixin
        Clusterer instance to use when labeling words. Clustering is applied to
        the embeddings of the last checkpoint and the resulting labels are reused
        for all time steps.
transformer : Union[UMAP, TSNE, TransformerMixin]
Transformer/dimensionality reduction instance applied to the embeddings.
Returns
-------
results : tuple of np.ndarray
        A tuple consisting of the transformed embeddings and their respective
        cluster labels.
"""
# Prepare matrix of word embeddings of all time steps
num_checkpoints = len(model_weights_filepaths)
embeddings = np.zeros((num_checkpoints * vocab_size, embedding_dim))
cluster_labels = np.zeros(vocab_size)
for i, model_weights_filepath in enumerate(model_weights_filepaths):
# Load weights and restrict to vocabulary
weights = np.load(model_weights_filepath, mmap_mode="r")
weights = weights[:vocab_size]
embeddings[i * vocab_size : (i + 1) * vocab_size] = weights
# Use cluster labels from last embedding
if i == num_checkpoints - 1:
cluster_labels = clusterer.fit_predict(weights)
# Create transformed embedding from all checkpoints
transformed_embedding = transformer.fit_transform(embeddings)
return transformed_embedding, cluster_labels
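# Illustrative usage sketch (the checkpoint paths and the KMeans clusterer are
# assumptions; any ClusterMixin with fit_predict works):
# from sklearn.cluster import KMeans
# transformed, labels = create_embeddings_of_train_weight_checkpoints(
#     model_weights_filepaths=["ckpt_epoch_01_weights.npy", "ckpt_epoch_10_weights.npy"],
#     vocab_size=10000,
#     embedding_dim=300,
#     clusterer=KMeans(n_clusters=10),
#     transformer=UMAP(n_components=2),
# )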
def visualize_embeddings_over_time(
transformed_word_embeddings: np.ndarray,
cluster_labels: np.ndarray,
vocab_size: int,
words: np.ndarray,
num_words: Optional[int] = None,
title: str = "Word embeddings over time",
) -> None:
"""
Visualizes transformed (e.g. into 2D or 3D) word embeddings over time.
Parameters
----------
transformed_word_embeddings : np.ndarray
Transformed word embeddings.
cluster_labels : np.ndarray
Cluster labels of each word in the transformed word embeddings.
vocab_size : int
Vocabulary size of transformed word embeddings.
words : np.ndarray
Words in the vocabulary in a numpy array.
num_words : int, optional
Number of words to visualize (defaults to all words).
title : str, optional
Title to use for the plot (defaults to "Word embeddings over time").
"""
is_3d = transformed_word_embeddings.shape[1] == 3
num_time_steps = int(len(transformed_word_embeddings) / vocab_size)
if num_words is None:
num_words = vocab_size
# Create Pandas DataFrame for Plotly animations
word_embeddings_over_time_df_dict: dict = {
"time_step": [],
"x": [],
"y": [],
"cluster_label": [],
"word": [],
}
if is_3d:
word_embeddings_over_time_df_dict["z"] = []
for time_step in range(1, num_time_steps + 1):
weights = transformed_word_embeddings[
(time_step - 1) * vocab_size : time_step * vocab_size
]
# Add to df
word_embeddings_over_time_df_dict["time_step"].extend(
np.repeat(time_step, num_words)
)
word_embeddings_over_time_df_dict["x"].extend(weights[:num_words, 0])
word_embeddings_over_time_df_dict["y"].extend(weights[:num_words, 1])
if is_3d:
word_embeddings_over_time_df_dict["z"].extend(weights[:num_words, 2])
word_embeddings_over_time_df_dict["cluster_label"].extend(
cluster_labels[:num_words]
)
word_embeddings_over_time_df_dict["word"].extend(words[:num_words])
# Create df from dict
word_embeddings_over_time_df = pd.DataFrame(word_embeddings_over_time_df_dict)
# Visualize animation of transformed embeddings over time
if is_3d:
fig = px.scatter_3d(
word_embeddings_over_time_df,
x="x",
y="y",
z="z",
range_x=[
transformed_word_embeddings[:num_words, 0].min(),
transformed_word_embeddings[:num_words, 0].max(),
],
range_y=[
transformed_word_embeddings[:num_words, 1].min(),
transformed_word_embeddings[:num_words, 1].max(),
],
range_z=[
transformed_word_embeddings[:num_words, 2].min(),
transformed_word_embeddings[:num_words, 2].max(),
],
animation_frame="time_step",
color="cluster_label",
hover_name="word",
title=title,
)
else:
fig = px.scatter(
word_embeddings_over_time_df,
x="x",
y="y",
range_x=[
transformed_word_embeddings[:num_words, 0].min(),
transformed_word_embeddings[:num_words, 0].max(),
],
range_y=[
transformed_word_embeddings[:num_words, 1].min(),
transformed_word_embeddings[:num_words, 1].max(),
],
animation_frame="time_step",
color="cluster_label",
hover_name="word",
title=title,
)
fig.update_scenes({"aspectmode": "cube"})
fig.show()
def plot_word_relationships_2d(
relationship_pairs: List[Tuple[str, str]],
transformed_word_embeddings: np.ndarray,
word_to_int: dict,
title: str = "Plot of relationship pairs",
x_label: str = "x1",
y_label: str = "x2",
) -> None:
"""
Plots relationships between words in 2D. Requires that the transformed word embeddings
are transformed in 2D space.
Parameters
----------
relationship_pairs : list of tuples of str
List of tuples of "from" (first entry) and "to" (second entry) words.
transformed_word_embeddings : np.ndarray
Transformed word embeddings.
word_to_int : dict of str and int
Dictionary mapping from word to its integer representation.
title : str
Title to use for the plot.
x_label : str
Label to use for the x-axis.
y_label : str
Label to use for the y-axis.
"""
fig = go.Figure()
for (from_word, to_word) in relationship_pairs:
from_word_vec = get_word_vec(
from_word, word_to_int, transformed_word_embeddings
)
to_word_vec = get_word_vec(to_word, word_to_int, transformed_word_embeddings)
# Plot points in 2D
fig.add_trace(
go.Scatter(
x=[from_word_vec[0], to_word_vec[0]],
y=[from_word_vec[1], to_word_vec[1]],
mode="markers+text",
text=[from_word, to_word],
textposition="bottom center",
hovertext=[from_word, to_word],
)
)
# Add title, x-label and y-label to plot
fig.update_layout(title=title, xaxis_title=x_label, yaxis_title=y_label)
# Annotate points with arrows
fig.add_annotation(
ax=from_word_vec[0],
ay=from_word_vec[1],
axref="x",
ayref="y",
x=to_word_vec[0],
y=to_word_vec[1],
xref="x",
yref="y",
showarrow=True,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
opacity=0.5,
)
fig.update_layout(showlegend=False)
fig.show()
def filter_word_analogy_dataset(
analogy_dataset: List[Tuple[str, ...]], word_to_int: dict, vocab_size: int
) -> list:
"""
Filters a word analogy dataset such that it only contains words from the vocabulary.
Parameters
----------
analogy_dataset : list
List of word analogies (list of tuple of words)
word_to_int : dict
Dictionary for mapping a word to its integer representation.
vocab_size : int
Vocabulary size.
Returns
-------
analogies_filtered : list
Filtered word analogies.
"""
analogies_filtered = []
for word_analogies in analogy_dataset:
# Ensure all words are in vocabulary
words_in_vocab = True
for word in word_analogies:
if word not in word_to_int or word_to_int[word] >= vocab_size:
words_in_vocab = False
break
if words_in_vocab:
analogies_filtered.append(word_analogies)
return analogies_filtered
def load_analogies_test_dataset(
analogies_filepath: str, word_to_int: dict, vocab_size: int
) -> dict:
"""
Loads an analogies test dataset file and filters out out of vocabulary entries.
Parameters
----------
analogies_filepath : str
Filepath of the analogies test dataset file.
word_to_int : dict
Dictionary for mapping a word to its integer representation.
vocab_size : int
Vocabulary size.
Returns
-------
analogies_dict : dict
Dictionary mapping from section name to list of tuples of word analogies
from the word vocabulary.
"""
# Load analogies dict from file
analogies_dict_raw = joblib.load(analogies_filepath)
# Initialize resulting dictionary
analogies_dict: dict = {key: [] for key in analogies_dict_raw.keys()}
# Ensure analogies_dict only contain entries that are in the vocabulary.
for section_name, analogies_pairs in analogies_dict_raw.items():
analogies_dict[section_name] = filter_word_analogy_dataset(
analogies_pairs, word_to_int, vocab_size
)
return analogies_dict
def evaluate_model_word_analogies(
analogies_filepath: str,
word_embeddings: np.ndarray,
word_to_int: dict,
words: np.ndarray,
vocab_size: int = -1,
ann_instance: ApproxNN = None,
top_n: int = 1,
verbose: int = 1,
) -> dict:
"""
Evaluates a word2vec model on a word analogies test dataset.
Parameters
----------
analogies_filepath : str
Filepath of the analogies test dataset file.
word_embeddings : np.ndarray
Word embeddings
word_to_int : dict mapping from str to int
Dictionary for mapping a word to its integer representation
words : np.ndarray
Numpy array of words from the vocabulary.
vocab_size : int, optional
Vocabulary size to use (defaults to -1 meaning all words).
ann_instance : ApproxNN, optional
ApproxNN instance, built on word embeddings (defaults to None).
top_n : int, optional
Number of words to look at for computing accuracy. If the predicted word is in the
`top_n` most similar words, it is flagged as a correct prediction. Defaults to
1.
verbose : int, optional
Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose). Defaults to 1 (verbose).
Returns
-------
analogies_accuracies : dict mapping from str to float
Dictionary mapping from analogy section to its accuracy (percentage).
"""
if vocab_size == -1:
vocab_size = len(words)
# Load analogies word pairs from file
analogies = load_analogies_test_dataset(analogies_filepath, word_to_int, vocab_size)
# Perform evaluation
analogies_accuracies = {}
for (section_name, analogies_word_pairs) in analogies.items():
if verbose >= 1:
print(f"-- Evaluating {section_name}... --")
num_correct = 0
total = len(analogies_word_pairs)
for qw_pair in tqdm(analogies_word_pairs):
(a_word, b_word, c_word, d_word) = qw_pair
d_word_predictions = similar_words(
positive_words=[b_word, c_word],
negative_words=[a_word],
weights=word_embeddings,
words=words,
word_to_int=word_to_int,
ann_instance=ann_instance,
vocab_size=vocab_size,
top_n=top_n,
return_similarity_score=False,
)
if d_word in d_word_predictions:
num_correct += 1
if total == 0:
analogies_accuracies[section_name] = np.nan # => no predictions made
print(f"All word analogies in {section_name} missing from vocabulary")
else:
analogies_accuracies[section_name] = num_correct / total
print(f"Accuracy: {(analogies_accuracies[section_name] * 100):.2f}%")
# Compute average accuracy over all sections (ignore NaN's)
analogies_accuracies["avg"] = np.nanmean(list(analogies_accuracies.values()))
return analogies_accuracies
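# Illustrative usage sketch (the .joblib path is an assumption; see
# preprocess_eval_test_data.py for how such files are produced):
# accuracies = evaluate_model_word_analogies(
#     analogies_filepath="data/sswr.joblib",
#     word_embeddings=embedding_weights,
#     word_to_int=word_to_int,
#     words=words,
#     top_n=5,
# )
# print(accuracies["avg"])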
```
#### File: code/word_embeddings/preprocess_eval_test_data.py
```python
import argparse
import re
import sys
import tarfile
from os import makedirs
from os.path import isdir, isfile, join
import joblib
from tqdm import tqdm
sys.path.append("..")
from utils import download_from_url, get_cached_download_text_file # noqa: E402
def parse_args() -> argparse.Namespace:
"""
Parses arguments sent to the python script.
Returns
-------
parsed_args : argparse.Namespace
Parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--raw_data_dir",
type=str,
default="raw_data",
help="Path to the raw data directory (where files will be downloaded to)",
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Output directory to save processed data",
)
return parser.parse_args()
def parse_questions_X(questions_X_content: str) -> dict:
"""
Parses a "questions-X.txt" (where X is 'words' or 'phrases') file into a
section-separated dictionary for looking up word pairs from each section.
Parameters
----------
questions_X_content: str
Raw content of the "questions-X.txt" (where X is 'words' or 'phrases') file
Returns
-------
questions_X: dict
Dictionary mapping from section to a list of word pairs
"""
# Parse questions 'X' pairs for each section
questions_X_sections = re.findall(r"(: .+)", questions_X_content)
questions_X_delimiters = "|".join(questions_X_sections)
# Split questions 'X' content into list
questions_X_content_splits = []
for content_split in re.split(questions_X_delimiters, questions_X_content):
if len(content_split) == 0:
continue
content_split_lines = content_split[1 : len(content_split) - 1].split("\n")
questions_X_split_content: list = []
for word_line in content_split_lines:
# Split string of words into tuple of lower-case words and append to list
words = word_line.split()
words_tuple = tuple([word.lower() for word in words])
questions_X_split_content.append(words_tuple)
questions_X_content_splits.append(questions_X_split_content)
# Construct dictionary with question-X entries
questions_X = {
questions_X_sections[i][2:]: questions_X_content_splits[i]
for i in range(len(questions_X_sections))
}
return questions_X
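# The raw files consist of ": <section>" headers followed by whitespace-separated
# word tuples, e.g.:
#   : capital-common-countries
#   Athens Greece Baghdad Iraq
# which parse_questions_X turns into
#   {"capital-common-countries": [("athens", "greece", "baghdad", "iraq"), ...], ...}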
def preprocess_questions_words(raw_data_dir: str, output_dir: str) -> None:
"""
Downloads and preprocess test data for evaluating a word2vec model
on the Semantic-Syntactic Word Relationship test set (SSWR) from
Mikolov et al. (https://arxiv.org/pdf/1301.3781.pdf)
Parameters
----------
raw_data_dir : str
Path to the raw data directory (where files will be downloaded to).
output_dir : str
Output directory to save processed data.
"""
print("Processing questions-words...")
# Fetch questions-words.txt from Tensorflow
filename = "questions-words.txt"
txt_url = f"http://download.tensorflow.org/data/{filename}"
questions_words_txt = get_cached_download_text_file(txt_url, raw_data_dir, filename)
# Parse the raw content
questions_words_dict = parse_questions_X(questions_words_txt)
print("Done!")
# Save questions-words dict to file
dest_filename = "sswr.joblib"
questions_words_filepath = join(output_dir, dest_filename)
print("Saving to file...")
joblib.dump(questions_words_dict, questions_words_filepath)
print("Done!")
def preprocess_msr(raw_data_dir: str, output_dir: str) -> None:
"""
Downloads and preprocess test data for evaluating a word2vec model
on the Microsoft Research Syntactic Analogies Dataset (MSR) from
Mikolov et al. (https://www.aclweb.org/anthology/N13-1090.pdf)
Parameters
----------
raw_data_dir : str
Path to the raw data directory (where files will be downloaded to).
output_dir : str
Output directory to save processed data.
"""
print("Processing MSR...")
# Initialize paths
dataset_name = "msr"
raw_data_url = "https://download.microsoft.com/download/A/B/4/AB4F476B-48A6-47CF-9716-5FF9D0D1F7EA/FeatureAugmentedRNNToolkit-v1.1.tgz"
raw_data_zip_filepath = join(raw_data_dir, f"{dataset_name}.tgz")
raw_data_extracted_zip_filepath = join(raw_data_dir, dataset_name)
output_filepath = join(output_dir, f"{dataset_name}.joblib")
# Download raw data if not present
if not isfile(raw_data_zip_filepath):
print(f"Downloading raw {dataset_name} data...")
download_from_url(raw_data_url, raw_data_zip_filepath)
print("Done!")
# Extract raw data if not present
if not isdir(raw_data_extracted_zip_filepath):
print("Extracting raw data...")
with tarfile.open(raw_data_zip_filepath) as tar_file:
tar_file.extractall(raw_data_extracted_zip_filepath)
print("Done!")
# Read content from extracted zip, process them and combine into one test dataset.
with open(
join(
raw_data_extracted_zip_filepath, "test_set", "word_relationship.questions"
),
"r",
) as file:
word_relationship_questions = [
line.split(" ") for line in file.read().split("\n") if len(line) > 0
]
with open(
join(raw_data_extracted_zip_filepath, "test_set", "word_relationship.answers"),
"r",
) as file:
word_relationship_answers = [
line.split(" ") for line in file.read().split("\n") if len(line) > 0
]
# Combine lists
print("Combining files...")
word_relationship_questions_answers: dict = {
"adjectives": [],
"nouns": [],
"verbs": [],
}
for i in tqdm(range(len(word_relationship_questions))):
questions = word_relationship_questions[i]
qa_label, answer = word_relationship_answers[i]
# Convert from label to category
qa_category = None
if qa_label.startswith("J"):
qa_category = "adjectives"
elif qa_label.startswith("N"):
qa_category = "nouns"
elif qa_label.startswith("V"):
qa_category = "verbs"
# Append pair to category
word_relationship_questions_answers[qa_category].append(questions + [answer])
print("Done!")
# Save list of analogies from MSR to file
print("Saving to file...")
joblib.dump(word_relationship_questions_answers, output_filepath)
print("Done!")
def preprocess_questions_phrases(raw_data_dir: str, output_dir: str) -> None:
"""
Downloads and preprocess test data for evaluating a word2vec model
on the Phrase Analogy Dataset (PAD) from Mikolov et al.
(https://arxiv.org/pdf/1310.4546.pdf).
Parameters
----------
raw_data_dir : str
Path to the raw data directory (where files will be downloaded to).
output_dir : str
Output directory to save processed data.
"""
print("Processing PAD...")
# Fetch questions-phrases.txt from Github
filename = "questions-phrases.txt"
txt_url = f"https://raw.githubusercontent.com/tmikolov/word2vec/20c129af10659f7c50e86e3be406df663beff438/{filename}"
questions_phrases_txt = get_cached_download_text_file(
txt_url, raw_data_dir, filename
)
# Parse the raw content
questions_phrases_dict = parse_questions_X(questions_phrases_txt)
print("Done!")
# Save questions-words dict to file
dest_filename = "pad.joblib"
questions_phrases_filepath = join(output_dir, dest_filename)
print("Saving to file...")
joblib.dump(questions_phrases_dict, questions_phrases_filepath)
print("Done!")
def preprocess_eval_test_data(raw_data_dir: str, output_dir: str) -> None:
"""
Downloads and preprocess test data for evaluating a word2vec model.
Parameters
----------
raw_data_dir : str
Path to the raw data directory (where files will be downloaded to).
output_dir : str
Output directory to save processed data.
"""
# Ensure data directories exist
makedirs(raw_data_dir, exist_ok=True)
makedirs(output_dir, exist_ok=True)
# Prepare test data sets
preprocess_questions_words(raw_data_dir, output_dir)
preprocess_msr(raw_data_dir, output_dir)
preprocess_questions_phrases(raw_data_dir, output_dir)
if __name__ == "__main__":
args = parse_args()
preprocess_eval_test_data(
raw_data_dir=args.raw_data_dir,
output_dir=args.output_dir,
)
```
#### File: code/word_embeddings/word2phrase.py
```python
from collections import Counter
from itertools import chain, tee, zip_longest
from os import makedirs
from os.path import basename, join
from typing import Iterable, Iterator, List, Optional, Union
import tensorflow as tf
from tqdm import tqdm
class Word2phrase:
"""
Converts words that appear frequently together (i.e. phrases) into
single words, e.g. new york times --> new_york_times
south africa --> south_africa
larry page --> larry_page
Python port of Mikolov et al.'s original word2phrase.c code:
https://github.com/tmikolov/word2vec/blob/master/word2phrase.c
"""
def __init__(
self,
min_word_count: int,
threshold: float,
threshold_decay: float,
phrase_sep: str,
) -> None:
"""
Initializes the Word2phrase instance.
Parameters
----------
min_word_count : int
Minimum number of times a word might occur for it to be in the vocabulary.
threshold : float
Threshold for determining whether a given phrase should be included.
threshold_decay : float
Value to use for decaying the threshold over time.
phrase_sep : str
Separator to use when combining phrases.
"""
self._min_word_count = min_word_count
self._threshold = threshold
self._threshold_decay = threshold_decay
self._phrase_sep = phrase_sep
self._word_occurrences_counter: Optional[Counter] = None
self._total_unigram_words = 0
@staticmethod
def _pairwise_grouping_iter(iterable: Iterable) -> Iterator:
"""
Groups elements of an iterable with pairwise tuples.
Parameters
----------
iterable : Iterable
Iterable to apply pairwise grouping to.
Returns
-------
pairwise_iterable : Iterator
Pairwise iterable
"""
left, right = tee(iterable)
try:
next(right)
except StopIteration:
pass
return zip_longest(left, right)
def _build_word_occurrences(
self, filepaths: List[str], num_texts: int, max_vocab_size: int
) -> None:
"""
Builds the internal vocabulary using text data files.
Parameters
----------
filepaths : list of str
Filepaths of text data files to build the vocabulary on.
num_texts : int
            Number of texts (or sentences) of the content of `filepaths`.
max_vocab_size : int
Maximum vocabulary size to use (-1 indicates all words in vocabulary).
In other words, only the top `max_vocab_size` words will be taken into
account when counting word occurrences.
"""
        # Read file contents and split into words. Keep the file handles open here;
        # a `with` block would close them before the chained iterator below is consumed.
        lines = []
        for filepath in filepaths:
            lines.append(tf.io.gfile.GFile(filepath))
        lines_iter = chain(*lines)
self._total_unigram_words = 0
word_occurrences_counter: Counter = Counter()
for line in tqdm(
lines_iter,
desc="- Building word occurrences",
total=num_texts,
):
words = line.strip().split()
pairwise_words = [
f"{a}{self._phrase_sep}{b}" for a, b in zip(words, words[1:])
]
# Count unigram word occurrences
word_occurrences_counter.update(words)
self._total_unigram_words += len(words)
# Count bigram word occurrences
word_occurrences_counter.update(pairwise_words)
print(f"Initial vocabulary size: {len(word_occurrences_counter)}")
        # Only use the most common words; always convert to a list of (word, count)
        # pairs so the filtering below can unpack it.
        if max_vocab_size == -1 or max_vocab_size >= len(word_occurrences_counter):
            word_occurrences_counter = word_occurrences_counter.most_common()
            print("Using all words in vocabulary!")
        else:
            word_occurrences_counter = word_occurrences_counter.most_common(
                max_vocab_size
            )
            print(
                f"New vocabulary size after maximization: {len(word_occurrences_counter)}"
            )
# Exclude words with less than `self._min_word_count` occurrences
word_occurrences_counter: dict = {
word: word_count
for word, word_count in tqdm(
word_occurrences_counter, desc="- Filtering word occurrences"
)
if word_count >= self._min_word_count
}
print(
f"Final vocabulary size after filtering on minimum word count: {len(word_occurrences_counter)}"
)
self._word_occurrences_counter = word_occurrences_counter
def fit(
self,
text_data_filepaths: List[str],
dataset_name: str,
starting_epoch_nr: int,
n_epochs: int,
num_texts: int,
max_vocab_size: int,
output_dir: str,
) -> None:
"""
Trains/fits the word2phrase instance and saves new text data files
where phrases have been replaced with single words.
Parameters
----------
text_data_filepaths : list of str
Filepaths of text data files to train on.
dataset_name : str
Name of the dataset we are fitting/training on.
starting_epoch_nr : int
Epoch number to start the training from.
n_epochs : int
            Number of passes through the text data files; more passes
            yield longer phrases.
num_texts : int
Number of texts (or sentences) of the content of `filepaths`.
max_vocab_size : int, optional
Maximum vocabulary size to use (-1 indicates all words in vocabulary).
In other words, only the top `max_vocab_size` words will be taken into
account when counting word occurrences.
output_dir : str
Output directory to save the new text data files.
"""
end_epoch_nr = n_epochs + starting_epoch_nr - 1
for epoch in range(starting_epoch_nr, end_epoch_nr + 1):
print(f"Epoch {epoch}/{end_epoch_nr}")
# Create output directory for current epoch
current_output_dir = join(
output_dir, f"{dataset_name}_phrases", f"epoch_{epoch}"
)
makedirs(current_output_dir, exist_ok=True)
# Compute threshold
threshold = self._threshold * (1 - self._threshold_decay) ** (epoch - 1)
# Builds vocabulary using text data files
self._build_word_occurrences(
filepaths=text_data_filepaths,
num_texts=num_texts,
max_vocab_size=max_vocab_size,
)
# Iterate over all texts/sentences for each text data file.
new_filepaths = [
join(current_output_dir, basename(filepath))
for filepath in text_data_filepaths
]
print(
f"Example input/output: {text_data_filepaths[0]} --> {new_filepaths[0]}"
)
progressbar = tqdm(
total=num_texts, desc="- Computing scores for each text data file"
)
for input_filepath, output_filepath in zip(
text_data_filepaths, new_filepaths
):
with open(input_filepath, "r") as input_file:
with open(output_filepath, "w") as output_file:
i = 0
for line in input_file:
new_line = []
words = line.strip().split()
pairwise_words = self._pairwise_grouping_iter(words)
for pair in pairwise_words:
left_word, right_word = pair
bigram_word = (
f"{left_word}{self._phrase_sep}{right_word}"
)
pa = self._word_occurrences_counter.get(left_word)
pb = self._word_occurrences_counter.get(right_word)
pab = self._word_occurrences_counter.get(bigram_word)
all_words_in_vocab = pa and pb and pab
# Compute score
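                                # Mikolov et al.'s phrase score:
                                #   (count(ab) - min_count) / (count(a) * count(b)) * total_unigram_words
                                # Bigrams scoring above the (decaying) threshold are merged into one token.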
if all_words_in_vocab:
score = (
(pab - self._min_word_count)
/ pa
/ pb
* self._total_unigram_words
)
else:
score = 0.0
if score > threshold:
try:
# Skip next pair of words, since we combined current pair into
# a single word.
next(pairwise_words)
except StopIteration:
pass
new_line.append(bigram_word)
else:
new_line.append(left_word)
# Write line to output file
if i > 0:
output_file.write("\n")
output_file.write(" ".join(new_line))
progressbar.update(1)
i += 1
print()
# Change text data filepaths to the newly saved text filepaths
text_data_filepaths = new_filepaths.copy()
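# Illustrative usage sketch (all paths and hyperparameter values are assumptions):
# word2phrase = Word2phrase(min_word_count=5, threshold=100.0, threshold_decay=0.25, phrase_sep="_")
# word2phrase.fit(
#     text_data_filepaths=["data/enwiki_sentences.txt"],
#     dataset_name="enwiki",
#     starting_epoch_nr=1,
#     n_epochs=2,
#     num_texts=1_000_000,
#     max_vocab_size=-1,
#     output_dir="data",
# )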
``` |
{
"source": "JonasTronco/backend-load-massive",
"score": 3
} |
#### File: backend-load-massive/data_analysis/file_export.py
```python
from constants import BedsFilter, MeasuresFilter
def write_to_file(data, filename):
"""
Writes data on the file with specified name
"""
try:
with open(filename, 'w') as export_file:
export_file.write(data)
print(f'Wrote the results on {filename}!')
except OSError as e:
print(f'Could not write {filename}!')
print(e)
def write_beds_data(general_json, types_json, filter_index):
"""
Writes the entered beds json into a file named according to the chosen
filter
"""
general_name = BedsFilter(filter_index).name + '_GENERAL'
types_name = BedsFilter(filter_index).name + '_TYPES'
general_filename = BedsFilter.EXPORT_FILENAME.value.replace("#",
general_name)
types_filename = BedsFilter.EXPORT_FILENAME.value.replace("#", types_name)
write_to_file(general_json, general_filename)
write_to_file(types_json, types_filename)
def write_measures_data(filter_index, general_json, types_json = None):
"""
Writes the entered measures json into a file named according to the chosen
filter
"""
general_name = MeasuresFilter(filter_index).name + '_GENERAL'
general_filename = MeasuresFilter.EXPORT_FILENAME.value.replace("#",
general_name)
write_to_file(general_json, general_filename)
if (types_json):
types_name = MeasuresFilter(filter_index).name + '_MEASURES'
types_filename = MeasuresFilter.EXPORT_FILENAME.value.replace("#",
types_name)
write_to_file(types_json, types_filename)
``` |
{
"source": "jonastsai/meetate",
"score": 2
} |
#### File: meetate/greencoffees/admin.py
```python
from django.contrib import admin
from models import GreenCoffees, Vendors
class NotEmptyFilter(admin.SimpleListFilter):
title = 'Been Available'
# Parameter for the filter that will be used in the URL query.
parameter_name = 'qty_now'
def lookups(self, request, model_admin):
return (
(None, 'Non-Empty'),
('empty', 'Empty'),
('all', 'All'),
)
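    # choices() below omits Django's default "All" entry on purpose, so the `None`
    # lookup above acts as the default selection (rows with qty_now > 0).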
def choices(self, cl):
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == lookup,
'query_string': cl.get_query_string({
self.parameter_name: lookup,
}, []),
'display': title,
}
def queryset(self, request, queryset):
if self.value() == 'empty':
return queryset.filter(qty_now = 0)
if self.value() == 'all':
return queryset.filter()
else:
return queryset.filter(qty_now__gt=0)
class GreenCoffeesAdmin(admin.ModelAdmin):
list_display = ("name", "country", "process", "price", "qty_in", "qty_now", "vendor", "create_at")
radio_fields = {"process": admin.VERTICAL}
list_per_page = int(20)
list_filter = (NotEmptyFilter, 'process', 'vendor')
'''
def formfield_for_choice_field(self, db_field, request, **kwargs):
if db_field.name == "process":
kwargs['choices'] = (
('0', '100'),
('1', '101'),
('2', '102'),
)
return super(GreenCoffeesAdmin, self).formfield_for_choice_field(db_field, request, **kwargs)
'''
class VendorsAdmin(admin.ModelAdmin):
list_display = ("name",)
list_per_page = int(10)
# Register your models here.
admin.site.register(GreenCoffees, GreenCoffeesAdmin)
admin.site.register(Vendors, VendorsAdmin)
```
#### File: meetate/greencoffees/models.py
```python
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
#from roastlogs.models import RoastTimelogs, Roastlogs
#from order.models import Orders, Products, SellingItems
#from roastlogs.models import *
#from orders.models import *
class Vendors(models.Model):
name = models.CharField(max_length=40)
contact = models.CharField(max_length=30, blank=True, null=True)
tel = models.CharField(max_length=30, blank=True, null=True)
mobile = models.CharField(max_length=30, blank=True, null=True)
email = models.CharField(max_length=50, blank=True, null=True)
web_page = models.CharField(max_length=100, blank=True, null=True)
comment = models.TextField(blank=True, null=True)
class Meta:
db_table = 'vendors'
app_label = 'greencoffees'
verbose_name_plural = "Green Coffee Vendor"
def __unicode__(self):
return u'%s' % (self.name)
class GreenCoffees(models.Model):
PROCESS_CHOICES = ((0, '水洗'), (1, '日曬'), (2, '蜜處理'))
name = models.CharField(max_length=30)
country = models.CharField(max_length=30, blank=True, null=True)
manor = models.CharField(max_length=30, blank=True, null=True)
grade = models.CharField(max_length=10, blank=True, null=True)
process = models.IntegerField(choices=PROCESS_CHOICES)
price = models.IntegerField()
qty_in = models.IntegerField()
qty_now = models.IntegerField(blank=True, null=True)
defect = models.IntegerField(blank=True, null=True)
vendor = models.ForeignKey(Vendors)
#seller = models.CharField(max_length=30, blank=True, null=True)
official_taste = models.CharField(max_length=100, blank=True, null=True)
official_desc = models.TextField(blank=True, null=True)
self_taste = models.CharField(max_length=60, blank=True, null=True)
create_at = models.DateField(blank=True, null=True)
owner = models.ForeignKey(User, blank=True, null=True)
class Meta:
#managed = False
db_table = 'green_coffees'
app_label = 'greencoffees'
verbose_name_plural = "Green Coffees"
def __unicode__(self):
return u'%s %s, %s' % (self.country, self.name, self.processStr(self.process))
def processStr(self, index):
strs = ['水洗', '日曬', '蜜處理']
return strs[index]
```
#### File: meetate/roastlogs/models.py
```python
from __future__ import unicode_literals
import os
from django.db import models
from django.contrib.auth.models import User
#from orders.models import Orders, Products, SellingItems
from orders.models import *
from greencoffees.models import *
class RoastTimelogs(models.Model):
roastlog = models.ForeignKey('Roastlogs', blank=True, null=True)
second = models.IntegerField()
bt = models.IntegerField()
temp1 = models.IntegerField(blank=True, null=True)
power = models.IntegerField(blank=True, null=True)
fan_level = models.IntegerField(blank=True, null=True)
class Meta:
#managed = False
db_table = 'roast_timelogs'
app_label = 'roastlogs'
verbose_name_plural = "Roast Time Logs"
class Roastlogs(models.Model):
cust_index = models.CharField(max_length=20, blank=True, null=True)
#green_coffee = models.ForeignKey(GreenCoffees, blank=True, null=True)
comment = models.CharField(max_length=200, blank=True, null=True)
taste = models.CharField(max_length=200, blank=True, null=True)
score = models.IntegerField(blank=True, null=True)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
start_weight = models.IntegerField(blank=True, null=True)
end_weight = models.IntegerField(blank=True, null=True)
env_temp = models.IntegerField(blank=True, null=True)
env_humi = models.IntegerField(blank=True, null=True)
roastor_dev_id = models.CharField(max_length=10, blank=True, null=True)
roastor = models.ForeignKey(User, blank=True, null=True)
order_of_day = models.IntegerField(blank=True, null=True)
category = models.IntegerField(blank=True, null=True)
status = models.IntegerField(blank=True, null=True)
random_str = models.CharField(max_length=10, blank=True, null=True)
time_log_str = models.TextField(blank=True, null=True)
class Meta:
#managed = False
db_table = 'roastlogs'
app_label = 'roastlogs'
verbose_name_plural = "Roast Logs"
def __unicode__(self):
return u'%s, %s' % (self.cust_index, self.start_time)
def duration(self):
if self.start_time != None and self.end_time != None:
return self.end_time - self.start_time
return 'n/a'
def lost_percent(self):
if self.start_weight != None and self.end_weight != None:
lost = self.start_weight - self.end_weight
return '{percent:.1%}'.format(percent=float(lost)/float(self.start_weight))
return 'n/a'
def green_coffee(self):
try:
sale = SellingItems.objects.get(roastlog=self)
if sale != None:
return sale.product.green_coffee
except Exception:
return "No such coffee"
return "You should not see this"
def curve_image_url(self):
# TODO We should get the urlbase according to user's property
urlbase = os.environ.get("MEETATE_URL_BASE", '')
fileName = self.start_time.strftime('%Y%m%d_%H%M%S') + "_" + self.random_str + ".png"
return urlbase + fileName
def curve_image(self):
#return '<img src="http://nviki.qiniudn.com/80909_medium.jpg"/>'
return '<img src="%s" height="320" width="480"/>' % (self.curve_image_url())
curve_image.allow_tags = True
``` |
{
"source": "JonasUJ/cloudstore",
"score": 2
} |
#### File: api/models/file.py
```python
import os
from io import BytesIO
from mimetypes import guess_type
from PIL import Image, UnidentifiedImageError
from django.conf import settings
from django.contrib.auth.hashers import check_password, make_password
from django.core.files import File as CoreFile
from django.db import models
from django.dispatch import receiver
from private_storage.fields import PrivateFileField
from shortuuid import uuid
class ShareState(models.IntegerChoices):
PRIVATE = 0
PUBLIC = 1
PASSWORD_PROTECTED = 2
class Share(models.Model):
state = models.IntegerField(choices=ShareState.choices, default=ShareState.PRIVATE)
key = models.CharField(max_length=100, blank=True, null=True)
def matches(self, key):
return check_password(key, self.key)
def set_key(self, key):
self.key = make_password(key)
self.save()
return self.key
def __str__(self):
# pylint: disable=no-member
return f'Share(state={ShareState(self.state).name}, file={self.file})'
def _get_share() -> int:
return Share.objects.create().pk
def get_uuid() -> str:
return uuid()[:8]
def get_filename(instance, filename) -> str: # pylint: disable=unused-argument
return f'{instance.uuid}'
def get_thumbnail_filename(instance, filename) -> str: # pylint: disable=unused-argument
return f'thumb/{get_filename(instance, filename)}'
class NotRequiredPrivateFileField(PrivateFileField):
"""See https://code.djangoproject.com/ticket/13327"""
def _require_file(self):
return
@property
def url(self):
if self:
return self.storage.url(self.name)
return ''
class File(models.Model):
name = models.CharField(max_length=200)
uuid = models.CharField(max_length=8, default=get_uuid, unique=True)
created = models.DateTimeField(auto_now_add=True)
accessed = models.DateTimeField(auto_now=True)
file = PrivateFileField(upload_to=get_filename)
size = models.BigIntegerField(default=0)
thumb = NotRequiredPrivateFileField(upload_to=get_thumbnail_filename, blank=True, null=True)
folder = models.ForeignKey('Folder', related_name='files', on_delete=models.CASCADE)
share = models.OneToOneField(
Share, related_name='file', on_delete=models.CASCADE, default=_get_share
)
owner = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='files', on_delete=models.CASCADE
)
def ext(self) -> str:
return os.path.splitext(self.name)[1]
def clean_name(self) -> str:
return os.path.splitext(self.name)[0]
    def text(self) -> bool:
        mimetype = guess_type(self.name)[0]
        if mimetype:
            return mimetype.split('/')[0] == 'text'
        return False
def generate_thumbnail(self):
ext = self.ext().strip('.').upper()
if ext == 'JPG':
ext = 'JPEG'
if ext in settings.IMAGE_THUMBNAIL_TYPES:
try:
img = Image.open(self.file.file)
img.thumbnail(settings.IMAGE_THUMBNAIL_SIZE)
img_bytes = BytesIO()
img.save(img_bytes, format=ext)
self.thumb.save(self.thumb.name, CoreFile(img_bytes))
# NOTE: Thumbs can currently exceed the quota
self.size += self.thumb.file.size
self.save()
self.owner.quota.use(self.thumb.file.size)
except UnidentifiedImageError:
pass # We couldn't open the image, probably because it isn't one.
def __str__(self):
return self.name
@receiver(models.signals.post_delete, sender=File)
def remove_file(sender, instance: File, **kwargs): # pylint: disable=unused-argument
instance.owner.quota.free(instance.size)
instance.file.delete(False)
instance.thumb.delete(False)
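# Illustrative sketch of the share-key flow (the key value is an assumption):
# share = some_file.share
# share.state = ShareState.PASSWORD_PROTECTED
# share.set_key("secret")      # stores a salted hash via make_password
# share.matches("secret")      # -> True, verified with check_password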
```
#### File: cloudstore/forms/bulma_mixin.py
```python
from typing import Dict, Sequence, Union
from django.forms import CheckboxInput, Field
class BulmaMixin:
"""
Mixin for handling Bulma classes in a Form
"""
def update_fields(self):
for name, field in self.fields.items():
if self.has_error(name):
self.add_classes(field, 'is-danger')
if not isinstance(field.widget, CheckboxInput):
self.add_classes(field, 'input')
else:
self.add_classes(field, 'is-checkradio')
@staticmethod
def add_classes(field: Field, class_string: str) -> None:
if not field.widget.attrs.get('class', False):
field.widget.attrs['class'] = ''
field.widget.attrs['class'] += ' ' + class_string
def add_attrs(self, field_names: Union[str, Sequence[str]], attrs: Dict[str, str]):
if isinstance(field_names, str):
field_names = {field_names}
for field in field_names:
for name, val in attrs.items():
setattr(self.fields[field], name, val)
```
#### File: cloudstore/forms/set_password.py
```python
from django.contrib.auth.forms import SetPasswordForm
from django.forms import CharField, PasswordInput
from .bulma_mixin import BulmaMixin
class CloudstoreSetPasswordForm(BulmaMixin, SetPasswordForm):
new_password1 = CharField(
label='New password',
strip=False,
widget=PasswordInput(
attrs={'autocomplete': 'new-password', 'class': 'has-background-black-bis'}
),
)
new_password2 = CharField(
label='Confirm new password',
strip=False,
widget=PasswordInput(
attrs={'autocomplete': 'new-password', 'class': 'has-background-black-bis'}
),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.update_fields()
self.add_attrs(('new_password1', 'new_password2'), {'icon_left': 'fa-lock'})
``` |
{
"source": "JonasUJ/lectiotime.xyz",
"score": 3
} |
#### File: lectiotime.xyz/lectioapi/schedule.py
```python
from enum import Enum
from pprint import pformat
from datetime import datetime, timedelta
class DayStatus(Enum):
BEFORE = 0
DURING = 1
BREAK = 2
AFTER = 3
WEEKEND = 4
class TimePeriod:
start = None
end = None
def duration(self) -> timedelta:
return self.end - self.start
def startSameDay(self, other) -> bool:
return self.start.date() == other.start.date()
def isOnDay(self, day) -> bool:
return self.start.date() == day
def __gt__(self, other):
if type(other) is type(self):
return self.end > other.end
if type(other) is datetime:
return self.end > other
raise NotImplementedError()
def __lt__(self, other):
if type(other) is type(self):
return self.end < other.end
if type(other) is datetime:
return self.end < other
raise NotImplementedError()
class Piece(TimePeriod):
def __init__(self, start: datetime, end: datetime, **extra):
self.start = start
self.end = end
self.extra = extra
def isAt(self, time: datetime) -> bool:
return self.start < time < self.end
def json(self):
return dict(start=self.start, end=self.end, **self.extra)
def __bool__(self):
return True
def __contains__(self, other):
if type(other) is datetime:
return self.isAt(other)
raise NotImplementedError()
def __repr__(self):
return f"Piece(start={self.start}, end={self.end}, **{pformat(self.extra)})"
def __eq__(self, other):
if type(other) is type(self):
return self.start == other.start and self.end == other.end
raise NotImplementedError()
class Schedule(TimePeriod):
def __init__(self, name: str, *pieces, offset=timedelta(0)):
self.name = name
self._pieces = list(pieces)
self._pieces.sort()
self.offset = offset
try:
self.start = min(self._pieces).start
self.end = max(self._pieces).end
except ValueError:
self.start = datetime.now() + self.offset
self.end = datetime.now() + self.offset
def pieceNow(self) -> (DayStatus, Piece):
return self.pieceAt(datetime.now() + self.offset)
def pieceAt(self, time: datetime) -> (DayStatus, Piece):
next_piece = self._pieces[0]
for piece in self._pieces:
if time in piece:
return DayStatus.DURING, piece
elif piece.start - time < next_piece.start - time:
next_piece = piece
return self.schoolStatus(), next_piece
def today(self):
date = (datetime.now() + self.offset).date()
day = list(filter(lambda p: p.isOnDay(date), self._pieces))
day.sort()
return day
def schoolStatus(self):
now = datetime.now() + self.offset
today = self.today()
if len(today) == 0:
return DayStatus.WEEKEND
if now < today[0]:
return DayStatus.BEFORE
if now > today[-1]:
return DayStatus.AFTER
for piece in today:
if now in piece:
return DayStatus.DURING
return DayStatus.BREAK
def json(self):
json = {
"name": self.name,
"schoolstatus": self.schoolStatus().value,
"start": self.start,
"end": self.end
}
for i, p in enumerate(self._pieces):
json[str(i)] = p.json()
return json
def jsonToday(self):
today = self.today()
if len(today) == 0:
return {
"start": datetime.now() + self.offset,
"end": datetime.now() + self.offset,
"name": self.name,
"schoolstatus": DayStatus.WEEKEND.value
}
json = {
"name": self.name,
"schoolstatus": self.schoolStatus().value,
"start": today[0].start,
"end": today[-1].end
}
for i, p in enumerate(today):
json[str(i)] = p.json()
return json
def __repr__(self):
return f"Schedule(name={self.name}, *{pformat(self._pieces)})"
def __getitem__(self, item):
for piece in self._pieces:
if item == piece:
return piece
def __setitem__(self, item, value):
for i, piece in enumerate(self._pieces):
if item == piece:
                self._pieces[i] = value
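# Illustrative usage sketch (times and names are assumptions):
# lesson = Piece(datetime(2021, 3, 1, 8, 0), datetime(2021, 3, 1, 9, 30), name="Math")
# schedule = Schedule("1a", lesson)
# status, piece = schedule.pieceNow()   # e.g. (DayStatus.BEFORE, lesson)
# schedule.jsonToday()                  # dict ready for a JSON response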
``` |
{
"source": "JonasUJ/Pyllow",
"score": 3
} |
#### File: Pyllow/test/AST_test.py
```python
import unittest
from src.AST import AST, TokenStream
from src.chardef import CD
from src.Error import PyllowSyntaxError
from src.Lexer import Lexer, Token
from src.Node import (AdditionExpression, AndExpression, AssignStatement,
BlockNode, DivisionExpression, EqualExpression,
GreaterThanEqualExpression, GreaterThanExpression,
IfStatement, LessThanEqualExpression, LessThanExpression,
MonoExpression, MultiplicationExpression,
NegativeExpression, NotEqualExpression, NotExpression,
OrExpression, PositiveExpression, PowerExpression,
SubtractionExpression)
POSITION = (1, 0, 'test')
KWARGS = {
'tokentype': 'num',
'subtype': 'int',
'value': '0',
'identity': 'id',
'position': POSITION
}
def make_tree(node_and_children):
nodeclass, children = node_and_children
if isinstance(children, tuple) and children[1]:
return nodeclass(children=[make_tree(child) for child in children])
elif isinstance(children, tuple):
return nodeclass(children=[make_tree(children)], **KWARGS)
return nodeclass(**KWARGS)
class ASTTest(unittest.TestCase):
def setUp(self):
self.tree = AST(())
def tearDown(self):
del self.tree
def set_stream(self, raw):
lexer = Lexer()
lexed = lexer.lex(raw)
self.tree._stream = TokenStream(lexed)
self.tree._stream.next()
def test__accept(self):
self.set_stream('A B 1 2')
self.assertTrue(self.tree._accept('A', attr='value'))
self.assertFalse(self.tree._accept('A', attr='value'))
self.assertTrue(self.tree._accept('id'))
self.assertFalse(self.tree._accept('id'))
self.assertTrue(self.tree._accept('id', 'num'))
self.assertFalse(self.tree._accept('id', 'num', attr='value'))
self.assertTrue(self.tree._accept('B', '2', attr='value'))
def test__expect(self):
self.set_stream('A B 1 2')
self.assertTrue(self.tree._expect('A', attr='value'))
with self.assertRaises(PyllowSyntaxError):
self.tree._expect('A', attr='value')
self.assertTrue(self.tree._expect('id'))
with self.assertRaises(PyllowSyntaxError):
self.tree._expect('id')
self.assertTrue(self.tree._expect('id', 'num'))
with self.assertRaises(PyllowSyntaxError):
self.tree._expect('id', 'num', attr='value')
self.assertTrue(self.tree._expect('B', '2', attr='value'))
def test__expression_simpel(self):
self.set_stream('1 + 2 - 3')
structure = make_tree(
(SubtractionExpression, (
(AdditionExpression, (
(MonoExpression, None),
(MonoExpression, None)
)),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_precedence(self):
self.set_stream('1 + 2 * 3')
structure = make_tree(
(AdditionExpression, (
(MonoExpression, None),
(MultiplicationExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_precedence_arg(self):
self.set_stream('1 ^ 2 * 3')
structure = make_tree(
(PowerExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current, precedence=22)
self.assertEqual(expr, structure)
def test__expression_unary(self):
self.set_stream('1 - - 2')
structure = make_tree(
(SubtractionExpression, (
(MonoExpression, None),
(NegativeExpression, (MonoExpression, None))
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_singleparen(self):
self.set_stream('(1) + (2)')
structure = make_tree(
(AdditionExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_singleparen_unary(self):
self.set_stream('(1) + (-2)')
structure = make_tree(
(AdditionExpression, (
(MonoExpression, None),
(NegativeExpression, (MonoExpression, None))
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_double_and_tripleparen(self):
self.set_stream('((1)) + (((2)))')
structure = make_tree(
(AdditionExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_double_and_tripleparen_unary(self):
self.set_stream('(1) + (-2)')
structure = make_tree(
(AdditionExpression, (
(MonoExpression, None),
(NegativeExpression, (MonoExpression, None))
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_paren_precedence(self):
self.set_stream('(1 + 2) * 3')
structure = make_tree(
(MultiplicationExpression, (
(AdditionExpression, (
(MonoExpression, None),
(MonoExpression, None)
)),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_paren_nesting(self):
self.set_stream('(1 + (2 - 3)) * 4')
structure = make_tree(
(MultiplicationExpression, (
(AdditionExpression, (
(MonoExpression, None),
(SubtractionExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_paren_operation(self):
self.set_stream('(1 - 2) * (3 + 4)')
structure = make_tree(
(MultiplicationExpression, (
(SubtractionExpression, (
(MonoExpression, None),
(MonoExpression, None)
)),
(AdditionExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_addition(self):
self.set_stream('1 + 2')
structure = make_tree(
(AdditionExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_subtraction(self):
self.set_stream('1 - 2')
structure = make_tree(
(SubtractionExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_multiplication(self):
self.set_stream('1 * 2')
structure = make_tree(
(MultiplicationExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_division(self):
self.set_stream('1 / 2')
structure = make_tree(
(DivisionExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_power(self):
self.set_stream('1 ^ 2')
structure = make_tree(
(PowerExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_EQ(self):
self.set_stream('1 == 2')
structure = make_tree(
(EqualExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_NE(self):
self.set_stream('1 != 2')
structure = make_tree(
(NotEqualExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_AND(self):
self.set_stream('1 & 2')
structure = make_tree(
(AndExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_OR(self):
self.set_stream('1 | 2')
structure = make_tree(
(OrExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_GT(self):
self.set_stream('1 > 2')
structure = make_tree(
(GreaterThanExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_LT(self):
self.set_stream('1 < 2')
structure = make_tree(
(LessThanExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_GE(self):
self.set_stream('1 >= 2')
structure = make_tree(
(GreaterThanEqualExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_LE(self):
self.set_stream('1 <= 2')
structure = make_tree(
(LessThanEqualExpression, (
(MonoExpression, None),
(MonoExpression, None)
))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_negative(self):
self.set_stream('-1')
structure = make_tree(
(NegativeExpression, (MonoExpression, None))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_positive(self):
self.set_stream('+1')
structure = make_tree(
(PositiveExpression, (MonoExpression, None))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_not(self):
self.set_stream('!1')
structure = make_tree(
(NotExpression, (MonoExpression, None))
)
expr = self.tree._expression(self.tree._stream.current)
self.assertEqual(expr, structure)
def test__expression_raises_missing(self):
self.set_stream('1 +')
with self.assertRaises(PyllowSyntaxError):
self.tree._expression(self.tree._stream.current)
def test__expression_raises_double(self):
self.set_stream('1 * * 2')
with self.assertRaises(PyllowSyntaxError):
self.tree._expression(self.tree._stream.current)
def test__expression_raises_empty_paren(self):
self.set_stream('1 * () * 2')
with self.assertRaises(PyllowSyntaxError):
self.tree._expression(self.tree._stream.current)
def test__expression_raises_invalid_unary(self):
self.set_stream('* 2')
with self.assertRaises(PyllowSyntaxError):
self.tree._expression(self.tree._stream.current)
def test__expression_raises_invalid_token(self):
self.set_stream('if + 2')
with self.assertRaises(PyllowSyntaxError):
self.tree._expression(self.tree._stream.current)
def test__assignment(self):
self.set_stream('id = expr')
structure = make_tree(
(AssignStatement, (MonoExpression, None))
)
expr = self.tree._assignment()
self.assertEqual(expr, structure)
        self.assertEqual(expr.id, 'id')
def test__assignment_no_id(self):
self.set_stream('1')
self.assertFalse(self.tree._assignment())
def test__assignment_no_assign(self):
self.set_stream('id')
self.assertFalse(self.tree._assignment())
def test__assignment_raises_no_expression(self):
self.set_stream('id = ')
with self.assertRaises(PyllowSyntaxError):
self.tree._assignment()
def test__assignment_raises_invalid_expression(self):
self.set_stream('id = if')
with self.assertRaises(PyllowSyntaxError):
self.tree._assignment()
def test__get_block(self):
self.set_stream('{assign1 = 1 assign2 = 2}')
structure = make_tree(
(BlockNode, (
(AssignStatement, (MonoExpression, None)),
(AssignStatement, (MonoExpression, None))
))
)
block = self.tree._get_block()
self.assertEqual(block, structure)
def test__if(self):
self.set_stream('if true {}')
self.tree._accept(CD.IF, attr='value')
expr = self.tree._if()
self.assertFalse(expr.alt)
self.assertTrue(expr.condition.value)
def test__if_block(self):
self.set_stream('if true {test = 0}')
self.tree._accept(CD.IF, attr='value')
structure = make_tree(
(AssignStatement, (MonoExpression, None))
)
expr = self.tree._if()
self.assertEqual(expr.block.children[0], structure)
def test__if_condition(self):
self.set_stream('if 1 == 0 | 1 != 0 {}')
self.tree._accept(CD.IF, attr='value')
structure = make_tree(
(OrExpression, (
(EqualExpression, (
(MonoExpression, None),
(MonoExpression, None))
),
(NotEqualExpression, (
(MonoExpression, None),
(MonoExpression, None))
)
))
)
expr = self.tree._if()
self.assertEqual(expr.condition, structure)
def test__if_else(self):
self.set_stream('if true {} else {test = 0}')
self.tree._accept(CD.IF, attr='value')
structure = make_tree(
(AssignStatement, (MonoExpression, None))
)
expr = self.tree._if()
self.assertEqual(expr.alt.children[0], structure)
def test__if_else_if(self):
self.set_stream('if true {} else if true {test = 0}')
self.tree._accept(CD.IF, attr='value')
structure = make_tree(
(AssignStatement, (MonoExpression, None))
)
expr = self.tree._if()
self.assertEqual(expr.alt.block.children[0], structure)
# Missing _statement, _call because their logic is not finished
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JonasUJ/urlshort",
"score": 2
} |
#### File: urlshort/urlshort/utils.py
```python
from django.core.mail import EmailMessage
from .models import ShortUrl
from .settings import EMAIL_HOST_USER
from .link_actions import get_id
EMAIL_CONTACT_FORMAT = """
Afsender: {}
Afsender email: {}
Emne: {}
Besked: {}"""
ERRORS_API = (
'',
'could not determine action based on passed parameters (are you missing \'key\'?)',
'url with name \'{}\' does not exist',
'url with name \'{}\' already exists',
'\'{}\' is not a valid URL',
'\'{}\' does not point to a reachable address',
'\'{}\' contains illegal characters or is too long',
'\'{}\' is not an allowed link',
'wrong key',
'max length of 256 for \'reason\' exceeded',
'cannot edit the link of an unsafe picourl'
)
ERRORS_HUMAN = (
'',
'Der skete en ukendt fejl.',
'En picourl med det navn eksistere ikke.',
'En picourl med det navn eksistere allerede.',
'Linket er ikke en valid URL',
'Serveren bag linket svarer ikke',
'Navnet er ikke tilladt',
'Linket er ikke tilladt',
'Forkert nøgle',
'Den maksimale længde af begrundelsen er opnået',
'Linket på en usikker picourl kan ikke ændres'
)
def send_contact_email(contact_form):
mail = EmailMessage(
f'[Picourl/Contact] {contact_form.cleaned_data.get("emne", "ikke angivet")}',
EMAIL_CONTACT_FORMAT.format(
contact_form.cleaned_data.get("navn", "ikke angivet"),
contact_form.cleaned_data.get("email"),
contact_form.cleaned_data.get("emne", "ikke angivet"),
contact_form.cleaned_data.get("besked")),
to=[EMAIL_HOST_USER])
mail.send(False)
def urlname_exists(urlname):
# pylint: disable=no-member
return ShortUrl.objects.filter(pk=get_id(urlname)).exists()
```
#### File: urlshort/urlshort/views.py
```python
import json
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.core.mail import EmailMessage
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.views.decorators.http import require_safe, require_http_methods
from django.views.decorators.csrf import csrf_exempt
from . import models, utils
from .link_actions import get_urlname, is_picourl, get_id, only_allowed_chars
from .settings import EMAIL_HOST_USER, EMAIL_ADMIN_USER
from .api import responseFromQuery, retrieve, edit, delete
from .forms import ContactForm, LookupEditForm
@require_http_methods(['GET', 'POST', 'HEAD'])
def main(request):
return render(request, 'main_page.html')
@require_safe
def link(request, name):
try:
if len(name) > 9 or not only_allowed_chars(name):
return handler404(request, 'invalid', name=name)
# pylint: disable=no-member
url = models.ShortUrl.objects.get(pk=get_id(name))
except ObjectDoesNotExist:
return handler404(request, 'nourl', name=name)
if not url.active.is_active:
return render(request, 'deactivated_page.html', {'reason': url.active.reason})
if url.is_safe == models.SafeState.NO:
return render(request, 'unsafe_page.html', {'link': url.link})
url.uses += 1
url.save()
return redirect(url)
@csrf_exempt
@require_http_methods(['GET', 'POST', 'DELETE'])
def api(request):
query = request.GET or request.POST
if query:
return JsonResponse(responseFromQuery(request, query.dict()))
elif request.method in ('POST', 'DELETE') and request.body:
try:
query = json.loads(request.body)
except json.JSONDecodeError:
return render(request, 'api_page.html')
return JsonResponse(responseFromQuery(request, query))
else:
return render(request, 'api_page.html')
@require_safe
def about(request):
return render(request, 'about_page.html')
@require_http_methods(['GET', 'POST', 'HEAD'])
def lookup(request):
urlname = request.GET.get('urlname')
if urlname:
if is_picourl(urlname):
urlname = get_urlname(urlname)
data = {}
error_msg = ''
success = False
form = LookupEditForm(request.POST or None, request.FILES or None)
if request.method == 'POST':
if form.is_valid():
submit = request.POST.get('submit', '')
success = True
res = {}
if submit == 'submit':
res = edit(request, {
'urlname': urlname,
'newlink': form.cleaned_data['link'],
'key': form.cleaned_data['nøgle']})
elif submit == 'delete':
res = delete(request, {
'urlname': urlname,
'key': form.cleaned_data['nøgle']})
elif submit == 'activate':
res = edit(request, {
'urlname': urlname,
'active': 'true',
'reason': '',
'key': form.cleaned_data['nøgle']})
elif submit == 'deactivate':
res = edit(request, {
'urlname': urlname,
'active': 'false',
'reason': 'Deaktiveret af ejer',
'key': form.cleaned_data['nøgle']})
else:
success = False
error_msg = 'Der skete en ukendt fejl.'
if success and res.get('error_code', 0) != 0:
error_msg = utils.ERRORS_HUMAN[res['error_code']]
if urlname:
if len(urlname) > 9 or not only_allowed_chars(urlname):
return handler404(request, 'invalid', name=urlname)
data = retrieve(request, {'urlname': urlname})
if data['error_code'] == 2:
return handler404(request, 'nourl', urlname)
form = LookupEditForm(
request.POST or None,
request.FILES or None,
initial={
'link': data['link'],
'antal_besøg': data['uses'],
'oprettet': data['created_at'],
'redigeret': data.get('edited_at', ''),
'deaktiveret_siden': data.get('deactivated_since', '')})
return render(request, 'lookup_page.html', {
'urlname': urlname,
'data': data,
'form': form,
'error': error_msg,
'success': success})
@require_http_methods(['GET', 'POST', 'HEAD'])
def contact(request):
success = False
form = ContactForm(request.POST or None, request.FILES or None)
if request.method == 'POST':
if form.is_valid():
utils.send_contact_email(form)
success = True
return render(request, 'contact_page.html', {
'success': success,
'form': form,
'admin': EMAIL_ADMIN_USER})
def handler404(request, exception, name=''):
resp = render(request, 'error_page.html', {
'error_code': 404,
'name': name,
'error_msg': 'Picourl ikke fundet' if exception == 'nourl' else 'Siden findes ikke'})
resp.status_code = 404
return resp
def handler500(request):
resp = render(request, 'error_page.html', {
'error_code': 500,
'error_msg': 'Intern serverfejl'})
resp.status_code = 500
return resp
``` |
{
"source": "jonasundderwolf/django-cms-helpers",
"score": 2
} |
#### File: django-cms-helpers/cms_helpers/cms_toolbars.py
```python
from cms.extensions.toolbar import ExtensionToolbar
from cms.utils import get_language_list
from django.utils.encoding import force_text
from django.utils.translation import get_language_info
class TitleExtensionToolbar(ExtensionToolbar):
model = None
insert_after = None
def get_item_position(self, menu):
position = None
for items in menu._memo.values():
for item in items:
if force_text(getattr(item, 'name', None)) in (
force_text(self.insert_after),
'{0}...'.format(self.insert_after)
):
position = menu._item_position(item) + 1
break
return position
def populate(self):
current_page_menu = self._setup_extension_toolbar()
if not current_page_menu or not self.page:
return
languages = get_language_list(self.current_site.pk)
is_single_lang = len(languages) < 2
position = self.get_item_position(current_page_menu)
urls = self.get_title_extension_admin()
page = self._get_page()
titleset = page.title_set.filter(language__in=languages)
if hasattr(self.toolbar, 'edit_mode_active'):
not_edit_mode = not self.toolbar.edit_mode_active
else:
not_edit_mode = not self.toolbar.edit_mode
extended_menu = current_page_menu if is_single_lang else (
current_page_menu.get_or_create_menu(
key='{0}_menu'.format(self.model._meta.db_table),
verbose_name=self.model._meta.verbose_name,
position=position, disabled=not_edit_mode))
nodes = [(title_extension, url, title) for (
(title_extension, url), title) in zip(urls, titleset)]
for title_extension, url, title in nodes:
item_position = position if is_single_lang else None
language_str = get_language_info(title.language)['name_translated']
name = '{0}{1}'.format(
'' if is_single_lang else (language_str + ' '),
self.model._meta.verbose_name)
extended_menu.add_modal_item(
name, url=url, disabled=not_edit_mode, position=item_position)
```
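`TitleExtensionToolbar` is meant to be subclassed with `model` and `insert_after` set. A minimal, hypothetical wiring sketch (the import paths and the menu anchor label are assumptions; `ExtensionModel` is the test model shown in the next file):

```python
from cms.toolbar_pool import toolbar_pool

from .cms_toolbars import TitleExtensionToolbar  # import path assumed
from .models import ExtensionModel               # the TitleExtension defined below


@toolbar_pool.register
class ExtensionToolbar(TitleExtensionToolbar):
    model = ExtensionModel
    insert_after = 'Advanced settings'  # illustrative anchor item in the page menu
```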
#### File: resources/cmsapp/models.py
```python
from cms.extensions import TitleExtension
from cms.extensions.extension_pool import extension_pool
from django.db import models
try:
from cms_helpers.filer_fields import FilerFileField
except ImportError:
FilerFileField = None
@extension_pool.register
class ExtensionModel(TitleExtension):
name = models.CharField(max_length=255)
class Meta:
verbose_name = 'Extension'
def __str__(self):
return self.name
if FilerFileField:
class FileModel(models.Model):
file1 = FilerFileField(null=True)
file2 = FilerFileField(blank=True)
file3 = FilerFileField()
``` |
{
"source": "jonasundderwolf/pytest-sentry",
"score": 2
} |
#### File: jonasundderwolf/pytest-sentry/pytest_sentry.py
```python
from __future__ import absolute_import
import os
import pytest
import wrapt
import sentry_sdk
from sentry_sdk.integrations import Integration
from sentry_sdk import Hub, capture_exception
from sentry_sdk.scope import add_global_event_processor
_ENVVARS_AS_TAGS = frozenset(
[
"GITHUB_WORKFLOW", # The name of the workflow.
"GITHUB_RUN_ID", # A unique number for each run within a repository. This number does not change if you re-run the workflow run.
"GITHUB_RUN_NUMBER", # A unique number for each run of a particular workflow in a repository. This number begins at 1 for the workflow's first run, and increments with each new run. This number does not change if you re-run the workflow run.
"GITHUB_ACTION", # The unique identifier (id) of the action.
"GITHUB_ACTOR", # The name of the person or app that initiated the workflow. For example, octocat.
"GITHUB_REPOSITORY", # The owner and repository name. For example, octocat/Hello-World.
"GITHUB_EVENT_NAME", # The name of the webhook event that triggered the workflow.
"GITHUB_EVENT_PATH", # The path of the file with the complete webhook event payload. For example, /github/workflow/event.json.
"GITHUB_WORKSPACE", # The GitHub workspace directory path. The workspace directory is a copy of your repository if your workflow uses the actions/checkout action. If you don't use the actions/checkout action, the directory will be empty. For example, /home/runner/work/my-repo-name/my-repo-name.
"GITHUB_SHA", # The commit SHA that triggered the workflow. For example, ffac537e6cbbf934b08745a378932722df287a53.
"GITHUB_REF", # The branch or tag ref that triggered the workflow. For example, refs/heads/feature-branch-1. If neither a branch or tag is available for the event type, the variable will not exist.
"GITHUB_HEAD_REF", # Only set for pull request events. The name of the head branch.
"GITHUB_BASE_REF", # Only set for pull request events. The name of the base branch.
"GITHUB_SERVER_URL", # Returns the URL of the GitHub server. For example: https://github.com.
"GITHUB_API_URL", # Returns the API URL. For example: https://api.github.com.
]
)
class PytestIntegration(Integration):
# Right now this integration type is only a carrier for options, and to
# disable the pytest plugin. `setup_once` is unused.
identifier = "pytest"
def __init__(self, always_report=None):
if always_report is None:
always_report = os.environ.get(
"PYTEST_SENTRY_ALWAYS_REPORT", ""
).lower() in ("1", "true", "yes")
self.always_report = always_report
@staticmethod
def setup_once():
@add_global_event_processor
        def processor(event, hint):
if Hub.current.get_integration(PytestIntegration) is None:
return event
for key in _ENVVARS_AS_TAGS:
value = os.environ.get(key)
if not value:
continue
event.tags["pytest_environ.{}".format(key)] = value
return event
class Client(sentry_sdk.Client):
def __init__(self, *args, **kwargs):
kwargs.setdefault("dsn", os.environ.get("PYTEST_SENTRY_DSN", None))
kwargs.setdefault("traces_sample_rate", 1.0)
kwargs.setdefault("_experiments", {}).setdefault(
"auto_enabling_integrations", True
)
kwargs.setdefault("environment", "test")
kwargs.setdefault("integrations", []).append(PytestIntegration())
sentry_sdk.Client.__init__(self, *args, **kwargs)
def hookwrapper(itemgetter, **kwargs):
"""
A version of pytest.hookimpl that sets the current hub to the correct one
and skips the hook if the integration is disabled.
Assumes the function is a hookwrapper, ie yields once
"""
@wrapt.decorator
def _with_hub(wrapped, instance, args, kwargs):
item = itemgetter(*args, **kwargs)
hub = _resolve_hub_marker_value(item.get_closest_marker("sentry_client"))
if hub.get_integration(PytestIntegration) is None:
yield
else:
with hub:
gen = wrapped(*args, **kwargs)
while True:
try:
with hub:
chunk = next(gen)
y = yield chunk
with hub:
gen.send(y)
except StopIteration:
break
def inner(f):
return pytest.hookimpl(hookwrapper=True, **kwargs)(_with_hub(f))
return inner
def pytest_load_initial_conftests(early_config, parser, args):
early_config.addinivalue_line(
"markers",
"sentry_client(client=None): Use this client instance for reporting tests. You can also pass a DSN string directly, or a `Hub` if you need it.",
)
@hookwrapper(itemgetter=lambda item: item)
def pytest_runtest_protocol(item):
with sentry_sdk.push_scope() as scope:
scope.add_event_processor(_process_event)
yield
@hookwrapper(itemgetter=lambda item: item)
def pytest_runtest_call(item):
op = "pytest.runtest.call"
name = item.nodeid
# Assumption: Parameters are full of unreadable garbage and the test
# timings are going to be comparable. The product can then identify slowest
# runs anyway.
if name.endswith("]"):
params_start = name.rfind("[")
if params_start != -1:
name = name[:params_start]
with sentry_sdk.start_transaction(op=op, name=u"{} {}".format(op, name)):
yield
@hookwrapper(itemgetter=lambda fixturedef, request: request._pyfuncitem)
def pytest_fixture_setup(fixturedef, request):
op = "pytest.fixture.setup"
with sentry_sdk.start_transaction(
op=op, name=u"{} {}".format(op, fixturedef.argname)
):
yield
@hookwrapper(tryfirst=True, itemgetter=lambda item, call: item)
def pytest_runtest_makereport(item, call):
sentry_sdk.set_tag("pytest.result", "pending")
report = yield
sentry_sdk.set_tag("pytest.result", report.get_result().outcome)
if call.when == "call":
cur_exc_chain = getattr(item, "pytest_sentry_exc_chain", [])
if call.excinfo is not None:
item.pytest_sentry_exc_chain = cur_exc_chain = cur_exc_chain + [
call.excinfo
]
integration = Hub.current.get_integration(PytestIntegration)
if (cur_exc_chain and call.excinfo is None) or integration.always_report:
for exc_info in cur_exc_chain:
capture_exception((exc_info.type, exc_info.value, exc_info.tb))
DEFAULT_HUB = Hub(Client())
def _resolve_hub_marker_value(marker_value):
if marker_value is None:
marker_value = DEFAULT_HUB
else:
marker_value = marker_value.args[0]
if callable(marker_value):
marker_value = marker_value()
if marker_value is None:
# user explicitly disabled reporting
return Hub()
if isinstance(marker_value, str):
return Hub(Client(marker_value))
if isinstance(marker_value, dict):
return Hub(Client(**marker_value))
if isinstance(marker_value, Client):
return Hub(marker_value)
if isinstance(marker_value, Hub):
return marker_value
raise RuntimeError(
"The `sentry_client` value must be a client, hub or string, not {}".format(
repr(type(marker_value))
)
)
@pytest.fixture
def sentry_test_hub(request):
"""
Gives back the current hub.
"""
item = request.node
return _resolve_hub_marker_value(item.get_closest_marker("sentry_client"))
def _process_event(event, hint):
if "exception" in event:
for exception in event["exception"]["values"]:
if "stacktrace" in exception:
_process_stacktrace(exception["stacktrace"])
if "stacktrace" in event:
_process_stacktrace(event["stacktrace"])
return event
def _process_stacktrace(stacktrace):
for frame in stacktrace["frames"]:
frame["in_app"] = not frame["module"].startswith(
("_pytest.", "pytest.", "pluggy.")
)
```
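`_resolve_hub_marker_value` accepts a DSN string, a kwargs dict, a `Client`, a `Hub`, a callable, or `None`. A minimal usage sketch of the `sentry_client` marker (not part of the package; the DSN is a placeholder, not a real project):

```python
import pytest


# String form: resolved to Hub(Client(dsn)), so this test's failures/flakes are reported.
@pytest.mark.sentry_client("https://examplePublicKey@o0.ingest.sentry.io/0")
def test_reported():
    assert 1 + 1 == 2


# Passing None resolves to an empty Hub(), explicitly disabling reporting for this test.
@pytest.mark.sentry_client(None)
def test_not_reported():
    assert True
```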
#### File: pytest-sentry/tests/test_tracing.py
```python
from __future__ import absolute_import
import pytest
import sentry_sdk
import pytest_sentry
transactions = []
class MyTransport(sentry_sdk.Transport):
def __init__(self):
pass
def capture_envelope(self, envelope):
transactions.append(envelope.get_transaction_event())
pytestmark = pytest.mark.sentry_client(pytest_sentry.Client(transport=MyTransport()))
@pytest.fixture
def foo_fixture():
return 42
def test_basic(foo_fixture):
assert foo_fixture == 42
@pytest.fixture(scope="session", autouse=True)
def assert_report():
yield
self_transaction, fixture_transaction, test_transaction = transactions
assert self_transaction["type"] == "transaction"
assert self_transaction["transaction"] == "pytest.fixture.setup assert_report"
assert fixture_transaction["type"] == "transaction"
assert fixture_transaction["transaction"] == "pytest.fixture.setup foo_fixture"
assert test_transaction["type"] == "transaction"
assert (
test_transaction["transaction"]
== "pytest.runtest.call tests/test_tracing.py::test_basic"
)
``` |
{
"source": "jonasvandervennet/sudoku-generator",
"score": 3
} |
#### File: jonasvandervennet/sudoku-generator/execute.py
```python
import os
import time
from multiprocessing.dummy import Pool as ThreadPool
from sudoku.sudokupuzzle import SudokuPuzzle
from sudoku.textfiles import puzzle2text, show_errors_in_file
def fill_file(runs, filename='boards.txt'):
if isinstance(runs, list):
runs, filename, index = runs
print(f'Started thread {index}')
else:
index = None
for i in range(runs):
total_start = time.time()
sp = SudokuPuzzle(size=9, verbose=False, _diff=175)
total_time = time.time() - total_start
# print(f'Size: {sp.size}x{sp.size}; Diff: {sp.difficulty}\ncalc_time: {sp.calculation_time}ms; total_time: {total_time}s')
if index is not None:
print(f'{i+1} (thread {index}) time: {total_time}s')
else:
print(f'({i+1})time: {total_time}s')
puzzle2text(sp, filename=filename)
print('done')
def create_pool(amount=10, sudokus=1000, destination='boards.txt'):
mapping = [[sudokus, f'.boards{i+1}.txt', i + 1] for i in range(amount)]
# Make the Pool of workers
pool = ThreadPool(amount)
results = pool.imap(fill_file, mapping)
# close the pool and wait for the work to finish
pool.close()
pool.join()
print(results)
print('Saving results')
for i in range(amount):
filename = f'.boards{i+1}.txt'
with open(filename, 'r') as infile:
data = infile.readlines()
os.remove(filename)
with open(destination, 'a') as outfile:
for line in data:
outfile.write(line)
print('Done creating and saving sudokus')
start = time.time()
# create_pool(amount=10, sudokus=10, destination='boards.txt')
fill_file(10000)
print('Verifying created sudokus...')
print(show_errors_in_file())
print(f'Completion in {time.time() - start} seconds')
```
#### File: sudoku-generator/sudoku/sudoku.py
```python
import numpy as np
import random
import time
from sudoku.node import Node
class Sudoku():
def __init__(self, size=9, custom=None, verbose=False, debug=False):
# assume size is perfect square (TODO: assert square)
# size is defined as the length of one side
"""
Custom should be a list of lists containing each row of the sudoku.
Empty spots should be represented by a 0.
"""
self.verbose = verbose
self.debug = debug
self.size = size
self._tilesize = int(np.sqrt(size))
initstart = time.time()
self.nodes, self._rows, self._cols, self._tiles = self.initnodes()
self.connect_nodes()
after_init = time.time() - initstart
self.print(f'Node initialisation took {after_init}s')
if custom is not None:
startcustom = time.time()
self.fillgrid(custom)
self.print(f'Loading custom input took {time.time() - startcustom}s')
def get_all_rows(self):
return self._rows
def get_row(self, row):
return self._rows[row]
def get_col(self, col):
return self._cols[col]
def get_tile(self, tile):
return self._tiles[tile]
def initnodes(self):
nodes, rows, cols, tiles = [], [[] for _ in range(self.size)], [[] for _ in range(self.size)], [[] for _ in range(self.size)]
for row in range(self.size):
for col in range(self.size):
node = Node(row, col)
nodes.append(node)
rows[row].append(node)
cols[col].append(node)
# Tiles are for example the 3*3 squares in default sudoku
tilenr = self.calculate_tile(row, col)
tiles[tilenr].append(node)
return nodes, rows, cols, tiles
def calculate_tile(self, row, col):
tilerow = row // self._tilesize
tilecol = col // self._tilesize
return tilerow * self._tilesize + tilecol
def connect_nodes(self):
for node in self.nodes:
for connected_node in self.get_row(node.row) + self.get_col(node.col) + self.get_tile(self.calculate_tile(node.row, node.col)):
node.connected_nodes.add(connected_node)
node.connected_nodes -= set([node])
def fillgrid(self, custom):
try:
for i, row in enumerate(self._rows):
for j, node in enumerate(row):
if custom[i][j] != 0:
node.original = True
node.value = custom[i][j]
except IndexError:
raise IndexError("Custom sudoku layout was not of the right format!")
except Exception as e: # Other error, just raise
raise e
self.print("Custom input submitted and processed:")
self.print(self)
@property
def empty(self):
empty = 0
for node in self.nodes:
if node.value == 0:
empty += 1
self.print(f'{empty} empty values')
return empty
@property
def is_valid(self):
for node in self.nodes:
if not node.is_valid:
return False
return True
def print(self, msg):
if self.verbose:
print(msg)
def equals(self, other):
try:
for i, row in enumerate(self._rows):
for j, node in enumerate(row):
if not node.equals(other.get_row(i)[j]):
return False
except Exception:
return False
return True
def __eq__(self, other):
if not isinstance(other, Sudoku):
return False
return self.equals(other)
def __ne__(self, other):
if not isinstance(other, Sudoku):
return False
return not self.equals(other)
def copy(self):
"""
Returns new sudoku instance with new nodes containing the same values.
"""
custom_input = [[node.value for node in row] for row in self._rows]
self.print('Copying data into new Sudoku.')
newSudoku = Sudoku(size=self.size, custom=custom_input, verbose=self.verbose)
self.print('Verifying data of new Sudoku.')
# Check for original
for node in self.nodes:
for newnode in newSudoku.nodes:
if node.equals(newnode):
newnode.original = node.original
self.print('Data verified.\n')
return newSudoku
def get_options(self, node):
return list(set([i for i in range(1, self.size + 1)]) - node.get_neighbor_values())
def __str__(self):
result = ""
for row in self._rows:
result += str([node.value for node in row]) + '\n'
return result
def solve_smart(self, returnBranching=False, test_unique=False):
to_solve = self.copy()
# This needs to be an object to be easily modified in executeFill
unique = {'solved_once': False} # Used in testing uniqueness
def gather_best_node(sudoku):
"""
Searches nodes with least amount of options, selects one randomly
"""
best_nodes = []
current_min_options = sudoku.size
# Gather a list of nodes with the least
for node in sudoku.nodes:
if not node.value == 0:
continue
options = sudoku.get_options(node)
if len(options) < current_min_options:
# New best node found
best_nodes = [node]
current_min_options = len(options)
elif len(options) == current_min_options:
best_nodes.append(node)
return random.choice(best_nodes) if len(best_nodes) != 0 else None
def executeFill(depth=0):
if self.debug and depth % 50 == 0 and depth != 0:
to_solve.print(f'On rec depth {depth}')
to_solve.print(to_solve)
node = gather_best_node(to_solve)
if node is None:
return {'result': True, 'branchfactor': 1}
options = to_solve.get_options(node)
random.shuffle(options)
            branch = 1 # for determining branch factor (difficulty)
for option in options:
node.value = option
results = executeFill(depth=depth + 1)
if results['result']:
if test_unique and unique['solved_once']:
# not unique, return as a valid response
return {'result': True}
elif test_unique and not unique['solved_once']:
# first solution found, keep searching
# while keeping track of solution found
unique['solved_once'] = True
continue
else:
if returnBranching:
branch = (branch - 1)**2
branch += results['branchfactor'] # keeping summation going
return {'result': True, 'branchfactor': branch}
branch += 1
# base case
node.value = 0
return {'result': False}
queue = [node for node in to_solve.nodes if not node.original]
if len(queue) == 0:
# The sudoku was already completely full, check if valid or not
if not to_solve.is_valid:
to_solve.print("Given solution is not valid!")
to_solve.print(to_solve)
return False
else:
to_solve.print("Success! Given solution was valid!")
to_solve.print(to_solve)
return True
to_solve.print('Trying to fill board...')
starttime = time.time()
executionResults = executeFill()
interval = time.time() - starttime
to_solve.calculation_time = interval * 1000 # Calc_time in ms
if (not executionResults['result']) or (not to_solve.is_valid):
if test_unique and unique['solved_once']:
return True
to_solve.print("Unable to fill board!")
raise Exception("Unable to fill board!")
else: # Successfully filled the board!
if test_unique:
return not unique['solved_once']
branchingFactor = executionResults.get('branchfactor', None)
to_solve.print("Filled board!")
to_solve.print(f"\nSolution:\n{to_solve}")
to_solve.print(f"Solution found in {interval}s")
if returnBranching:
return to_solve, branchingFactor
return to_solve
@property
def is_unique(self):
return self.solve_smart(test_unique=True)
def _reset_random_node(self):
random.choice(self.nodes).value = 0
return True
def make_puzzle(self, diff=500, retry=5):
if not self.is_valid:
# Self is assumed to be a filled grid
raise ValueError('Sudoku should be a filled grid in order to make a puzzle.')
puzzle = self.copy()
cur_diff = 0
tries = 0
while diff > cur_diff:
prev_diff = cur_diff
prev_puzzle = puzzle.copy()
puzzle._reset_random_node()
if not puzzle.is_unique:
# Puzzle was not unique anymore: if too many retries, return previous iteration
tries += 1
if tries > retry:
puzzle.print('Retried too much!')
return prev_puzzle, prev_diff
else:
puzzle, cur_diff = prev_puzzle, prev_diff
else:
tries = 0
cur_diff = puzzle.estimate_difficulty(iterations=50)
# Sometimes difficulty lowers, only take max diff
if (cur_diff < prev_diff):
puzzle, cur_diff = prev_puzzle, prev_diff
return puzzle, cur_diff
def _diff_from_branching(self, branching):
return branching * 100 + self.empty
def estimate_difficulty(self, iterations=20):
total = 0
for i in range(iterations):
total += self._diff_from_branching(self.solve_smart(returnBranching=True)[1])
return int(total / iterations)
``` |
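The class above is normally driven through `SudokuPuzzle` (see `execute.py` earlier), but it can also be used directly. A minimal sketch, assuming the import path matches the file location `sudoku/sudoku.py`:

```python
from sudoku.sudoku import Sudoku  # import path assumed from the file layout above

empty = Sudoku(size=9)                               # all 81 nodes start at value 0
solution = empty.solve_smart()                       # returns a new, fully filled Sudoku
puzzle, difficulty = solution.make_puzzle(diff=300)  # carve holes until the estimate reaches 300
print(puzzle)
print(f"estimated difficulty: {difficulty}")
```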
{
"source": "jonasvdd/DS-python-data-analysis",
"score": 3
} |
#### File: notebooks/_solutions/case2_observations_processing18.py
```python
def transform_utm_to_wgs(row):
"""Converts the x and y coordinates
Parameters
----------
row : pd.Series
Single DataFrame row
Returns
-------
pd.Series with longitude and latitude
"""
transformer = Transformer.from_crs("EPSG:32612", "epsg:4326")
return pd.Series(transformer.transform(row['xutm'], row['yutm']))
``` |
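A hedged usage sketch for the solution above; the column names and the `pyproj` import are assumptions based on the function body and are not part of the original notebook cell:

```python
import pandas as pd
from pyproj import Transformer  # the solution expects Transformer to already be in scope

observations = pd.DataFrame({"xutm": [448252.0], "yutm": [4975316.0]})
# transformer.transform returns (latitude, longitude) for epsg:4326 with pyproj's default axis order
observations[["lat", "lon"]] = observations.apply(transform_utm_to_wgs, axis=1)
print(observations)
```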
{
"source": "JonasVervloet/RL-Coverage-Planner",
"score": 2
} |
#### File: RL-Coverage-Planner/deep_rl/trainer.py
```python
import numpy as np
import matplotlib.pyplot as plt
import torch
from collections import namedtuple
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'done'))
class DeepRLTrainer:
NB_EPISODES = 3000
SAVE_EVERY = 500
INFO_EVERY = 50
SOFT_MAX = False
DEVICE = 'cpu'
def __init__(self, environment, agent, save_path):
self.env = environment
self.agent = agent
self.save_path = save_path
self.total_rewards = []
self.avg_rewards = []
self.tiles_visited = []
self.avg_tiles_visited = []
self.nb_steps = []
self.avg_nb_steps = []
self.cc_counter = 0
self.nb_complete_cov = []
self.terrain_diffs = []
self.avg_terrain_diffs = []
def train(self):
for i in range(DeepRLTrainer.NB_EPISODES):
current_state = torch.tensor(self.env.reset(), dtype=torch.float,
device=DeepRLTrainer.DEVICE)
done = False
info = {}
self.agent.update_epsilon(i)
while not done:
action = self.agent.select_action(
current_state, soft_max=DeepRLTrainer.SOFT_MAX
)
n_state, reward, done, info = self.env.step(action)
action = torch.tensor(action, dtype=torch.int64,
device=DeepRLTrainer.DEVICE)
n_state = torch.tensor(n_state, dtype=torch.float,
device=DeepRLTrainer.DEVICE)
reward = torch.tensor(reward, dtype=torch.float,
device=DeepRLTrainer.DEVICE)
done = torch.tensor(done, dtype=torch.bool,
device=DeepRLTrainer.DEVICE)
self.agent.observe_transition(Transition(
current_state, action, n_state, reward, done
), device=DeepRLTrainer.DEVICE)
current_state = n_state
if info["full_cc"]:
self.cc_counter += 1
print(f"COMPLETE COVERAGE: {self.cc_counter}")
self.total_rewards.append(info["total_reward"])
self.nb_steps.append(info["nb_steps"])
self.tiles_visited.append(info["total_covered_tiles"])
self.nb_complete_cov.append(self.cc_counter)
self.terrain_diffs.append(info["total_pos_terr_diff"])
avg_start = 0 if i < DeepRLTrainer.SAVE_EVERY else -DeepRLTrainer.SAVE_EVERY
self.avg_rewards.append(np.average(self.total_rewards[avg_start:]))
self.avg_tiles_visited.append(np.average(self.tiles_visited[avg_start:]))
self.avg_nb_steps.append(np.average(self.nb_steps[avg_start:]))
self.avg_terrain_diffs.append(np.average(self.terrain_diffs[avg_start:]))
episode_nb = i + 1
if episode_nb % DeepRLTrainer.INFO_EVERY == 0:
print(f"Episode {episode_nb}")
print(f"average total reward: {self.avg_rewards[-1]}")
print(f"average nb steps: {self.avg_nb_steps[-1]}")
print(f"average nb tiles visited: {self.avg_tiles_visited[-1]}")
print(f"average positive terrain diff: {self.avg_terrain_diffs[-1]}")
print(f"epsilon: {self.agent.epsilon}")
print()
if episode_nb % DeepRLTrainer.SAVE_EVERY == 0:
x = range(episode_nb)
plt.clf()
plt.plot(x, self.total_rewards, x, self.avg_rewards)
plt.legend(['total rewards', 'average total rewards'])
plt.title('Total reward for every episode')
plt.savefig(self.save_path + f"rewards.png")
np.save(self.save_path + f"rewards.npy", self.total_rewards)
np.save(self.save_path + f"avg_rewards.npy", self.avg_rewards)
plt.clf()
plt.plot(x, self.tiles_visited, x, self.avg_tiles_visited)
plt.legend(['nb tiles visited', 'average nb tile visited'])
plt.title('Number of tiles visited for every episode')
plt.savefig(self.save_path + f"tiles_visited.png")
np.save(self.save_path + f"tiles_visited.npy", self.tiles_visited)
np.save(self.save_path + f"avg_tiles_visited.npy", self.avg_tiles_visited)
plt.clf()
plt.plot(x, self.nb_steps, x, self.avg_nb_steps)
plt.legend(['nb steps', 'average nb steps'])
plt.title('Number of steps for every episode')
plt.savefig(self.save_path + f"nb_steps.png")
np.save(self.save_path + f"nb_steps.npy", self.nb_steps)
np.save(self.save_path + f"avg_nb_steps.npy", self.avg_nb_steps)
plt.clf()
plt.plot(x, self.nb_complete_cov)
plt.legend(['nb complete coverage runs'])
plt.title('Nb of complete coverage runs')
plt.savefig(self.save_path + f"nb_complete_covs.png")
np.save(self.save_path + f"nb_complete_covs.npy", self.nb_complete_cov)
plt.clf()
plt.plot(x, self.terrain_diffs, x, self.avg_terrain_diffs)
plt.legend(['terrain differences', 'average terrain differences'])
plt.title('Total terrain differences for every episode')
plt.savefig(self.save_path + f"terrain_diffs.png")
np.save(self.save_path + f"terrain_diffs.npy", self.terrain_diffs)
np.save(self.save_path + f"avg_terrain_diffs.npy", self.avg_terrain_diffs)
self.agent.save(self.save_path, episode_nb)
```
#### File: RL-Coverage-Planner/environments/env_representation.py
```python
import numpy as np
import json
from turorials.perlin_noise.obstacle_generation import flood_grid
class EnvironmentRepresentation:
def __init__(self):
self.obstacle_map = None
self.terrain_map = None
self.start_positions = None
self.nb_free_tiles = 0
self.dim = (8, 8)
self.extra_spacing = (0, 0)
def set_dimension(self, n_dim):
self.dim = n_dim
def set_extra_spacing(self, n_spacing):
self.extra_spacing = n_spacing
def get_dimension(self):
return self.dim
def get_obstacle_map(self, extra_spacing=False):
if not extra_spacing:
x_tot, y_tot = self.obstacle_map.shape
return self.obstacle_map[
self.extra_spacing[0]:x_tot-self.extra_spacing[0],
self.extra_spacing[1]:y_tot-self.extra_spacing[1]
]
else:
return self.obstacle_map
def get_terrain_map(self, extra_spacing=False):
if not extra_spacing:
x_tot, y_tot = self.obstacle_map.shape
return self.terrain_map[
self.extra_spacing[0]:x_tot-self.extra_spacing[0],
self.extra_spacing[1]:y_tot-self.extra_spacing[1]
]
else:
return self.terrain_map
def has_terrain_info(self):
return self.terrain_map is not None
def save(self, path, name):
json_to_save = {}
obstacle_path = f"{path}{name}_obstacle_grid.npy"
np.save(obstacle_path, self.obstacle_map)
json_to_save['obstacle_grid'] = obstacle_path
json_to_save['terrain_grid'] = None
if self.terrain_map is not None:
terrain_path = f"{path}{name}_terrain_grid.npy"
np.save(terrain_path, self.terrain_map)
json_to_save['terrain_grid'] = terrain_path
json_to_save['start_positions'] = self.start_positions
json_to_save['nb_free_tiles'] = self.nb_free_tiles
with open(f'{path}{name}.txt', 'w') as output_file:
json.dump(json_to_save, output_file)
def load(self, path, name):
with open(f'{path}{name}.txt') as input_file:
input_data = json.load(input_file)
obstacle_path = input_data['obstacle_grid']
self.obstacle_map = np.load(obstacle_path)
terrain_path = input_data['terrain_grid']
if terrain_path is not None:
self.terrain_map = np.load(terrain_path)
start_positions_array = np.array(input_data['start_positions'])
self.start_positions = [pos for pos in zip(start_positions_array[:, 0], start_positions_array[:, 1])]
self.nb_free_tiles = input_data['nb_free_tiles']
class GeneralEnvironmentRepresentation:
def __init__(self, n_obstacle_map, nb_free_tiles, stat_positions,
n_terrain_map, extra_spacing=0):
assert(n_obstacle_map.shape == n_terrain_map.shape)
self.extra_spacing = extra_spacing
self.obstacle_map = n_obstacle_map
self.nb_free_tiles = nb_free_tiles
self.start_positions = stat_positions
self.terrain_map = n_terrain_map
def get_nb_free_tiles(self):
return self.nb_free_tiles
def get_start_positions(self):
return self.start_positions
def get_obstacle_map(self, extra_spacing=0):
assert(extra_spacing <= self.extra_spacing)
offset = self.extra_spacing - extra_spacing
x_tot, y_tot = self.obstacle_map.shape
return self.obstacle_map[
offset:x_tot - offset,
offset:y_tot - offset
]
def get_terrain_map(self, extra_spacing=0):
assert (extra_spacing <= self.extra_spacing)
offset = self.extra_spacing - extra_spacing
x_tot, y_tot = self.terrain_map.shape
return self.terrain_map[
offset:x_tot-offset,
offset:y_tot-offset
]
def save(self, path, name):
json_to_save = {}
obstacle_path = f"{path}{name}_obstacle_grid.npy"
np.save(obstacle_path, self.obstacle_map)
json_to_save['obstacle_grid'] = obstacle_path
terrain_path = f"{path}{name}_terrain_grid.npy"
np.save(terrain_path, self.terrain_map)
json_to_save['terrain_grid'] = terrain_path
json_to_save['start_positions'] = self.start_positions
json_to_save['nb_free_tiles'] = self.nb_free_tiles
json_to_save['extra_spacing'] = self.extra_spacing
with open(f'{path}{name}.txt', 'w') as output_file:
json.dump(json_to_save, output_file)
def load(self, path, name):
with open(f'{path}{name}.txt') as input_file:
input_data = json.load(input_file)
obstacle_path = input_data['obstacle_grid']
self.obstacle_map = np.load(obstacle_path)
terrain_path = input_data['terrain_grid']
self.terrain_map = np.load(terrain_path)
start_positions_array = np.array(input_data['start_positions'])
self.start_positions = [pos for pos in zip(start_positions_array[:, 0], start_positions_array[:, 1])]
self.nb_free_tiles = input_data['nb_free_tiles']
self.extra_spacing = input_data['extra_spacing']
if __name__ == "__main__":
save_path = "D:/Documenten/Studie/2020-2021/Masterproef/Reinforcement-Learner-For-Coverage-Path-Planning/data/"
name = "test_grid.npy"
obstacle_grid = np.load(save_path + name)
env_repr = EnvironmentRepresentation()
env_repr.obstacle_map = obstacle_grid
regions = flood_grid(obstacle_grid)
if regions[0][0] == 0:
env_repr.start_positions = regions[0][1]
env_repr.nb_free_tiles = len(regions[0][1]) + len(regions[0][2])
print(regions[0][1])
if regions[1][0] == 0:
env_repr.start_positions = regions[1][1]
env_repr.nb_free_tiles = len(regions[1][1]) + len(regions[1][2])
print(regions[1][1])
env_repr.save(save_path, "test_representation")
env_repr2 = EnvironmentRepresentation()
env_repr2.load(save_path, "test_representation")
print(env_repr2.nb_free_tiles)
print(env_repr2.start_positions)
```
#### File: JonasVervloet/RL-Coverage-Planner/train.py
```python
import sys, getopt
import torch.optim as optim
import pprint
import json
from networks.simple_q_network import SimpleDeepQNetworkGenerator
from networks.simple_q_network import SimpleDeepQNetworkGenerator2
from deep_rl.deep_q_agent import DeepQAgent
from deep_rl.double_dqn_agent import DoubleDeepQAgent
from load import load_arguments, default_arguments, initialize_objects
from load import GENERATORS, AGENTS, OPTIMIZERS
SHORT_OPTIONS = ""
LONG_OPTIONS = [
"loadAgent=",
"loadArguments=",
"disableCuda",
"dim=",
"hFreq=",
"oFreq=",
"fillRatio=",
"loadEnv=",
"agentSize=",
"fov=",
"turn",
"terrain",
"movePunish=",
"terrainPunish=",
"obstaclePunish=",
"discoverReward=",
"coverageReward=",
"maxStepMultiplier=",
"gamma=",
"networkGen=",
"rlAgent=",
"epsilonDecay=",
"targetUpdate=",
"queueLength=",
"optim=",
"lr=",
"nbEpisodes=",
"printEvery=",
"saveEvery=",
"savePath="
]
def main(argv):
try:
options, args = getopt.getopt(argv, SHORT_OPTIONS, LONG_OPTIONS)
except getopt.GetoptError:
print("badly formatted command line arguments")
arguments = default_arguments()
for option, argument in options:
if option == "--loadAgent":
argument_split = argument.split(",")
arguments.update(load_arguments(argument_split[0], "arguments"))
arguments["loadPath"] = argument_split[0]
arguments["loadEpisode"] = int(argument_split[1])
if option == "--loadArguments":
argument_split = argument.split(",")
arguments.update(load_arguments(argument_split[0], argument_split[1]))
if option == "--disableCuda":
arguments["cuda"] = False
if option == "--dim":
arguments["dim"] = tuple(tuple(map(int, argument.split(","))))
if option == "--hFreq":
arguments["hFreq"] = tuple(map(int, argument.split(",")))
if option == "--oFreq":
arguments["oFreq"] = tuple(map(int, argument.split(",")))
if option == "--fillRatio":
arguments["fillRatio"] = float(argument)
if option == "--loadEnv":
arguments["loadEnv"] = tuple(argument.split(","))
if option == "--agentSize":
arguments["agentSize"] = int(argument)
if option == "--fov":
arguments["fov"] = int(argument)
if option == "--turn":
arguments["turn"] = True
if option == "--terrain":
arguments["terrain"] = True
if option == "--movePunish":
arguments["movePunish"] = float(argument)
if option == "--terrainPunish":
arguments["terrainPunish"] = float(argument)
if option == "--obstaclePunish":
arguments["obstaclePunish"] = float(argument)
if option == "--discoverReward":
arguments["discoverReward"] = float(argument)
if option == "--coverageReward":
arguments["coverageReward"] = float(argument)
if option == "--maxStepMultiplier":
arguments["maxStepMultiplier"] = int(argument)
if option == "--gamma":
arguments["gamma"] = float(argument)
assert(float(argument) <= 1.0)
if option == "--networkGen":
if argument in GENERATORS:
arguments["networkGen"] = argument
else:
raise Exception("TRAIN.py: given network generator is not defined...")
if option == "--optim":
if argument in OPTIMIZERS:
arguments["optim"] = argument
else:
raise Exception("TRAIN.py: given optimizer is not defined...")
if option == "--lr":
arguments["lr"] = float(argument)
if option == "--rlAgent":
if argument in AGENTS:
arguments["rlAgent"] = argument
else:
raise Exception("TRAIN.py: given agent is not defined...")
if option == "--epsilonDecay":
arguments["epsilonDecay"] = int(argument)
if option == "--targetUpdate":
arguments["targetUpdate"] = int(argument)
if option == "--queueLength":
arguments["queueLength"] = int(argument)
if option == "--nbEpisodes":
arguments["nbEpisodes"] = int(argument)
if option == "--printEvery":
arguments["printEvery"] = int(argument)
if option == "--saveEvery":
arguments["saveEvery"] = int(argument)
if option == "--savePath":
arguments["savePath"] = argument
env, agent, trainer = initialize_objects(arguments, trainer_required=True)
with open(f"{arguments['savePath']}arguments.txt", 'w') as output_file:
json.dump(arguments, output_file)
print("ARGUMENTS:")
pprint.pprint(arguments)
trainer.train()
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: turorials/perlin_noise/obstacle_generation.py
```python
import matplotlib.pyplot as plt
import numpy as np
from turorials.perlin_noise.open_simplex import generate_noise_map
# DIM = (8, 8)
DIM = (16, 16)
# DIM = (32, 32)
# FILL_RATIO = 0.30
FILL_RATIO = 0.35
# FILL_RATIO = 0.40
BOUNDARY_SIZE = 1
# NB_SMOOTHING = 2
NB_SMOOTHING = 2
# NB_SMOOTHING = 3
SMOOTH_SIZE = 1
RULE_THRESHOLD = 4
NB_GRIDS = 10
def generate_obstacle_grid():
random_grid = np.random.rand(DIM[0], DIM[1])
clipped_grid = np.zeros(DIM)
clipped_grid[random_grid < FILL_RATIO] = 1
clipped_grid[:BOUNDARY_SIZE, :] = 1
clipped_grid[:, :BOUNDARY_SIZE] = 1
clipped_grid[-BOUNDARY_SIZE:, :] = 1
clipped_grid[:, -BOUNDARY_SIZE:] = 1
for i in range(NB_SMOOTHING):
bound_x_low = BOUNDARY_SIZE
bound_y_low = BOUNDARY_SIZE
bound_x_high = DIM[0] - BOUNDARY_SIZE
bound_y_high = DIM[1] - BOUNDARY_SIZE
neighbor_sum = np.zeros((DIM[0] - 2 * BOUNDARY_SIZE, DIM[1] - 2 * BOUNDARY_SIZE))
for delta_x in range(-SMOOTH_SIZE, SMOOTH_SIZE + 1):
for delta_y in range(-SMOOTH_SIZE, SMOOTH_SIZE + 1):
if delta_x == 0 and delta_y == 0:
continue
neighbor_sum += clipped_grid[
bound_x_low + delta_x: bound_x_high + delta_x,
bound_y_low + delta_y: bound_y_high + delta_y
]
clipped_grid_middle = clipped_grid[bound_x_low:bound_x_high, bound_y_low:bound_y_high]
clipped_grid_middle[neighbor_sum > RULE_THRESHOLD] = 1
clipped_grid_middle[neighbor_sum < RULE_THRESHOLD] = 0
clipped_grid[bound_x_low:bound_x_high, bound_y_low:bound_y_high] = clipped_grid_middle
return clipped_grid
def flood_grid(grid):
visited_tiles = np.zeros(grid.shape)
regions = []
for i in range(grid.shape[0]):
for j in range(grid.shape[1]):
if visited_tiles[(i, j)] == 1:
continue
region = flood_tile(grid, visited_tiles, (i, j))
regions.append(region)
return regions
def flood_tile(grid, visited_tiles, tile):
tile_value = grid[tile]
middle_tiles = []
border_tiles = []
queue = []
border = grow_queue(grid, visited_tiles, queue, tile, tile_value)
if border:
border_tiles.append(tile)
else:
middle_tiles.append(tile)
visited_tiles[tile] = 1
while len(queue) != 0:
n_tile = queue.pop(0)
border = grow_queue(grid, visited_tiles, queue, n_tile, tile_value)
if border:
border_tiles.append(n_tile)
else:
middle_tiles.append(n_tile)
visited_tiles[n_tile] = 1
return [tile_value, border_tiles, middle_tiles]
def grow_queue(grid, visited_tiles, queue, tile, tile_value):
border_tile = False
for i in range(-1, 2):
for j in range(-1, 2):
if i == 0 and j == 0:
continue
n_tile_x = tile[0] + i
n_tile_y = tile[1] + j
if n_tile_x < 0 or n_tile_y < 0:
continue
if n_tile_x == grid.shape[0] or n_tile_y == grid.shape[1]:
continue
if grid[n_tile_x, n_tile_y] == tile_value:
if visited_tiles[(n_tile_x, n_tile_y)] == 1:
continue
if (n_tile_x, n_tile_y) in queue:
continue
queue.append((n_tile_x, n_tile_y))
else:
border_tile = True
return border_tile
if __name__ == "__main__":
for i in range(NB_GRIDS):
print(f"GRID NB {i}")
grid = generate_obstacle_grid()
plt.imshow(grid, cmap="gray")
plt.show()
regions = flood_grid(grid)
print(regions)
masked_grid = np.copy(grid)
for region in regions:
region_value = region[0]
middle_tiles = region[2]
middle_color = 0.25 if region_value == 0 else 0.75
for tile in middle_tiles:
masked_grid[tile] = middle_color
fig, axs = plt.subplots(2)
axs[0].imshow(grid, cmap='gray')
axs[1].imshow(masked_grid, cmap='gray')
plt.show()
```
#### File: turorials/perlin_noise/open_simplex.py
```python
from opensimplex import OpenSimplex
import matplotlib.pyplot as plt
import numpy as np
MAX_NOISE_SEED = 1024
MAX_OFFSET = 1024.0
def generate_noise_map(dim, res, seed, nb_octaves, persistence=0.5):
np.random.seed(seed)
noise_generator = OpenSimplex(np.random.randint(0, MAX_NOISE_SEED))
image = np.zeros(dim)
for octave_nb in range(nb_octaves):
offset = np.random.random((2,)) * MAX_OFFSET
octave_res = np.array(res) * (2**octave_nb)
amplitude = 1.0 * (persistence ** octave_nb)
print(octave_res)
print(amplitude)
image += amplitude * generate_simple_noise_map(dim, octave_res, offset, noise_generator)
return image / np.max(np.abs(image))
def generate_simple_noise_map(dim, res, offset, generator):
return np.array([[
generator.noise2d(x + offset[0], y + offset[1])
for x in np.arange(0, res[0], res[0]/dim[0])]
for y in np.arange(0, res[1], res[1]/dim[1])]
)
if __name__ == "__main__":
fig, axs = plt.subplots(2, 2)
image1 = generate_noise_map((256, 256), (5, 5), 10, 2, 0.25)
axs[0][0].imshow(image1, cmap='gray')
image2 = generate_noise_map((256, 256), (5, 5), 10, 2, 0.5)
axs[0][1].imshow(image2, cmap='gray')
image3 = generate_noise_map((256, 256), (5, 5), 10, 2, 0.75)
axs[1][0].imshow(image3, cmap='gray')
image4 = generate_noise_map((256, 256), (5, 5), 10, 2, 1.0)
axs[1][1].imshow(image4, cmap='gray')
plt.show()
```
#### File: turorials/pygame/blob_pygame.py
```python
import pygame
import numpy as np
from turorials.pygame.environment import Environment
FPS = 2
NB_TILES = 4
TILE_WIDTH = 100
TILE_BORDER = 3
EXTRA_SPACING = 100
FIELD_OF_VIEW = 1
DIMENSION = NB_TILES * (TILE_WIDTH + TILE_BORDER) + TILE_BORDER + EXTRA_SPACING
BORDER_COLOR = "tan"
TILE_COLOR = "moon_glow"
VISITED_COLOR = ""
CURRENT_COLOR = "white"
OBSTACLE_COLOR = "coffee_brown"
VIEW_COLOR = "magenta"
OBSTACLES = np.array([
[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 0, 0, 1],
[1, 0, 0, 0]
])
START_POS = [
(0, 0), (0, 1), (0, 2)
]
current_pos = (1, 2)
env = Environment(OBSTACLES, START_POS)
COLORS = {
"white": (255, 255, 255),
"black": (0, 0, 0),
"maroon4": (139, 28, 98, 255),
"magenta": (255,0,230),
"forest_green": (0,50,0),
"tan": (230,220,170),
"coffee_brown": (200,190,140),
"moon_glow": (235,245,255)
}
pygame.init()
screen = pygame.display.set_mode((DIMENSION, DIMENSION))
screen.fill(COLORS["white"])
clock = pygame.time.Clock()
def draw_tiles(surface, current_pos, visited_tiles, obstacles):
offset = EXTRA_SPACING / 2
for i in range(NB_TILES):
x = offset + i * (TILE_WIDTH + TILE_BORDER)
for j in range(NB_TILES):
y = offset + j * (TILE_WIDTH + TILE_BORDER)
color = COLORS[TILE_COLOR]
if (i, j) == current_pos:
color = COLORS[CURRENT_COLOR]
elif visited_tiles[i, j] == 1:
color = COLORS[VISITED_COLOR]
elif obstacles[i, j] == 1:
color = COLORS[OBSTACLE_COLOR]
tile_square = pygame.Rect(y + TILE_BORDER, x + TILE_BORDER, TILE_WIDTH, TILE_WIDTH)
pygame.draw.rect(surface, color, tile_square)
def draw_fov(surface, current_pos, fov):
    offset = EXTRA_SPACING / 2  # same tile offset as draw_tiles; was previously undefined here (NameError)
    fov_x = max(offset + (TILE_WIDTH + TILE_BORDER) * (current_pos[0] - fov), offset)
fov_y = max(offset + (TILE_WIDTH + TILE_BORDER) * (current_pos[1] - fov), offset)
width = (fov * 2 + 1) * (TILE_WIDTH + TILE_BORDER) + TILE_BORDER
height = (fov * 2 + 1) * (TILE_WIDTH + TILE_BORDER) + TILE_BORDER
fov_square = pygame.Rect(fov_y, fov_x, width, height)
pygame.draw.rect(surface, COLORS[VIEW_COLOR], fov_square)
border_square = pygame.Rect(fov_y + TILE_BORDER, fov_x + TILE_BORDER, width - 2*TILE_BORDER, height - 2*TILE_BORDER)
pygame.draw.rect(surface, COLORS[BORDER_COLOR], border_square)
def draw_state(surface, state):
offset = EXTRA_SPACING / 2
width = (TILE_WIDTH + TILE_BORDER) * NB_TILES + TILE_BORDER
height = (TILE_WIDTH + TILE_BORDER) * NB_TILES + TILE_BORDER
border_square = pygame.Rect(offset, offset, width, height)
pygame.draw.rect(screen, COLORS[BORDER_COLOR], border_square)
current_pos, visited_tiles, obstacles = state
draw_tiles(surface, current_pos, visited_tiles, obstacles)
running = True
done = False
state = env.reset()
while running:
enter_pressed = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
enter_pressed = True
if not done:
action = np.random.randint(4)
state, _, done = env.step(action)
else:
print("waiting for enter")
if enter_pressed:
state = env.reset()
done = False
draw_state(screen, state)
pygame.display.update()
clock.tick(FPS)
pygame.quit()
```
#### File: turorials/taxi/taxi.py
```python
import gym
import numpy as np
import matplotlib.pyplot as plt
NB_EPISODES = 10000
PRINT_EVERY = 1000
ALPHA = 0.1
DISCOUNT = 0.9
EPSILON_START = 0.99
MIN_EPSILON = 0.01
EPSILON_DECAY = 1.0/NB_EPISODES
env = gym.make("Taxi-v3")
OBS_SPACE = env.observation_space.n
ACT_SPACE = env.action_space.n
print(OBS_SPACE)
print(ACT_SPACE)
def select_action(table, s, eps):
if np.random.random() > eps:
return np.argmax(table[s])
else:
return np.random.randint(0, ACT_SPACE)
def update_q_value(table, s, a, r, s_n):
max_next = np.max(table[s_n])
curr_value = table[s][a]
table[s][a] = curr_value + ALPHA * (r + DISCOUNT * max_next - curr_value)
return table
def decay_epsilon(eps):
return max(MIN_EPSILON, eps - EPSILON_DECAY)
q_table = np.zeros((OBS_SPACE, ACT_SPACE))
epsilon = EPSILON_START
nbs_steps = []
avg_nbs_steps = []
total_rewards = []
avg_total_rewards = []
print(q_table.shape)
for episode in range(NB_EPISODES):
current_state = env.reset()
nb_steps = 0
total_reward = 0
done = False
while not done:
action = select_action(q_table, current_state, epsilon)
new_state, reward, done, _ = env.step(action)
nb_steps += 1
total_reward += reward
q_table = update_q_value(
q_table,
current_state, action,
reward, new_state
)
current_state = new_state
nbs_steps.append(nb_steps)
avg_nbs_steps.append(np.average(nbs_steps))
total_rewards.append(total_reward)
avg_total_rewards.append(np.average(total_rewards))
epsilon = decay_epsilon(epsilon)
if episode % PRINT_EVERY == 0:
print()
print(f"EPISODE {episode}")
print(f"average reward {avg_nbs_steps[episode]}")
print(f"avg nb steps {avg_total_rewards[episode]}")
plt.plot(range(NB_EPISODES), total_rewards)
plt.plot(range(NB_EPISODES), avg_total_rewards)
plt.show()
plt.plot(range(NB_EPISODES), nbs_steps)
plt.plot(range(NB_EPISODES), avg_nbs_steps)
plt.show()
current_state = env.reset()
env.render()
done = False
while not done:
action = np.argmax(q_table[current_state])
new_state, reward, done, _ = env.step(action)
env.render()
print(reward)
current_state = new_state
``` |
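For reference, `update_q_value` above implements the tabular Q-learning update Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)). A tiny worked check using the script's ALPHA = 0.1 and DISCOUNT = 0.9 (toy numbers, not part of the tutorial):

```python
import numpy as np

toy_q = np.zeros((2, 2))
toy_q[1] = [0.5, 1.0]                    # best value in the next state is 1.0
toy_q = update_q_value(toy_q, s=0, a=0, r=2.0, s_n=1)
# expected: 0 + 0.1 * (2.0 + 0.9 * 1.0 - 0) = 0.29
assert abs(toy_q[0][0] - 0.29) < 1e-9
```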
{
"source": "JonasVil/dssdata",
"score": 3
} |
#### File: tools/lines/__init__.py
```python
import pandas as pd
from ... import SystemClass
from ...decorators import tools
from ..._formatters import (
__identify_ph_config,
__get_mag_vanish,
__get_ang_vanish,
__remove_nones_from_lists,
__check_elements,
)
from typing import List
@tools
def get_infos(distSys: SystemClass, names: List[str]) -> pd.DataFrame:
"""
Get some relevant infos from lines.
Ex:
| | name | bus1 | ph_bus1 | bus2 | ph_bus2 | I(A)_bus1_ph_a | I(A)_bus1_ph_b | I(A)_bus1_ph_c | I(A)_bus2_ph_a | I(A)_bus2_ph_b | I(A)_bus2_ph_c | ang_bus1_ph_a | ang_bus1_ph_b | ang_bus1_ph_c | ang_bus2_ph_a | ang_bus2_ph_b | ang_bus2_ph_c | kw_losses | kvar_losses | emergAmps | normAmps | perc_NormAmps | perc_EmergAmps |
|----|--------|------|---------|------|---------|----------------|----------------|----------------|----------------|----------------|----------------|---------------|---------------|---------------|---------------|---------------|---------------|-----------|-------------|-----------|----------|---------------|----------------|
| 0 | 650632 | rg60 | abc | 632 | abc | 562.609 | 419.029 | 591.793 | 562.61 | 419.03 | 591.794 | -28.7 | -141.3 | 93.4 | 151.3 | 38.7 | -86.6 | 60.737 | 196.015 | 600.0 | 400.0 | 1.479 | 0.986 |
| 1 | 632670 | 632 | abc | 670 | abc | 481.916 | 218.055 | 480.313 | 481.916 | 218.055 | 480.313 | -27.2 | -135.2 | 99.6 | 152.8 | 44.8 | -80.4 | 12.991 | 41.495 | 600.0 | 400.0 | 1.205 | 0.803 |
| 2 | 670671 | 670 | abc | 671 | abc | 473.795 | 188.824 | 424.942 | 473.795 | 188.824 | 424.942 | -27.0 | -132.6 | 101.3 | 153.0 | 47.4 | -78.7 | 22.729 | 72.334 | 600.0 | 400.0 | 1.184 | 0.79 |
Args:
distSys : An instance of [SystemClass][dssdata.SystemClass].
names : Lines names.
Returns:
Lines infos.
""" # noqa
__check_elements(names, distSys.dss.Lines.AllNames())
def build_line_dicts(distSys: SystemClass, line_name: str) -> dict:
def vanish_line_infos(bus_raw: list, current_raw: list) -> tuple:
bus_name = bus_raw[0]
phs_raw = list(map(lambda bus: int(bus), bus_raw[1:]))
phs_data = phs_raw if phs_raw != [] else [1, 2, 3]
phs = __identify_ph_config(phs_data)
currents_mag = __get_mag_vanish(phs_data, current_raw)
currents_ang = __get_ang_vanish(phs_data, current_raw)
return (bus_name, phs, currents_mag, currents_ang)
distSys.dss.Lines.Name(line_name)
losses = distSys.dss.CktElement.Losses()
normalAmps = distSys.dss.CktElement.NormalAmps()
emergAmps = distSys.dss.CktElement.EmergAmps()
currents_raw = distSys.dss.CktElement.CurrentsMagAng()
currents_raw_bus1 = currents_raw[: int(len(currents_raw) / 2)]
currents_raw_bus2 = currents_raw[int(len(currents_raw) / 2):]
bus_raw = distSys.dss.Lines.Bus1().split(".")
(bus_name1, phs1, currents_mag1, currents_ang1) = vanish_line_infos(
bus_raw, currents_raw_bus1
)
bus_raw = distSys.dss.Lines.Bus2().split(".")
(bus_name2, phs2, currents_mag2, currents_ang2) = vanish_line_infos(
bus_raw, currents_raw_bus2
)
currents_mag1_calc = __remove_nones_from_lists(currents_mag1)
currents_mag2_calc = __remove_nones_from_lists(currents_mag2)
return {
"name": line_name,
"bus1": bus_name1,
"ph_bus1": phs1,
"bus2": bus_name2,
"ph_bus2": phs2,
"I(A)_bus1_ph_a": currents_mag1[0],
"I(A)_bus1_ph_b": currents_mag1[1],
"I(A)_bus1_ph_c": currents_mag1[2],
"I(A)_bus2_ph_a": currents_mag2[0],
"I(A)_bus2_ph_b": currents_mag2[1],
"I(A)_bus2_ph_c": currents_mag2[2],
"ang_bus1_ph_a": currents_ang1[0],
"ang_bus1_ph_b": currents_ang1[1],
"ang_bus1_ph_c": currents_ang1[2],
"ang_bus2_ph_a": currents_ang2[0],
"ang_bus2_ph_b": currents_ang2[1],
"ang_bus2_ph_c": currents_ang2[2],
"kw_losses": losses[0] / 1000,
"kvar_losses": losses[1] / 1000,
"emergAmps": emergAmps,
"normAmps": normalAmps,
"perc_NormAmps": max(currents_mag1_calc + currents_mag2_calc)
/ normalAmps,
"perc_EmergAmps": max(currents_mag1_calc + currents_mag2_calc)
/ emergAmps,
}
return pd.DataFrame(
tuple(
map(lambda line_name: build_line_dicts(distSys, line_name), names,)
)
)
@tools
def get_all_infos(distSys: SystemClass) -> pd.DataFrame:
"""
Get some relevant infos from all lines. See [get_infos][dssdata.tools.lines.get_infos].
Args:
distSys: An instance of [SystemClass][dssdata.SystemClass]
Returns:
All lines infos
""" # noqa
line_names = distSys.dss.Lines.AllNames()
return get_infos(distSys, line_names)
```
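A hedged usage sketch for the two tools above, reusing the IEEE 13-bus case and constructor arguments from the test file at the end of this dump; whether an explicit power-flow solve is needed first depends on the `@tools` decorator, which is not shown here:

```python
from dssdata import SystemClass
from dssdata.tools import lines

distSys = SystemClass(
    path="test/syste_test_IEEE13bus/IEEE13Nodeckt.dss",
    kV=[115, 4.16, 0.48],
    loadmult=1,
)
df = lines.get_infos(distSys, ["650632", "632670"])
print(df[["name", "kw_losses", "perc_NormAmps"]])

df_all = lines.get_all_infos(distSys)
```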
#### File: tools/losses/__init__.py
```python
import pandas as pd
from ... import SystemClass
from ..._formatters import __check_elements
def __build_pd_dicts(
distSys: SystemClass, element_name: str, element_type: str
) -> dict:
distSys.dss.PDElements.Name(str(element_type) + "." + str(element_name))
typ = distSys.dss.CktElement.Name().replace("." + str(element_name), "")
losses = distSys.dss.CktElement.Losses()
return {
"type": typ,
"name": element_name,
"kw_losses": losses[0] / 1000,
"kvar_losses": losses[1] / 1000,
}
def pd_element_loss(
distSys: SystemClass, element_name: str, element_type: str
) -> pd.DataFrame:
"""
Get PD Element loss.
Ex:
| | type | name | kw_losses | kvar_losses |
|:-:|:-----------:|:----:|:-----------------:|:-----------------:|
| 0 | Transformer | xfm1 | 5.552671994055243 | 10.09627035828575 |
Args:
distSys: An instance of [SystemClass][dssdata.SystemClass]
element_name: The name of the desired PD element
element_type: The type of the PD element (Line or Transformer)
Returns:
A DataFrame containing the losses of the desired PD element.
""" # noqa
__check_elements(
list((str(element_type) + "." + str(element_name)).split()),
distSys.dss.Circuit.AllElementNames(),
)
pd_loss = []
pd_loss.append(__build_pd_dicts(distSys, element_name, element_type))
return pd.DataFrame(pd_loss)
def pd_element_loss_list(
distSys: SystemClass, element_names: list, element_type: str
) -> pd.DataFrame:
"""
Get PD Element loss List.
Ex:
| | type | name | kw_losses | kvar_losses |
|:-:|:-----------:|:----:|:-------------------:|:-------------------:|
| 0 | Transformer | sub | 0.03228776756674051 | 0.26246840671868993 |
| 1 | Transformer | reg1 | 0.12209426402417012 | 0.12385869008488953 |
| 2 | Transformer | reg2 | 0.06534502545557916 | 0.06707698704162612 |
| 3 | Transformer | reg3 | 0.1350894299906213 | 0.13685391995031387 |
| 4 | Transformer | xfm1 | 5.552671994055243 | 10.09627035828575 |
Args:
distSys: An instance of [SystemClass][dssdata.SystemClass]
element_names: A list of names of the desired PD elements
element_type: The type of the PD elements (Line or Transformer)
Returns:
A DataFrame containing the losses of the desired list of PD elements.
""" # noqa
if element_type == "Line":
__check_elements(element_names, distSys.dss.Lines.AllNames())
elif element_type == "Transformer":
__check_elements(element_names, distSys.dss.Transformers.AllNames())
return pd.DataFrame(
tuple(
map(
lambda element_name: __build_pd_dicts(
distSys, element_name, element_type
),
element_names,
)
)
)
def get_all_line_losses(distSys: SystemClass) -> pd.DataFrame:
"""
Get all lines losses.
Ex:
| | type | name | kw_losses | kvar_losses |
|:--:|:----:|:------:|:---------------------:|:----------------------:|
| 0 | Line | 650632 | 60.73738438443188 | 196.01456922721653 |
| 1 | Line | 632670 | 12.990633124585496 | 41.49451118066639 |
| 2 | Line | 670671 | 22.728758590972518 | 72.33414340631373 |
| 3 | Line | 671680 | 8.613828479544935e-12 | -0.004169229516017848 |
| 4 | Line | 632633 | 0.8244871671261499 | 1.0561418323197722 |
| 5 | Line | 632645 | 2.75857850181032 | 2.4159107795492454 |
| 6 | Line | 645646 | 0.5274715389783668 | 0.41973513183818434 |
| 7 | Line | 692675 | 4.1629544212549225 | 2.419339661740261 |
| 8 | Line | 671684 | 0.5794876384501113 | 0.47068061342113654 |
| 9 | Line | 684611 | 0.3824044250881998 | 0.38734916932047053 |
| 10 | Line | 684652 | 0.7998267312559038 | 0.230879175578375 |
| 11 | Line | 671692 | 9.054614813067019e-06 | 4.3655745685100556e-14 |
Args:
distSys: An instance of [SystemClass][dssdata.SystemClass]
Returns:
A DataFrame containing all line losses.
""" # noqa
element_type = "Line"
return pd.DataFrame(
tuple(
map(
lambda element_name: __build_pd_dicts(
distSys, element_name, element_type
),
distSys.dss.Lines.AllNames(),
)
)
)
def get_all_transformers_losses(distSys: SystemClass) -> pd.DataFrame:
"""
Get all transformers losses.
Ex:
| | type | name | kw_losses | kvar_losses |
|:-:|:-----------:|:----:|:-------------------:|:-------------------:|
| 0 | Transformer | sub | 0.03228776756674051 | 0.26246840671868993 |
| 1 | Transformer | reg1 | 0.12209426402417012 | 0.12385869008488953 |
| 2 | Transformer | reg2 | 0.06534502545557916 | 0.06707698704162612 |
| 3 | Transformer | reg3 | 0.1350894299906213 | 0.13685391995031387 |
| 4 | Transformer | xfm1 | 5.552671994055243 | 10.09627035828575 |
Args:
distSys: An instance of [SystemClass][dssdata.SystemClass]
Returns:
        A DataFrame containing all transformer losses.
""" # noqa
element_type = "Transformer"
return pd.DataFrame(
tuple(
map(
lambda element_name: __build_pd_dicts(
distSys, element_name, element_type
),
distSys.dss.Transformers.AllNames(),
)
)
)
def get_all_pd_elements_losses(distSys: SystemClass) -> pd.DataFrame:
"""
    Get all PD element losses.
Ex:
| | type | name | kw_losses | kvar_losses |
|:--:|:-----------:|:------:|:---------------------:|:----------------------:|
| 0 | Transformer | sub | 0.03228776756674051 | 0.26246840671868993 |
| 1 | Transformer | reg1 | 0.12209426402417012 | 0.12385869008488953 |
| 2 | Transformer | reg2 | 0.06534502545557916 | 0.06707698704162612 |
| 3 | Transformer | reg3 | 0.1350894299906213 | 0.13685391995031387 |
| 4 | Transformer | xfm1 | 5.552671994055243 | 10.09627035828575 |
| 5 | Line | 650632 | 60.73738438443188 | 196.01456922721653 |
| 6 | Line | 632670 | 12.990633124585496 | 41.49451118066639 |
| 7 | Line | 670671 | 22.728758590972518 | 72.33414340631373 |
| 8 | Line | 671680 | 8.613828479544935e-12 | -0.004169229516017848 |
| 9 | Line | 632633 | 0.8244871671261499 | 1.0561418323197722 |
| 10 | Line | 632645 | 2.75857850181032 | 2.4159107795492454 |
| 11 | Line | 645646 | 0.5274715389783668 | 0.41973513183818434 |
| 12 | Line | 692675 | 4.1629544212549225 | 2.419339661740261 |
| 13 | Line | 671684 | 0.5794876384501113 | 0.47068061342113654 |
| 14 | Line | 684611 | 0.3824044250881998 | 0.38734916932047053 |
| 15 | Line | 684652 | 0.7998267312559038 | 0.230879175578375 |
| 16 | Line | 671692 | 9.054614813067019e-06 | 4.3655745685100556e-14 |
Args:
distSys: An instance of [SystemClass][dssdata.SystemClass]
Returns:
        A DataFrame containing all PD element losses.
""" # noqa
line_losses = get_all_line_losses(distSys)
transformer_losses = get_all_transformers_losses(distSys)
return pd.concat([transformer_losses, line_losses])
def get_total_pd_elements_losses(distSys: SystemClass) -> pd.DataFrame:
"""
    Get the total losses of all PD elements.
Ex:
| | name | kw_losses_total | kvar_losses_total |
|:-:|:---------------:|:------------------:|:-----------------:|
| 0 | all_pd_elements | 112.39948405966965 | 327.9256193105294 |
Args:
distSys: An instance of [SystemClass][dssdata.SystemClass]
Returns:
A DataFrame containing the sum of losses of all PD Elements.
""" # noqa
data_loss = []
data = {
"name": "all_pd_elements",
"kw_losses_total": sum(
get_all_pd_elements_losses(distSys)["kw_losses"]
),
"kvar_losses_total": sum(
get_all_pd_elements_losses(distSys)["kvar_losses"]
),
}
data_loss.append(data)
return pd.DataFrame(data_loss)
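

# --- Illustrative usage sketch (not part of the original module) ---
# The tools above are meant to be handed to dssdata's static power-flow runner,
# as the package's own test suite does; the .dss path below is an assumption.
#
#   from dssdata import SystemClass
#   from dssdata.pfmodes import run_static_pf
#
#   distSys = SystemClass(path="IEEE13Nodeckt.dss", kV=[115, 4.16, 0.48], loadmult=1)
#   [total_df] = run_static_pf(distSys, tools=[get_total_pd_elements_losses])
#   print(total_df)  # one row: name, kw_losses_total, kvar_losses_total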
```
#### File: dssdata/test/test_Class.py
```python
import unittest
from dssdata import SystemClass
from .load_datas import load_data_static
class Verifica_Class(unittest.TestCase):
def setUp(self):
self.path_of_system = "test/syste_test_IEEE13bus/IEEE13Nodeckt.dss"
self.value_of_kV = [115, 4.16, 0.48]
self.value_of_load_mult = 1
self.distSys = SystemClass(
path=self.path_of_system,
kV=self.value_of_kV,
loadmult=self.value_of_load_mult,
)
(
self.bus_names,
self.line_names,
self.reg_names,
_,
_,
_,
) = load_data_static()
def test_get_path(self):
self.assertTrue(self.distSys.path == self.path_of_system)
def test_get_kV(self):
self.assertTrue(self.distSys.kV == self.value_of_kV)
def test_get_loadmult(self):
self.assertTrue(self.distSys.loadmult == self.value_of_load_mult)
if __name__ == "__main__":
unittest.main()
```
#### File: dssdata/test/test_Losses.py
```python
import unittest
from dssdata import SystemClass
from dssdata.pfmodes import run_static_pf
from dssdata.tools import losses
from pandas._testing import assert_frame_equal
from .load_loss_data import load_loss_data
class Verifica_Losses_Tools(unittest.TestCase):
def setUp(self):
path_of_system = "test/syste_test_IEEE13bus/IEEE13Nodeckt.dss"
value_of_kV = [115, 4.16, 0.48]
value_of_load_mult = 1
self.distSys = SystemClass(
path=path_of_system, kV=value_of_kV, loadmult=value_of_load_mult
)
(
self.pd_losses,
self.pd_element_loss,
self.pd_line_losses,
self.pd_trafos_losses,
self.pd_total_losses,
) = load_loss_data()
def test_pd_element_loss(self):
[element_loss] = run_static_pf(
self.distSys,
tools=[
lambda distSys: losses.pd_element_loss(
self.distSys, element_name='xfm1',
element_type='Transformer'
)
]
)
try:
assert_frame_equal(
self.pd_element_loss,
element_loss,
check_dtype=False,
)
except AssertionError as err:
raise err
self.assertTrue(True)
def test_pd_list_loss(self):
[element_list_loss] = run_static_pf(
self.distSys, tools=[
lambda distSys: losses.pd_element_loss_list(
self.distSys,
distSys.dss.Transformers.AllNames(),
element_type='Transformer'
)
]
)
try:
assert_frame_equal(
self.pd_trafos_losses.reset_index(drop=True),
element_list_loss.reset_index(drop=True),
check_dtype=False,
)
except AssertionError as err:
raise err
self.assertTrue(True)
def test_pd_loss(self):
[LossDataFrame] = run_static_pf(
self.distSys, tools=[
losses.get_all_pd_elements_losses
]
)
try:
assert_frame_equal(
self.pd_losses.reset_index(drop=True),
LossDataFrame.reset_index(drop=True),
check_dtype=False,
)
except AssertionError as err:
raise err
self.assertTrue(True)
def test_line_loss(self):
[LinesDataFrame] = run_static_pf(
self.distSys, tools=[
losses.get_all_line_losses
]
)
try:
assert_frame_equal(
self.pd_line_losses.reset_index(drop=True),
LinesDataFrame.reset_index(drop=True),
check_dtype=False,
)
except AssertionError as err:
raise err
self.assertTrue(True)
def test_trafo_loss(self):
[TrafoDataFrame] = run_static_pf(
self.distSys, tools=[
losses.get_all_transformers_losses
]
)
try:
assert_frame_equal(
self.pd_trafos_losses.reset_index(drop=True),
TrafoDataFrame.reset_index(drop=True),
check_dtype=False,
)
except AssertionError as err:
raise err
self.assertTrue(True)
def test_total_loss(self):
[TotalDataFrame] = run_static_pf(
self.distSys, tools=[
losses.get_total_pd_elements_losses
]
)
try:
assert_frame_equal(
self.pd_total_losses.reset_index(drop=True),
TotalDataFrame,
check_dtype=False,
)
except AssertionError as err:
raise err
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JonasVil/MunozDelgado2014",
"score": 3
} |
#### File: JonasVil/MunozDelgado2014/Data_138Bus.py
```python
import numpy as np
import pandas as pd
def power_out(k, speed):
    """Approximate power output (MW) of wind generator alternative k at wind speed `speed` (m/s)."""
if k == 1:
WG = np.array([[3, 4.0],
[4, 20.0],
[5, 50.0],
[6, 96.0],
[7, 156.0],
[8, 238.0],
[9, 340.0],
[10, 466.0],
[11, 600.0],
[12, 710.0],
[13, 790.0],
[14, 850.0],
[15, 880.0],
[16, 905.0],
[17, 910.0]]
)
elif k == 2:
WG = np.array([[2, 3.0],
[3, 25.0],
[4, 82.0],
[5, 174.0],
[6, 321.0],
[7, 532.0],
[8, 815.0],
[9, 1180.0],
[10, 1580.0],
[11, 1810.0],
[12, 1980.0],
[13, 2050.0]]
)
if k == 1 and speed < 3:
Pr = 0
elif k == 1 and speed >= 17:
Pr = 0.91
elif k == 2 and speed < 2:
Pr = 0
elif k == 2 and speed >= 13:
Pr = 2.05
else:
speed_aux1 = int(speed)
speed_aux2 = speed_aux1 + 1
loc_aux1 = np.where(speed_aux1 == WG[:,0])[0].item()
loc_aux2 = np.where(speed_aux2 == WG[:,0])[0].item()
Pr_aux1 = (speed*WG[loc_aux1,1])/speed_aux1
Pr_aux2 = (speed*WG[loc_aux2,1])/speed_aux2
Pr = ((Pr_aux1+Pr_aux2)/2)/1000
return Pr
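
# Illustrative example (not in the original script): power_out(1, 10.5) scales the
# 10 m/s (466 kW) and 11 m/s (600 kW) entries of the type-1 power curve proportionally,
# averages them and converts to MW, returning roughly 0.53 MW.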
# =============================================================================
# System Data
# =============================================================================
n_bus = 138 #Number of buses
n_branches = 151 #Number of branches
load_factor = [0.7, 0.83, 1]
#EFF = Existing Fixed Feeder
#ERF = Existing Replaceable Feeder
#NRF = New Replacement Feeder
#NAF = New Added Feeder
line_data = pd.read_csv("138_line_data.csv")
branch = []
for i in range(line_data.shape[0]):
s = line_data['From'][i]
r = line_data['to'][i]
l = np.round(line_data['Lenght'][i],2)
tYpe = line_data['Type'][i]
branch.append(((s,r), l, tYpe))
load_zone = pd.read_csv("138_load_zone.csv")
peak_demand = np.full((load_zone.shape[0],10),0,dtype=float)
for i in range(0,load_zone.shape[0]):
for j in range(1,10+1):
peak_demand[i,j-1] = load_zone[str(j)][i]/1000
#Zones A = 1, B = 2, C = 3
#Buses= 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 ... 138
node_zone = np.full((1,load_zone.shape[0]),0,dtype=int)
for i in range(0,load_zone.shape[0]):
if load_zone['Zone'][i] == 'A':
node_zone[0,i] = 1
elif load_zone['Zone'][i] == 'B':
node_zone[0,i] = 2
elif load_zone['Zone'][i] == 'C':
node_zone[0,i] = 3
wind_speed = np.array([#Load Level (m/s)
#1 2 3
[8.53, 9.12, 10.04], #Zone A
[6.13, 7.26, 7.11], #Zone B
[4.13, 5.10, 5.56] #Zone C
])
# =============================================================================
# Sets of Indexes
# =============================================================================
B = np.arange(1, len(load_factor)+1, dtype=int) #Set of Load Levels
T = np.arange(1, np.shape(peak_demand)[1]+1, dtype=int) #Set of Time Stages
L = ["EFF", "ERF", "NRF", "NAF"] #Set of Feeder Types
#C = Conventional
#W = Wind Generation
P = ["C", "W"] #Set of Generator Types
#ET = Existing Transformer
#NT = New Transformer
TR = ["ET", "NT"] #Set of Transformer Types
# =============================================================================
# Sets of Alternatives
# =============================================================================
K_l = {"EFF": [1], #Sets of available alternatives for feeders
"ERF": [1],
"NRF": [1, 2],
"NAF": [1, 2]
}
K_p = {"C": [1, 2], #Sets of available alternatives for generators
"W": [1, 2]
}
K_tr = {"ET": [1], #Sets of available alternatives for transformers
"NT": [1, 2]
}
# =============================================================================
# Sets of Branches
# =============================================================================
Upsilon_l = {"EFF": [],
"ERF": [],
"NRF": [],
"NAF": []
}
for branch_type in L: #Set of branches with feeders of type l
for b in branch:
if b[2] == branch_type:
s = b[0][0]
r = b[0][1]
Upsilon_l[branch_type].append((s,r))
Upsilon_l["NRF"] = Upsilon_l["ERF"]
# =============================================================================
# Sets of Nodes
# =============================================================================
Omega_SS = [136, 137, 138] #Set of substation nodes
Omega_SSE = [136, 137] # Fixing eq14
Omega_SSN = [138] # Fixing eq14
Omega_l_s = {"EFF": [[] for i in range(0,n_bus)], #Sets of nodes connected to node s by a feeder of type l
"ERF": [[] for i in range(0,n_bus)],
"NRF": [[] for i in range(0,n_bus)],
"NAF": [[] for i in range(0,n_bus)]
}
for branch_type in L:
    for (s, r) in Upsilon_l[branch_type]:
        Omega_l_s[branch_type][s-1].append(r)
        Omega_l_s[branch_type][r-1].append(s)
Omega_LN_t = {1: [indx+1 for indx,value in enumerate(peak_demand[:, 0]) if value > 0], #Sets of load nodes with nonzero demand at stage t
2: [indx+1 for indx,value in enumerate(peak_demand[:, 1]) if value > 0],
3: [indx+1 for indx,value in enumerate(peak_demand[:, 2]) if value > 0],
4: [indx+1 for indx,value in enumerate(peak_demand[:, 3]) if value > 0],
5: [indx+1 for indx,value in enumerate(peak_demand[:, 4]) if value > 0],
6: [indx+1 for indx,value in enumerate(peak_demand[:, 5]) if value > 0],
7: [indx+1 for indx,value in enumerate(peak_demand[:, 6]) if value > 0],
8: [indx+1 for indx,value in enumerate(peak_demand[:, 7]) if value > 0],
9: [indx+1 for indx,value in enumerate(peak_demand[:, 8]) if value > 0],
10: [indx+1 for indx,value in enumerate(peak_demand[:, 9]) if value > 0],
}
Omega_N = np.arange(1, n_bus+1, dtype=int) #Set of all system nodes
Omega_p = {"C": [10, 28, 38, 53, 64, 94, 108, 117, 126, 133], #Sets of nodes connected to node s by distributed generation
"W": [31, 52, 78, 94, 103, 113, 114, 116, 120, 122]
}
# =============================================================================
# Energy Costs
# =============================================================================
#Load Levels
# 1 2 3
C_SS_b = [57.7, 70, 85.3] #the costs of the energy supplied by all substations
#DG units
C_Ep_k = {"C": [47, 45], #Conventional DG
"W": [0, 0] #Windy DG
}
#Cost for unserved energy
C_U = 2000
# =============================================================================
# Investment Costs
# =============================================================================
C_Il_k = {"NRF": [29870, 39310], #Investment cost coefficients of feeders
"NAF": [25030, 34920]
}
C_INT_k = [500000, 950000] #Investment cost coefficients of new transformers
C_Ip_k = {"C": [500000, 490000], #Investment cost coefficients of generators
"W": [1850000, 1840000]
}
C_ISS_s = {136: 100000, #Investment cost coefficients of substations
137: 100000,
138: 150000
}
# =============================================================================
# Maintenance Costs
# =============================================================================
C_Ml_k = {"EFF": [450], #Maintenance cost coefficients of feeders
"ERF": [450],
"NRF": [450, 450],
"NAF": [450, 450]
}
C_Mp_k = {"C": [0.05*0.9*500000*1, 0.05*0.9*490000*2], #Maintenance cost coefficients of generators
"W": [0.05*0.9*1850000*0.91, 0.05*0.9*1840000*2.05]
}
C_Mtr_k = {"ET": [2000], #Maintenance cost coefficients of transformers
"NT": [1000, 3000]
}
# =============================================================================
# System's Data
# =============================================================================
D__st = peak_demand #Actual nodal peak demand
Dtio_stb = np.full((np.shape(Omega_N)[0],np.shape(T)[0],np.shape(B)[0]),0,dtype=float) #fictitious nodal demand
for s in range(np.shape(Omega_N)[0]):
for t in range(np.shape(T)[0]):
for b in range(np.shape(B)[0]):
if (s+1 in Omega_p["C"] or s+1 in Omega_p["W"]) and s+1 in Omega_LN_t[t+1]:
Dtio_stb[s,t,b] = 1
else:
Dtio_stb[s,t,b] = 0
Fup_l_k = {"EFF": [6.28], #Upper limit for actual current flows through (MVA)
"ERF": [6.28],
"NRF": [9.00, 12.00],
"NAF": [6.28, 9.00]
}
Gup_p_k = {"C": [1.00, 2.00], #Rated capacities of generators
"W": [0.91, 2.05]
}
# Ref: https://wind-turbine.com/download/101655/enercon_produkt_en_06_2015.pdf
Gmax_W_sktb = np.full((np.shape(Omega_N)[0],np.shape(K_p["W"])[0],np.shape(T)[0],np.shape(B)[0]),0,dtype=float) #maximum wind power availability.
for s in range(np.shape(Omega_N)[0]): #Bus
for k in range(np.shape(K_p["W"])[0]): #Option
for t in range(np.shape(T)[0]): #Stage
for b in range(np.shape(B)[0]): #Load Level
zone = node_zone[0,s]
speed = wind_speed[zone-1,b]
Gmax_W_sktb[s,k,t,b] = power_out(k+1,speed)
Gup_tr_k = {"ET": [12], #Upper limit for current injections of transformers.
"NT": [7.5, 15]
}
Vbase = 13.8 #kV
V_ = 0.95*Vbase #Lower bound for nodal voltages
Vup = 1.05*Vbase #Upper bound for nodal voltages
V_SS = 1.05*Vbase #Voltage at the substations
l__sr = np.full((np.shape(Omega_N)[0],np.shape(Omega_N)[0]),0,dtype=float) #Feeder length.
for b in branch:
s, r = b[0]
l__sr[s-1,r-1] = b[1]
l__sr[r-1,s-1] = b[1]
n__DG = np.add.reduce([np.shape(Omega_p[p]) for p in P])[0] #Number of candidate nodes for installation of distributed generation
n__T = np.shape(T)[0] #number of time stages
pf = 0.9 #System power factor
H = Vup - V_ #Ref: DOI: 10.1109/TPWRS.2017.2764331
# =============================================================================
# Assets Data
# =============================================================================
i = 7.1/100 #Annual interest rate.
IB__t = [5000000, 5000000, 5000000, 5000000, 5000000, 5000000, 5000000, 5000000, 5000000, 5000000] #Investment budget for stage t
Eta_l = {"NRF": 25, #Lifetimes of feeders in year
"NAF": 25
}
Eta_NT = 15 #Lifetime of new transformers
Eta_p = {"C": 20, #Lifetime of generators
"W": 20
}
Eta_SS = 100 #Lifetime of substations
RR_l = {"NRF": (i*(1+i)**Eta_l["NRF"])/((1+i)**Eta_l["NRF"] - 1), #Capital recovery rates for investment in feeders
"NAF": (i*(1+i)**Eta_l["NAF"])/((1+i)**Eta_l["NAF"] - 1)
}
RR_NT = (i*(1+i)**Eta_NT)/((1+i)**Eta_NT - 1) #Capital recovery rates for investment in new transformers
RR_p = {"C": (i*(1+i)**Eta_p["C"])/((1+i)**Eta_p["C"] - 1), #Capital recovery rates for investment in generators
"W": (i*(1+i)**Eta_p["W"])/((1+i)**Eta_p["W"] - 1)
}
RR_SS = i #Capital recovery rates for investment in substations.
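
# Illustrative sanity check (not in the original script): with i = 7.1 % and a
# 25-year feeder lifetime, the capital recovery rate is
#   0.071 * 1.071**25 / (1.071**25 - 1) ≈ 0.087
# i.e. roughly 8.7 % of the feeder investment cost is annualised per year.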
Z_l_k = {"EFF": [0.557], #Unitary impedance magnitude of feeders
"ERF": [0.557],
"NRF": [0.478, 0.423],
"NAF": [0.557, 0.478]
}
Z_tr_k = {"ET": [0.16], #impedance magnitude of transformers
"NT": [0.25, 0.13]
}
Delta__b = [2000, 5760, 1000] #Duration of load level b
Mi__b = load_factor #Loading factor of load level b
#Vare = 0.25 #Penetration limit for distributed generation.
# =============================================================================
# Piecewise Linearization
# =============================================================================
n__V = 3 #number of blocks of the piecewise linear energy losses
M_l_kV = {"EFF": [[]], #Slope of block V of the piecewise linear energy losses for feeders
"ERF": [[]],
"NRF": [[], []],
"NAF": [[], []]
}
A_l_kV = {"EFF": [[]], #Width of block V of the piecewise linear energy losses for feeders
"ERF": [[]],
"NRF": [[], []],
"NAF": [[], []]
}
for l in L:
for k in K_l[l]:
for V in range(1,n__V+1,1):
M_l_kV[l][k-1].append((2*V - 1)*Z_l_k[l][k-1]*Fup_l_k[l][k-1]/(n__V*(Vbase**2)))
A_l_kV[l][k-1].append(Fup_l_k[l][k-1]/n__V)
M_tr_kV = {"ET": [[]], #Slope of block V of the piecewise linear energy losses for transformers
"NT": [[],[]]
}
A_tr_kV = {"ET": [[]], #Width of block V of the piecewise linear energy losses for transformers
"NT": [[],[]]
}
for tr in TR:
for k in K_tr[tr]:
for V in range(1,n__V+1,1):
M_tr_kV[tr][k-1].append((2*V - 1)*Z_tr_k[tr][k-1]*Gup_tr_k[tr][k-1]/(n__V*(V_SS**2)))
A_tr_kV[tr][k-1].append(Gup_tr_k[tr][k-1]/n__V)
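
# Note (illustrative, not in the original script): these loops build the standard
# piecewise linearisation of the quadratic I^2*Z loss term. For each feeder or
# transformer alternative, block V has slope (2V - 1)*Z*F_up/(n__V*U**2) and width
# F_up/n__V (U = Vbase for feeders, V_SS for transformers); summing slope*width over
# all n__V full blocks recovers the quadratic loss Z*F_up**2/U**2 at maximum flow.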
``` |
{
"source": "jonasvj/MLOps",
"score": 2
} |
#### File: src/models/model.py
```python
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
import torchmetrics
from torch.optim import Adam
def conv_dim(input_height, input_width, kernel_size, stride, padding):
    """Output (height, width) of a convolution/pooling layer for the given kernel, stride and padding."""
new_height = int(
(input_height - kernel_size[0] + 2 * padding[0]) / stride[0] + 1)
new_width = int(
(input_width - kernel_size[1] + 2 * padding[1]) / stride[1] + 1)
return (new_height, new_width)
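
# Illustrative example (not part of the original module):
#   conv_dim(28, 28, (8, 8), (1, 1), (0, 0)) -> (21, 21)
# which matches the output of conv_1 below on a 28x28 single-channel image.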
class ConvNet(nn.Module):
def __init__(self, height=28, width=28, channels=1, classes=10, dropout=0.25):
super(ConvNet, self).__init__()
self.width = width
self.height = height
self.channels = channels
self.classes = classes
self.dropout_rate = dropout
self.conv_1 = nn.Conv2d(
in_channels=self.channels,
out_channels=16,
kernel_size=8,
stride=1,
padding=0,
)
self.conv_1_dim = conv_dim(
self.height,
self.width,
self.conv_1.kernel_size,
self.conv_1.stride,
self.conv_1.padding,
)
self.conv_2 = nn.Conv2d(
in_channels=16,
out_channels=8,
kernel_size=4,
stride=1,
padding=0,
)
self.conv_2_dim = conv_dim(
self.conv_1_dim[0],
self.conv_1_dim[1],
self.conv_2.kernel_size,
self.conv_2.stride,
self.conv_2.padding,
)
self.max_pool = nn.MaxPool2d(
kernel_size=2,
stride=1,
padding=0)
self.max_pool_dim = conv_dim(
self.conv_2_dim[0],
self.conv_2_dim[1],
(self.max_pool.kernel_size, self.max_pool.kernel_size),
(self.max_pool.stride, self.max_pool.stride),
(self.max_pool.padding, self.max_pool.padding),
)
self.linear = nn.Linear(
in_features=self.conv_2.out_channels
* self.max_pool_dim[0]
* self.max_pool_dim[1],
out_features=self.classes,
)
self.dropout = nn.Dropout(p=self.dropout_rate)
self.embeddings = None
def forward(self, x):
if x.ndim not in [3, 4]:
raise ValueError('Expected input to be a 3D or 4D tensor')
x = self.dropout(F.relu(self.conv_1(x)))
x = self.dropout(F.relu(self.conv_2(x)))
x = self.dropout(self.max_pool(x))
self.embeddings = x.reshape(-1, self.linear.in_features)
x = self.linear(self.embeddings)
return x
@staticmethod
def add_model_specific_args(parent_parser):
parser = parent_parser.add_argument_group('ConvNet')
parser.add_argument('--height', type=int, default=28)
parser.add_argument('--width', type=int, default=28)
parser.add_argument('--channels', type=int, default=1)
parser.add_argument('--classes', type=int, default=10)
parser.add_argument('--dropout', type=float, default=0.25)
return parent_parser
@staticmethod
def from_argparse_args(namespace):
ns_dict = vars(namespace)
args = {
'height': ns_dict.get('height', 28),
'width': ns_dict.get('width', 28),
'channels': ns_dict.get('channels', 1),
'classes': ns_dict.get('classes', 10),
'dropout': ns_dict.get('dropout', 0.25),
}
return args
class ImageClassifier(pl.LightningModule):
def __init__(self, model, lr=3e-4):
super(ImageClassifier, self).__init__()
self.model = model
self.lr = lr
self.loss_func = nn.CrossEntropyLoss()
self.train_acc = torchmetrics.Accuracy()
self.val_acc = torchmetrics.Accuracy()
self.test_acc = torchmetrics.Accuracy()
self.save_hyperparameters()
def forward(self, x):
# use forward for inference/predictions
return self.model(x)
def training_step(self, batch, batch_idx):
images, targets = batch
preds = self.model(images)
loss = self.loss_func(preds, targets)
return {'loss': loss, 'preds': preds, 'targets': targets}
def training_step_end(self, outputs):
self.train_acc(F.softmax(outputs['preds'], dim=1), outputs['targets'])
self.log_dict(
{'train_acc': self.train_acc, 'train_loss': outputs['loss']},
on_step=True,
on_epoch=True,
prog_bar=True)
return outputs
def validation_step(self, batch, batch_idx):
images, targets = batch
preds = self.model(images)
loss = self.loss_func(preds, targets)
return {'loss': loss, 'preds': preds, 'targets': targets}
def validation_step_end(self, outputs):
self.val_acc(F.softmax(outputs['preds'], dim=1), outputs['targets'])
self.log_dict(
{'val_acc': self.val_acc, 'val_loss': outputs['loss']},
on_step=True,
on_epoch=True,
prog_bar=True)
def test_step(self, batch, batch_idx):
images, targets = batch
preds = self.model(images)
loss = self.loss_func(preds, targets)
return {'loss': loss, 'preds': preds, 'targets': targets}
def test_step_end(self, outputs):
self.test_acc(F.softmax(outputs['preds'], dim=1), outputs['targets'])
self.log_dict(
{'test_acc': self.test_acc, 'test_loss': outputs['loss']},
on_step=False,
on_epoch=True,
prog_bar=False)
def configure_optimizers(self):
return Adam(self.parameters(), lr=self.lr)
@staticmethod
def add_model_specific_args(parent_parser):
parser = parent_parser.add_argument_group('ImageClassifier')
parser.add_argument('--lr', default=3e-4, type=float)
return parent_parser
@staticmethod
def from_argparse_args(namespace):
ns_dict = vars(namespace)
args = {
'lr': ns_dict.get('lr', 3e-4),
}
return args
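

# Illustrative usage sketch (not part of the original module); the dataloaders and
# Trainer arguments below are assumptions:
#
#   model = ConvNet(height=28, width=28, channels=1, classes=10, dropout=0.25)
#   classifier = ImageClassifier(model, lr=3e-4)
#   trainer = pl.Trainer(max_epochs=2)
#   trainer.fit(classifier, train_dataloader, val_dataloader)
#   trainer.test(classifier, test_dataloader)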
```
#### File: MLOps/tests/test_training.py
```python
import torch
import wandb
from argparse import Namespace
from src.models.train_model import train, evaluate
from src.utils import get_data
wandb.init(mode='disabled')
args_dict = {
'epochs': 2,
'dropout': 0.25,
'lr': 3e-4,
'mb_size': 64,
'data_path': 'data/'
}
args = Namespace(**args_dict)
train_loader, test_loader = get_data(args)
class TestTrain:
def test_weights(self):
model, init_weights = train(args, train_loader, test_steps=2)
assert not torch.all(torch.eq(init_weights, model.linear.weight))
def test_evaluate(self):
model, init_weights = train(args, train_loader, test_steps=1)
accuracy = evaluate(model, test_loader)
assert isinstance(accuracy, float)
assert 0 <= accuracy and accuracy <= 1
``` |
{
"source": "jonasvj/protein-generation",
"score": 3
} |
#### File: src/models/gru.py
```python
import torch
import torch.nn as nn
class GruNet(nn.Module):
"""GRU network that accepts a mini batch"""
def __init__(self, n_tokens, embedding_size, hidden_size, n_layers,
dropout=0.5, bidirectional=False, pad_idx=0):
super(GruNet, self).__init__()
self.model = "gru"
self.n_tokens = n_tokens
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.n_layers = n_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.pad_idx = pad_idx
if self.bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
self.encoder = nn.Embedding(self.n_tokens, self.embedding_size,
padding_idx=self.pad_idx)
self.drop = nn.Dropout(self.dropout)
self.rnn = nn.GRU(input_size=self.embedding_size,
hidden_size=self.hidden_size,
num_layers=self.n_layers,
dropout=self.dropout,
bidirectional=self.bidirectional)
self.decoder = nn.Linear(self.hidden_size*self.num_directions,
self.n_tokens)
def forward(self, input_tensor, input_lengths):
"""Expects input tensor of shape (batch, max_seq_len)"""
# Embed input
emb = self.encoder(input_tensor)
emb = self.drop(emb)
# Reshape from (batch, max_seq_len, emb) to (max_seq_len, batch, emb)
emb = emb.permute(1, 0, 2)
# RNN
emb = nn.utils.rnn.pack_padded_sequence(emb, input_lengths)
output, hidden_state = self.rnn(emb)
output, _ = nn.utils.rnn.pad_packed_sequence(output)
output = self.drop(output)
# Get embedding of sequence
emb_seq = hidden_state[-1]
# Decode
decoded = self.decoder(output)
# Reshape from (max_seq_len, batch, n_tokens) to
# (batch, n_tokens, max_seq_length)
decoded = decoded.permute(1,2,0)
return {'output': decoded, 'emb_1': emb_seq, 'emb_2': emb_seq}
if __name__ == '__main__':
models_args = {'n_tokens': 10,
'embedding_size': 5,
'hidden_size': 32,
'n_layers': 2,
'dropout': 0.5,
'bidirectional': False,
'pad_idx': 0}
net = GruNet(**models_args)
input_ = torch.LongTensor([[1,4,5,1,2],[1,2,3,0,0]])
input_lengths = [5, 3]
output = net(input_, input_lengths)
``` |
{
"source": "jonasvj/TFDE",
"score": 3
} |
#### File: TFDE/datasets/synthetic.py
```python
import datasets
import numpy as np
class EightGaussians:
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
file = datasets.root + 'synthetic/8gaussians.npy'
trn, val, tst = load_data_normalised(file)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
class Checkerboard:
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
file = datasets.root + 'synthetic/checkerboard.npy'
trn, val, tst = load_data_normalised(file)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
class TwoSpirals:
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
file = datasets.root + 'synthetic/2spirals.npy'
trn, val, tst = load_data_normalised(file)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
def load_data(root_path):
rng = np.random.RandomState(seed=42)
data = np.load(root_path)
rng.shuffle(data)
n_train = int((1/3) * data.shape[0])
n_val = int((1/3) * data.shape[0])
data_train = data[0:n_train]
data_val = data[n_train:n_train+n_val]
data_test = data[n_train+n_val:]
return data_train, data_val, data_test
def load_data_normalised(root_path):
data_train, data_val, data_test = load_data(root_path)
mu = data_train.mean(axis=0)
s = data_train.std(axis=0)
data_train = (data_train - mu) / s
data_val = (data_val - mu) / s
data_test = (data_test - mu) / s
return data_train, data_val, data_test
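
# Illustrative example (not part of the original module): each dataset wrapper exposes
# the same interface, e.g.
#   data = TwoSpirals()
#   data.trn.x   # float32 array of shape (n_train, 2), standardised with training mean/std
#   data.n_dims  # 2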
```
#### File: jonasvj/TFDE/plot_grid.py
```python
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def tt_params(K, M):
return K + 3*M*K**2
def tt_dof(K, M):
return (K-1) + M*K*(K-1) + 2*M*K**2
def bic(n, k, nllh_per_sample):
log_lh = -1*nllh_per_sample*n
return k*np.log(n) - 2*log_lh
def aic(n, k, nllh_per_sample):
log_lh = -1*nllh_per_sample*n
return 2*k - 2*log_lh
def n_params(model, K, M):
if model == 'TT':
return (K-1) + M*K*(K-1) + 2*M*K*K
elif model == 'CP':
return (K-1) + 2*M*K
elif model == 'GMM':
return (K-1) + (2*M + M*(M-1)/2)*K
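
# Illustrative example (not in the original script): a TT model with K = 4 components
# over M = 2 dimensions has n_params('TT', 4, 2) = 3 + 2*4*3 + 2*2*16 = 91 free parameters.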
sizes = {
'power': {'n_train': 1659917, 'n_val': 184435, 'n_test': 204928, 'M': 6},
'gas': {'n_train': 852174, 'n_val': 94685, 'n_test': 105206, 'M': 8},
'hepmass': {'n_train': 315123, 'n_val': 35013, 'n_test': 174987, 'M': 21},
'miniboone': {'n_train': 29556, 'n_val': 3284, 'n_test': 3648, 'M': 43},
'bsds300': {'n_train': 1000000, 'n_val': 50000, 'n_test': 250000, 'M': 63},
'8gaussians': {'n_train': 30000, 'n_val': 30000, 'n_test': 30000, 'M': 2},
'checkerboard': {'n_train': 30000, 'n_val': 30000, 'n_test': 30000, 'M': 2},
'2spirals': {'n_train': 30000, 'n_val': 30000, 'n_test': 30000, 'M': 2}
}
df = pd.read_csv('results/grid_results.txt', index_col=0)
df_gmm = pd.read_csv('results/gmm_results.txt', index_col=0)
df = df.append(df_gmm, ignore_index=True)
df = df[df.optimal_order == 1]
print(df)
# Add new columns
df['M'] = df.apply(lambda row: sizes[row.dataset]['M'], axis=1)
df['dof'] = df.apply(lambda row: n_params(row.model_type, row.K, row.M), axis=1)
datasets = ['hepmass', 'miniboone']
subsample_sizes = [1750, 7000, 28000]
groups = df.groupby(['dataset', 'subsample_size'])
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(24, 12),
sharex='all', sharey='row')
for i, (group, frame) in enumerate(groups):
row_idx = datasets.index(group[0])
col_idx = subsample_sizes.index(group[1])
model_groups = frame.groupby(['model_type'])
for model, model_frame in model_groups:
mean = model_frame.groupby('dof').mean()
sem = model_frame.groupby('dof').sem()
min_ = model_frame.groupby('dof').min()
axes[row_idx, col_idx].errorbar(
mean.index, mean.nllh_test, yerr=sem.nllh_test, fmt='.:',
label=model, alpha=.75, capsize=3, capthick=1)
axes[row_idx, col_idx].set_xlabel('Free parameters')
axes[row_idx, col_idx].set_ylabel(f'Test NLLH per sample ({group[0]})')
axes[row_idx, col_idx].set_title(f'Subsample size: {group[1]}')
axes[row_idx, col_idx].legend()
fig.savefig('plots/' + 'grid_plot.pdf')
plt.close()
``` |
{
"source": "jonasw234/bookcut",
"score": 3
} |
#### File: bookcut/bookcut/repositories.py
```python
from bs4 import BeautifulSoup as soup
import mechanize
from bookcut.mirror_checker import pageStatus, main as mirror_checker, CONNECTION_ERROR_MESSAGE
import pandas as pd
ARCHIV_URL = 'https://export.arxiv.org/find/grp_cs,grp_econ,grp_eess,grp_math,grp_physics,grp_q-bio,grp_q-fin,grp_stat'
ARCHIV_BASE = 'https://export.arxiv.org'
def arxiv(term):
    # Search arxiv.org and return a DataFrame with the found results.
status = pageStatus(ARCHIV_URL)
if status:
br = mechanize.Browser()
br.set_handle_robots(False) # ignore robots
br.set_handle_refresh(False) #
br.addheaders = [('User-agent', 'Firefox')]
br.open(ARCHIV_URL)
br.select_form(nr=0)
input_form = term
br.form['query'] = input_form
ac = br.submit()
html_from_page = ac
html_soup = soup(html_from_page, 'html.parser')
t = html_soup.findAll('div', {'class': 'list-title mathjax'})
titles = []
for i in t:
raw = i.text
raw = raw.replace('Title: ', '')
raw = raw.replace('\n', "")
titles.append(raw)
authors = []
auth_soup = html_soup.findAll('div', {'class': 'list-authors'})
for i in auth_soup:
raw = i.text
raw = raw.replace('Authors:', '')
raw = raw.replace('\n', "")
authors.append(raw)
extensions = []
urls = []
ext = html_soup.findAll('span', {'class': 'list-identifier'})
for i in ext:
a = i.findAll('a')
link = a[1]['href']
extensions.append(str(a[1].text))
urls.append(ARCHIV_BASE+link)
arxiv_df = pd.DataFrame({'Title': titles, 'Author(s)': authors,
'Url': urls, 'Extension': extensions})
return arxiv_df
else:
print(CONNECTION_ERROR_MESSAGE.format('ArXiv'))
return None
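
# Illustrative example (not part of the original module): arxiv("tensor networks")
# returns a DataFrame with the columns Title, Author(s), Url and Extension, or None
# when export.arxiv.org is unreachable.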
def libgen_repo(term):
    # Search LibGen and return a DataFrame with the results.
try:
url = mirror_checker()
if url is not None:
br = mechanize.Browser()
br.set_handle_robots(False) # ignore robots
br.set_handle_refresh(False) #
br.addheaders = [('User-agent', 'Firefox')]
br.open(url)
br.select_form('libgen')
input_form = term
br.form['req'] = input_form
ac = br.submit()
html_from_page = ac
html_soup = soup(html_from_page, 'html.parser')
table = html_soup.find_all('table')[2]
table_data = []
mirrors = []
extensions = []
for i in table:
j = 0
try:
td = i.find_all('td')
for tr in td:
# scrape mirror links
if j == 9:
temp = tr.find('a', href=True)
mirrors.append(temp['href'])
j = j + 1
row = [tr.text for tr in td]
table_data.append(row)
extensions.append(row[8])
except:
pass
# Clean result page
for j in table_data:
j.pop(0)
del j[8:15]
headers = ['Author(s)', 'Title', 'Publisher', 'Year', 'Pages',
'Language', 'Size', 'Extension']
tabular = pd.DataFrame(table_data)
tabular.columns = headers
tabular['Url'] = mirrors
return tabular
except ValueError:
        # Create an empty DataFrame
df = pd.DataFrame()
return df
```
#### File: jonasw234/bookcut/conftest.py
```python
def pytest_addoption(parser):
parser.addoption('--web', action='store_true', dest="web",
default=False, help="enable tests requiring an internet connection")
def pytest_configure(config):
if not config.option.web:
setattr(config.option, 'markexpr', 'not web')
```
#### File: bookcut/tests/test_bookcut.py
```python
import pytest
from click.testing import CliRunner
from bookcut import __version__
from bookcut.bookcut import entry
def test_entry_with_version_option():
cli_output = CliRunner().invoke(entry, ["--version"])
assert cli_output.exit_code == 0
assert cli_output.output == f"commands, version {__version__}\n"
``` |
{
"source": "jonasw234/systeminfo.py",
"score": 2
} |
#### File: systeminfo.py/systeminfo/systeminfo.py
```python
import os
import sys
from datetime import datetime, timedelta
from docopt import docopt
from regipy.exceptions import RegistryKeyNotFoundException
from regipy.registry import ConstError, RegistryHive
def determine_current_control_set(system_hive: RegistryHive) -> str:
"""
Determine the current control set.
Parameters
----------
system_hive : RegistryHive
The system hive to parse
Returns
-------
str
The path to the current control set
"""
current_control_set = system_hive.get_key("\\Select").get_value("Current")
for control_set in system_hive.CONTROL_SETS:
if int(control_set[-3:]) == current_control_set:
current_control_set = control_set
break
else:
raise ValueError("Error determining current control set.")
return current_control_set
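
# Illustrative example (not part of the original module): if the Select key's "Current"
# value is 1, the entry of system_hive.CONTROL_SETS whose name ends in "001"
# (typically ControlSet001) is returned.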
def parse_system_hive(system_hive: RegistryHive) -> dict:
"""
Parse system hive and return needed information.
Parameters
----------
system_hive : RegistryHive
The system hive to parse
Returns
-------
dict
Dictionary with the information for systeminfo
"""
# Determine current control set
current_control_set = determine_current_control_set(system_hive)
# Determine current hardware config
try:
current_hardware_config = system_hive.get_key(
"SYSTEM\\HardwareConfig"
).get_value("LastConfig")
except RegistryKeyNotFoundException:
current_hardware_config = None
# Hostname
system_hive_dict = {
"hostname": system_hive.get_key(
f"{current_control_set}\\Services\\Tcpip\\Parameters"
).get_value("Hostname")
}
# BIOS Version
if current_hardware_config:
bios_version = system_hive.get_key(
f"SYSTEM\\HardwareConfig\\{current_hardware_config}"
).get_value("BIOSVersion")
bios_vendor = system_hive.get_key(
f"SYSTEM\\HardwareConfig\\{current_hardware_config}"
).get_value("BIOSVendor")
bios_release_date = system_hive.get_key(
f"SYSTEM\\HardwareConfig\\{current_hardware_config}"
).get_value("BIOSReleaseDate")
system_hive_dict[
"bios_version"
] = f"{bios_vendor} {bios_version}, {bios_release_date}"
else:
system_hive_dict["bios_version"] = "UNKNOWN UNKNOWN, UNKNOWN"
# Domain
system_hive_dict["domain"] = system_hive.get_key(
f"{current_control_set}\\Services\\Tcpip\\Parameters"
).get_value("Domain")
system_hive_dict["domain"] = (
system_hive_dict["domain"] if system_hive_dict["domain"] != 0 else "WORKGROUP"
)
# Page file locations
system_hive_dict["page_file_locations"] = system_hive.get_key(
f"{current_control_set}\\Control\\Session Manager\\Memory Management"
).get_value("PagingFiles")[::3]
# TODO This could probably be improved if I could find the system drive letter in the registry
for idx, page_file_location in enumerate(system_hive_dict["page_file_locations"]):
if page_file_location[0] == "?":
system_hive_dict["page_file_locations"][idx] = page_file_location.replace(
"?",
system_hive.get_key(
f"{current_control_set}\\Control\\Session Manager\\Memory Management"
).get_value("ExistingPageFiles")[0][4],
)
# Page file max size
system_hive_dict["page_file_max_sizes"] = system_hive.get_key(
f"{current_control_set}\\Control\\Session Manager\\Memory Management"
).get_value("PagingFiles")[2::3]
# Boot device
system_hive_dict["boot_device"] = system_hive.get_key("SYSTEM\\Setup").get_value(
"SystemPartition"
)
if current_hardware_config:
# System manufacturer
system_hive_dict["manufacturer"] = system_hive.get_key(
f"SYSTEM\\HardwareConfig\\{current_hardware_config}"
).get_value("SystemManufacturer")
# System model
system_hive_dict["model"] = system_hive.get_key(
f"SYSTEM\\HardwareConfig\\{current_hardware_config}"
).get_value("SystemProductName")
else:
system_hive_dict["manufacturer"] = "UNKNOWN"
system_hive_dict["model"] = "UNKNOWN"
# System type
system_hive_dict["type"] = (
system_hive.get_key(f"{current_control_set}\\Enum\\ROOT\\ACPI_HAL\\0000")
.get_value("DeviceDesc")
.split(";")[1]
.replace("ACPI ", "")
)
# Network adapters
# MAC address can optionally be changed with NetworkAddress entry
network_adapters = dict()
for network_adapter in system_hive.get_key(
"".join(
[
current_control_set,
"\\Control\\Class\\{4d36e972-e325-11ce-bfc1-08002be10318}",
]
)
).iter_subkeys():
if network_adapter.get_value("NetCfgInstanceId"):
network_adapters[network_adapter.get_value("NetCfgInstanceId")] = (
network_adapter.get_value("DriverDesc"),
network_adapter.get_value("NetworkAddress"),
)
interfaces = dict()
for interface in system_hive.get_key(
"".join([current_control_set, "\\Services\\Tcpip\\Parameters\\Interfaces"])
).iter_subkeys():
if not network_adapters.get(interface.name.upper()):
continue
interfaces[interface.name] = {
"desc": network_adapters[interface.name.upper()][0],
"mac": network_adapters[interface.name.upper()][1],
"dhcp_activated": interface.get_value("EnableDHCP") == 1,
"dhcp_server": interface.get_value("DhcpServer"),
"ip_addresses": [interface.get_value("DhcpIPAddress")]
if interface.get_value("DhcpIPAddress")
else interface.get_value("IPAddress"),
"connection_name": system_hive.get_key(
"".join(
[
current_control_set,
"\\Control\\Network\\{4D36E972-E325-11CE-BFC1-08002BE10318}\\",
interface.name.upper(),
"\\Connection",
]
)
).get_value("Name"),
}
if not interfaces[interface.name]["ip_addresses"]:
del interfaces[interface.name]
system_hive_dict["network_cards"] = interfaces
# Processor(s)
system_hive_dict["processors"] = system_hive.get_key(
f"{current_control_set}\\Control\\Session Manager\\Environment"
).get_value(
"PROCESSOR_IDENTIFIER"
) # This is technically not correct, because the real value is in the volatile HKLM\\HARDWARE\\DESCRIPTION\\System\\CentralProcessor subkeys
# Windows/System directory
lsa_library = system_hive.get_key(
f"{current_control_set}\\Services\\Lsa\\Performance"
).get_value(
"Library"
) # It’s a bit of a hack, but I can’t find the real key to read
system_hive_dict["windows_directory"] = "\\".join(lsa_library.split("\\")[:2])
system_hive_dict["system_directory"] = "\\".join(lsa_library.split("\\")[:3])
# Return results
return system_hive_dict
def parse_software_hive(software_hive: RegistryHive) -> dict:
"""
Parse software hive and return needed information.
Parameters
----------
software_hive : RegistryHive
The software hive to parse
Returns
-------
dict
Dictionary with the information for systeminfo
"""
# Registered owner
software_hive_dict = {
"registered_owner": software_hive.get_key(
"Software\\Microsoft\\Windows NT\\CurrentVersion"
).get_value("RegisteredOwner")
}
# OS name
software_hive_dict["os_name"] = " ".join(
[
"Microsoft",
software_hive.get_key(
"Software\\Microsoft\\Windows NT\\CurrentVersion"
).get_value("ProductName"),
]
)
# OS build type
software_hive_dict["os_build_type"] = software_hive.get_key(
"Software\\Microsoft\\Windows NT\\CurrentVersion"
).get_value("CurrentType")
# Product ID
software_hive_dict["product_id"] = software_hive.get_key(
"Software\\Microsoft\\Windows NT\\CurrentVersion"
).get_value("ProductId")
# Install date
software_hive_dict["install_date"] = software_hive.get_key(
"Software\\Microsoft\\Windows NT\\CurrentVersion"
).get_value(
"InstallDate"
) # UTC, Needs timezone offset
# Hotfixes
software_hive_dict["hotfix"] = set(
hotfix.get_value("InstallName").split("_for_")[1].split("~")[0]
for hotfix in software_hive.get_key(
"Software\\Microsoft\\Windows\\CurrentVersion\\Component Based Servicing\\Packages"
).iter_subkeys()
if "_for_KB" in hotfix.get_value("InstallName")
and hotfix.get_value("CurrentState") == 112
) # 112 is successfully installed
software_hive_dict["hotfix"].update(
set(
hotfix.get_value("InstallLocation").split("-")[1]
for hotfix in software_hive.get_key(
"Software\\Microsoft\\Windows\\CurrentVersion\\Component Based Servicing\\Packages"
).iter_subkeys()
if "RollupFix" in hotfix.get_value("InstallName")
and hotfix.get_value("CurrentState") == 112
)
) # 112 is successfully installed
# OS Version
software_hive_dict["os_version"] = " ".join(
[
software_hive.get_key(
"Software\\Microsoft\\Windows NT\\CurrentVersion"
).get_value("ProductName"),
"N/A Build",
software_hive.get_key(
"Software\\Microsoft\\Windows NT\\CurrentVersion"
).get_value("CurrentBuild"),
]
)
# Registered organization
software_hive_dict["registered_organization"] = software_hive.get_key(
"Software\\Microsoft\\Windows NT\\CurrentVersion"
).get_value("RegisteredOrganization")
# Return results
return software_hive_dict
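
# Illustrative example (not part of the original module): a CBS package InstallName
# such as "Package_for_KB5005565~31bf3856ad364e35~amd64~~10.0.1.9" yields the hotfix
# id "KB5005565" via the split on "_for_" and "~" above.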
def parse_timezone_information(
system_hive: RegistryHive, software_hive: RegistryHive
) -> dict:
"""
Parse system and software hives and return needed information.
Parameters
----------
system_hive : RegistryHive
The system hive to parse
software_hive : RegistryHive
The software hive to parse
Returns
-------
dict
Dictionary with the information for systeminfo
"""
# Determine current control set
current_control_set = determine_current_control_set(system_hive)
# Timezone information
timezone_key_name = (
system_hive.get_key(f"{current_control_set}\\Control\\TimeZoneInformation")
.get_value("TimeZoneKeyName")
        .replace("\x00", "")  # strip embedded NUL characters; str(b"\x00") would never match
)
timezone_key_name = timezone_key_name[
: timezone_key_name.find("Time") + len("Time")
]
timezone_information = {
"timezone_desc": software_hive.get_key(
f"Software\\Microsoft\\Windows NT\\CurrentVersion\\Time Zones\\{timezone_key_name}"
).get_value("Display")
}
timezone_information["timezone_offset"] = (
timezone_information["timezone_desc"]
.replace("-", "+")
.split("+")[1]
.split(")")[0]
)
# Return results
return timezone_information
def parse_default_hive(default_hive: RegistryHive) -> dict:
"""
Parse default hive and return needed information.
Parameters
----------
default_hive : RegistryHive
The default hive to parse
Returns
-------
dict
Dictionary with the information for systeminfo
"""
default_hive_dict = {
"system_locale": ";".join(
[
default_hive.get_key(
".DEFAULT\\Control Panel\\International"
).get_value("LocaleName"),
default_hive.get_key(
".DEFAULT\\Control Panel\\International"
).get_value("sCountry"),
]
)
}
# Return results
return default_hive_dict
def main():
"""Find registry hives and invoke parsers."""
# Parse command line arguments
args = docopt(__doc__)
if not os.path.isdir(args["--mountpoint"]):
print(f'Error: {args["--mountpoint"]} is not a directory.')
sys.exit(1)
# Read registry hives
software_hive = None
system_hive = None
try:
# System hive
system_hive_full_path = os.path.join(
args["--mountpoint"], "Windows", "System32", "config", "SYSTEM"
)
if os.path.isfile(os.path.join(args["--mountpoint"], "SYSTEM")):
system_hive = RegistryHive(os.path.join(args["--mountpoint"], "SYSTEM"))
elif os.path.isfile(system_hive_full_path):
system_hive = RegistryHive(system_hive_full_path)
else:
print(
f'Error: Neither {os.path.join(args["--mountpoint"], "SYSTEM")} nor {system_hive_full_path} seem to be correct. Please set the mountpoint directly to the path for the registry hives.'
)
sys.exit(1)
# Software hive
software_hive_full_path = os.path.join(
args["--mountpoint"], "Windows", "System32", "config", "SOFTWARE"
)
if os.path.isfile(os.path.join(args["--mountpoint"], "SOFTWARE")):
software_hive = RegistryHive(os.path.join(args["--mountpoint"], "SOFTWARE"))
elif os.path.isfile(software_hive_full_path):
software_hive = RegistryHive(software_hive_full_path)
else:
print(
f'Error: Neither {os.path.join(args["--mountpoint"], "SOFTWARE")} nor {software_hive_full_path} seem to be correct. Please set the mountpoint directly to the path for the registry hives.'
)
sys.exit(1)
# Default hive
default_hive = None
default_hive_full_path = os.path.join(
args["--mountpoint"], "Windows", "System32", "config", "DEFAULT"
)
if os.path.isfile(os.path.join(args["--mountpoint"], "DEFAULT")):
default_hive = RegistryHive(os.path.join(args["--mountpoint"], "DEFAULT"))
elif os.path.isfile(
os.path.join(
args["--mountpoint"], "Windows", "System32", "config", "DEFAULT"
)
):
default_hive = RegistryHive(default_hive_full_path)
else:
print(
f'Warning: Neither {os.path.join(args["--mountpoint"], "DEFAULT")} nor {default_hive_full_path} seem to be correct. System locale will not be correct.'
)
except ConstError:
print("Invalid registry hives found.")
sys.exit(1)
# Call parsing methods
systeminfo = parse_system_hive(system_hive)
systeminfo.update(parse_software_hive(software_hive))
systeminfo.update(parse_timezone_information(system_hive, software_hive))
if default_hive:
systeminfo.update(parse_default_hive(default_hive))
# Prepare systeminfo-like output
output = f"""Host Name: {systeminfo['hostname'].upper()}
OS Name: {systeminfo['os_name']}
OS Version: {systeminfo['os_version']}
OS Manufacturer: Microsoft Corporation *
OS Configuration: Standalone Workstation *
OS Build Type: {systeminfo['os_build_type']}
Registered Owner: {systeminfo['registered_owner']}
Registered Organization: {systeminfo['registered_organization'] if systeminfo['registered_organization'] else ''}
Product ID: {systeminfo['product_id']}
Original Install Date: {(datetime.fromtimestamp(systeminfo['install_date']) + timedelta(hours=int(systeminfo['timezone_offset'].split(':')[0]), minutes=int(systeminfo['timezone_offset'].split(':')[1]))).strftime('%d-%m-%Y, %H:%M:%S')}
System Boot Time: 0-0-0000, 00:00:00
System Manufacturer: {systeminfo['manufacturer']}
System Model: {systeminfo['model']}
System Type: {systeminfo['type']}
Processor(s): 1 Processor(s) Installed.
[01]: {systeminfo['processors']}
BIOS Version: {systeminfo['bios_version']}
Windows Directory: {systeminfo['windows_directory']}
System Directory: {systeminfo['system_directory']}
Boot Device: {systeminfo['boot_device']}
System Locale: {systeminfo.get('system_locale', 'UNKNOWN')}
Input Locale: en-us;English (United States) *
Time Zone: {systeminfo['timezone_desc']}
Total Physical Memory: 0 MB
Available Physical Memory: 0 MB
Virtual Memory: Max Size: {sum([int(size) for size in systeminfo['page_file_max_sizes']]):,}{' + x' if any([size for size in systeminfo['page_file_max_sizes'] if size == '0']) or not systeminfo['page_file_max_sizes'] else ''} MB
Virtual Memory: Available: 0 MB
Virtual Memory: In Use: 0 MB
Page File Location(s): """
padding = ""
for page_file_location in systeminfo["page_file_locations"]:
output += f"{padding}{page_file_location}\n"
padding = " "
output += f"""Domain: {systeminfo['domain']}
Logon Server: \\\\UNKNOWN
Hotfix(s): {len(systeminfo['hotfix'])} Hotfix(s) Installed.
"""
for idx, hotfix in enumerate(systeminfo["hotfix"], start=1):
output += f" [{str(idx).zfill(2)}]: {hotfix}\n"
output += f'Network Card(s): {len(systeminfo["network_cards"])} NIC(s) Installed.'
for idx, network_card in enumerate(systeminfo["network_cards"].values(), start=1):
output += f"""
[{str(idx).zfill(2)}]: {network_card['desc']}
Connection Name: {network_card['connection_name']}
DHCP Enabled: {'Yes' if network_card['dhcp_activated'] else 'No'}
IP address(es)"""
for idx2, ip_address in enumerate(network_card["ip_addresses"], start=1):
output += f"\n [{str(idx2).zfill(2)}]: {ip_address}"
output += """
Hyper-V Requirements: VM Monitor Mode Extensions: UNKNOWN
Virtualization Enabled In Firmware: UNKNOWN
Second Level Address Translation: UNKNOWN
Data Execution Prevention Available: UNKNOWN
"""
print(output)
if __name__ == "__main__":
main()
``` |
{
"source": "jonasw234/TAINTalyzing",
"score": 3
} |
#### File: modules/c/grammar.py
```python
from __future__ import annotations
import logging
from typing import Generator
from pyparsing import *
from ..abstract_grammar import AbstractGrammar
class Grammar(AbstractGrammar):
"""Grammar definition for C files."""
logger = logging.getLogger('taintalyzing')
def __init__(self, file_):
"""Constructor for a grammar object.
Parameters
----------
file_ : InputFile
The file to parse
"""
super().__init__(file_)
ParserElement.enablePackrat()
# Helpers
self.attribute_separator = oneOf('. ->')
self.ident = Word(alphas, alphanums + '_')
self.ident = Combine(ZeroOrMore(self.ident + self.attribute_separator)('object_name*') +
self.ident('ident*'))
self.vartype = Suppress(Combine(Optional(oneOf('signed unsigned')) + self.ident +
Optional(Word('*')), adjacent=False))
self.array_index = '[' + Word(nums) + ']'
self.rvalue = Forward()
self.func_call = Forward()
self.operators = Suppress(oneOf('|| && | & ^ . -> + - * / % << >> == != < <= > >='))
self.expression = Group(self.rvalue + ZeroOrMore(self.operators + self.rvalue |
self.func_call))
self.expression |= Group(self.func_call + ZeroOrMore(self.operators + (self.rvalue |
self.func_call)))
self.stmt = Forward()
# Function calls
self.param_list = Optional(delimitedList(self.expression))
self.func_call << self.ident('name') + Suppress('(') + self.param_list('args') + \
Suppress(')')
# Control structures -> increase edge count
self.control_structures = ((Keyword('case') + self.expression + ':') |
(Keyword('default') + ':') |
(Keyword('while') + '(' + self.expression + ')') |
(Keyword('for') + '(' + Optional(self.expression) + ';' +
Optional(self.expression) + ';' +
Optional(self.expression) + ')') |
(Keyword('goto') + self.ident))('control_structure')
# Mutually exclusive combinations: else if-else if, else if-else, else-if, if-else if, if-else, if,
# else
self.mutually_exclusive_helper_expr = Suppress('(') + self.expression + Suppress(')')
self.mutually_exclusive_helper_body = Suppress('{') + ZeroOrMore(self.stmt) + Suppress('}')
self.mutually_exclusive_helper_body |= self.stmt
self.mutually_exclusive = (Keyword('else if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body + FollowedBy(
Keyword('else if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body))('alternative')
self.mutually_exclusive |= (Keyword('else if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body + FollowedBy(
Keyword('else') +
self.mutually_exclusive_helper_body))('alternative')
self.mutually_exclusive |= (Keyword('else if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body)('alternative')
self.mutually_exclusive |= (Keyword('if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body + FollowedBy(
Keyword('else if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body))
self.mutually_exclusive |= (Keyword('if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body + FollowedBy(
Keyword('else') + self.mutually_exclusive_helper_body))
self.mutually_exclusive |= (Keyword('if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body)
self.mutually_exclusive |= (Keyword('else') +
self.mutually_exclusive_helper_body)('alternative-end')
# Function body
self.prototype = Forward()
self.func_body = Group(OneOrMore(Group(SkipTo(self.stmt | self.control_structures,
failOn=self.prototype, include=True))))
# Assignments
self.assignment = self.ident('lvalue') + Optional(self.array_index) + \
Suppress(oneOf('= -= += ^= &= |= *= %= /=')) + self.expression('expression')
self.assignment |= self.vartype + self.assignment
# Return
self.return_ = Suppress(Keyword('return')) + self.rvalue('return_value')
# Statements
self.stmt << (self.func_call('func_call') | self.assignment('assignment') |
self.return_('return')) + Suppress(';')
self.rvalue << (self.func_call | self.ident + Optional(self.array_index) | Word(nums) |
quotedString)
# Function definitions
self.arg_list = Optional(delimitedList(Group(self.vartype + self.ident('name') +
Suppress(ZeroOrMore('[]')))))
self.prototype << self.vartype('type') + self.ident('name') + Suppress('(') + \
self.arg_list('args') + Suppress(')')
self.func_def = self.prototype + Suppress('{') + self.func_body('body') + Suppress('}')
self.func_def.ignore(cppStyleComment)
def get_statement_count(self, start: int, end: int) -> int:
"""Return the number of statements between `start` and `end`.
Statements are all lines that have an actual effect on the program flow, e.g. method calls
or loops.
Parameters
----------
start : int
The start column
end : int
The end column
Returns
-------
int
The number of statements between `start` and `end`.
"""
return len(list(self.control_structures.scanString(self.file_contents[start:end]))) + \
len(list(self.mutually_exclusive.scanString(self.file_contents[start:end]))) + \
len(list(self.stmt.scanString(self.file_contents[start:end])))
def get_edge_count(self, start: int, end: int) -> int:
"""Return the edge count between `start` and `end`.
Edges are all statements that can branch into two paths, e.g. loops, conditions etc.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
int
The edge count between `start` and `end`.
"""
# Loops have three edges: Going into the loop, skipping the loop and returning from the last
# position in the loop to the start of the loop
# Mutually exclusive blocks have two edges, entering or not entering them
return len(list(self.control_structures.scanString(self.file_contents[start:end]))) * 3 + \
len(list(self.mutually_exclusive.scanString(self.file_contents[start:end]))) * 2 + \
len(list(self.stmt.scanString(self.file_contents[start:end])))
def get_mutually_exclusive_positions(self, start: int, end: int) -> Generator[list, None, None]:
"""Return a generator for all mutually exclusive positions from `start` to `end`.
That is return the start and end position for all the statements where a mutually exclusive
block begins and where it ends.
Parameters
----------
start : int
The start column
end : int
The end column
Returns
-------
Generator
Generator for all mutually exclusive paths from `start` to `end`.
"""
return self.mutually_exclusive.scanString(self.file_contents[start:end])
def get_method_definitions(self) -> Generator[list, None, None]:
"""Return a generator for all methods with their bodies.
Returns
-------
Generator
Generator for all function definitions with their bodies
"""
return self.func_def.scanString(self.file_contents)
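
    # Illustrative note (not part of the original module): pyparsing's scanString yields
    # (tokens, start, end) triples, which is why get_global_variables() below reads the
    # start/end offsets of each function definition to locate code outside any function.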
def get_method_calls(self, start, end) -> Generator[list, None, None]:
"""Return a generator for all function calls between `start` and `end`.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
Generator
Generator for all function calls
"""
return self.func_call.scanString(self.file_contents[start:end])
def get_parameters(self, start: int, end: int) -> dict:
"""Return a dictionary of all parameters between `start` and `end` with their default value.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
dict
Dictionary with parameter: default value
"""
try:
args = self.prototype.parseString(self.file_contents[start:end]).get('args', [])
parameters = dict()
for parameter in args:
parameters[parameter['name']] = None # There are no default values in C
return parameters
except ParseException:
            Grammar.logger.error('Tried to parse parameters in "%s", but no match at start '
                                 'column %d.', self.file_.path, start)
def get_declarations(self, start: int, end: int) -> Generator[list, None, None]:
"""Return a generator for variable declarations between `start` and `end`.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
Generator
Generator for all declarations
"""
declarations = Suppress(self.ident) + self.ident + Suppress(Optional(self.array_index)) + \
Suppress(';')
return declarations.scanString(self.file_contents[start:end])
def get_global_variables(self) -> list:
"""Return a list of all global variables.
Returns
-------
list
List of all global variables
"""
# First step: Find all the functions
func_defs = self.get_method_definitions()
func_defs_positions = [(function[1], function[2]) for function in func_defs]
# Second step: Globals are by definition outside of functions
start = -1
outside_func_defs = []
for position in func_defs_positions:
outside_func_defs.append([start + 1, position[0] - 1 if position[0] > 0 else 0])
start = position[1]
if start + 1 <= len(self.file_contents):
outside_func_defs.append([start + 1, len(self.file_contents)])
# Third step: Find declarations and assignments in these regions
globals_ = list()
for start, end in outside_func_defs:
assignments = list(self.get_assignments(start, end))
assignments = [assignment[0] for assignment in assignments]
globals_.extend(assignments)
globals_.extend(list(self.get_declarations(start, end)))
return globals_
def get_assignments(self, start: int, end: int) -> Generator[list, None, None]:
"""Return a generator for all assignments betweeen `start` and `end`.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
Generator
Generator for all assignments
"""
return self.assignment.scanString(self.file_contents[start:end])
def get_control_structures(self, start: int, end: int) -> Generator[list, None, None]:
"""Return a generator for all control structures between `start` and `end`.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
Generator
Generator for all control structures
"""
return self.control_structures.scanString(self.file_contents[start:end])
def get_returns(self, start, end) -> Generator[list, None, None]:
"""Return a generator for all return values between `start` and `end`.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
Generator
Generator for all return values
"""
return self.return_.scanString(self.file_contents[start:end])
```
#### File: testfiles/parser/python_default_parameters.py
```python
class DefaultParameters:
def __init__(self, a=5):
self.a = a
```
#### File: testfiles/parser/python_detect_classes.py
```python
class ClassName:
def __init__(self):
self.value = 5
class Another_Class_Name(ClassName):
def __init__(self):
self.value = 7
```
#### File: testfiles/parser/python_multiple_functions.py
```python
def test_function(z: int) -> int:
c = z
"""Multiline comment
test."""
print(c)
if c * 5:
print(z // 4) # Test
return 'a'
b = 5
def next_function(a, b, c):
print(a * b * c)
```
#### File: TAINTalyzing/Projekt/test.py
```python
from __future__ import annotations
from copy import copy
import logging
import os
import unittest
from main import find_files
from analysis import Analysis
from input_file import InputFile
from ruleset import Ruleset
from modules.c.grammar import Grammar as CGrammar
from modules.php.grammar import Grammar as PHPGrammar
from modules.python.grammar import Grammar as PythonGrammar
from method import Method
from sink import Sink
from source import Source
def replace_sink_rules(ruleset: Ruleset, new_rules: list):
"""Replace existing sink rules from a ruleset with `new_rules`.
Parameters
----------
ruleset : Ruleset
Replace the rules in this ruleset
new_rules : dict
New rules to replace the old ones.
"""
ruleset.sinks = []
for rule in new_rules:
sink_ = Sink(rule)
ruleset.sinks.append(sink_)
def replace_source_rules(ruleset: Ruleset, new_rules: list):
"""Replace existing source rules from a ruleset with `new_rules`.
Parameters
----------
ruleset : Ruleset
Replace the rules in this ruleset
new_rules : dict
New rules to replace the old ones.
"""
ruleset.sources = []
for rule in new_rules:
source_ = Source(rule)
ruleset.sources.append(source_)
class TestMain(unittest.TestCase):
"""Test cases for main module."""
def test_find_files(self):
"""Check if all files are found."""
should_find = [os.sep.join(['testfiles', 'main', 'binary-file.odt']),
os.sep.join(['testfiles', 'main', 'unix.txt']),
os.sep.join(['testfiles', 'main', 'exclude-subdirectory',
'binary-file.odt']),
os.sep.join(['testfiles', 'main', 'exclude-subdirectory',
'excluded-file.txt']),
os.sep.join(['testfiles', 'main', 'exclude-subdirectory', 'excluded-dir',
'excluded-subdir-file']),
os.sep.join(['testfiles', 'main', 'subdirectory', 'binary-file.odt']),
os.sep.join(['testfiles', 'main', 'subdirectory', 'dos.txt']),
os.sep.join(['testfiles', 'main', 'subdirectory', 'file.swp']),
]
self.assertListEqual(should_find, list(find_files(os.sep.join(['testfiles', 'main']))))
def test_find_files_exclude(self):
"""Test if single excludes work."""
should_find = [os.sep.join(['testfiles', 'main', 'binary-file.odt']),
os.sep.join(['testfiles', 'main', 'unix.txt']),
os.sep.join(['testfiles', 'main', 'subdirectory', 'binary-file.odt']),
os.sep.join(['testfiles', 'main', 'subdirectory', 'dos.txt']),
os.sep.join(['testfiles', 'main', 'subdirectory', 'file.swp']),
]
self.assertListEqual(should_find, list(find_files(os.sep.join(['testfiles', 'main']),
exclude=['exclude-subdirectory'])))
def test_find_files_multiple_excludes(self):
"""Test if multiple excludes work independently from each other."""
should_find = [os.sep.join(['testfiles', 'main', 'binary-file.odt']),
os.sep.join(['testfiles', 'main', 'unix.txt']),
os.sep.join(['testfiles', 'main', 'subdirectory', 'binary-file.odt']),
os.sep.join(['testfiles', 'main', 'subdirectory', 'dos.txt']),
]
self.assertListEqual(should_find, list(find_files(os.sep.join(['testfiles', 'main']),
exclude=['exclude-subdirectory',
r'\.swp$'])))
def test_find_files_exclude_all(self):
"""Test if no error is thrown if everything is excluded."""
should_find = []
self.assertListEqual(should_find, list(find_files(os.sep.join(['testfiles', 'main']),
exclude=[''])))
class TestInputFile(unittest.TestCase):
"""Test cases for InputFile module."""
def test_read_file_ascii_dos(self):
"""Check if an ASCII file with DOS line endings can be read."""
input_file = InputFile(os.sep.join(['testfiles', 'main', 'subdirectory', 'dos.txt']))
self.assertEqual('file contents!\n', input_file.read_file())
def test_read_file_ascii_utf8_unix(self):
"""Check if an ASCII, utf8 encoded file with Unix line endings can be read."""
input_file = InputFile(os.sep.join(['testfiles', 'main', 'unix.txt']))
self.assertEqual('file with unix line endings\n', input_file.read_file())
def test_read_binary_file(self):
"""Make sure binary files are handled gracefully."""
input_file = InputFile(os.sep.join(['testfiles', 'main', 'binary-file.odt']))
try:
input_file.read_file()
except Exception:
self.fail()
def test_extension_detection(self):
"""Make sure files can be detected based on extension alone."""
input_file = InputFile(os.sep.join(['testfiles', 'filetype_detection', 'empty.cpp']))
input_file.detect_filetype()
self.assertEqual(input_file.module, 'cpp')
def test_magic_detection(self):
"""Make sure files can be detected based on the magic number."""
input_file = InputFile(os.sep.join(['testfiles', 'filetype_detection', 'python-magic.py']))
input_file.detect_filetype()
self.assertEqual(input_file.module, 'python')
def test_multiple_magic_detection(self):
"""Make sure files can be correctly detected even if multiple magic numbers match."""
# C++ files are detected as C files, so multiple matches, but single match for file extension
input_file = InputFile(os.sep.join(['testfiles', 'filetype_detection',
'multiple-magic.cpp']))
input_file.detect_filetype()
self.assertEqual(input_file.module, 'cpp')
def test_heuristic_detection(self):
"""Make sure heuristic correctly identifies files based on majority of recognized filetypes
in the same directory."""
input_file = InputFile(os.sep.join(['testfiles', 'filetype_detection', 'project-folder',
'include_statement']))
self.assertEqual(input_file.detect_heuristic(), 'c')
def test_heuristic_detection_empty(self):
"""Make sure heuristic detection just returns an empty string when no modules can be
identified for any of the files in the same directory.
"""
input_file = InputFile(os.sep.join(['testfiles', 'filetype_detection', 'unknown-project',
'include_statement']))
self.assertEqual(input_file.detect_heuristic(), '')
def test_heuristic_detection_respect_possibilities(self):
"""Make sure that the heuristic detection respect the list of possible detections from other
detection methods.
"""
input_file = InputFile(os.sep.join(['testfiles', 'filetype_detection', 'project-folder',
'include_statement']))
# Force invalid possibilities to check if heuristic detection overrides it
self.assertEqual(input_file.detect_heuristic([('python', ['.py'], 'Python script')]), "")
def test_fallback_detection(self):
"""Make sure fallback detection works if nothing else matches."""
input_file = InputFile(os.sep.join(['testfiles', 'filetype_detection',
'unknown_extension.python']), 'python')
input_file.detect_filetype()
self.assertEqual(input_file.module, 'python')
class TestCGrammar(unittest.TestCase):
"""Test cases for C grammar file for parsing C files."""
def test_single_instruction(self):
"""Make sure files with a single instruction in the method body are parsed correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'c_single_instruction.c']))
grammar = CGrammar(input_file)
parsetree = next(grammar.get_method_definitions())
self.assertEqual(parsetree[0]['name']['ident'][0], 'main')
self.assertEqual(parsetree[0]['args'][0]['name']['ident'][0], 'argc')
self.assertEqual(parsetree[0]['args'][1]['name']['ident'][0], 'argv')
self.assertEqual(parsetree[0]['body'][0]['name']['ident'][0], 'printf')
def test_multiple_instructions(self):
"""Make sure multiple instructions in the method body are parsed correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'c_multiple_instructions.c']))
grammar = CGrammar(input_file)
parsetree = next(grammar.get_method_definitions())
for idx, method in enumerate(parsetree[0]['body']):
self.assertEqual(method['name']['ident'][0], 'printf' if idx == 0 else 'add')
def test_unknown_tokens(self):
"""Make sure unknown tokens don't confuse the parser."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'c_unknown_tokens.c']))
grammar = CGrammar(input_file)
parsetree = next(grammar.get_method_definitions())
self.assertEqual(parsetree[0]['name']['ident'][0], 'main')
self.assertEqual(parsetree[0]['body'][0]['name']['ident'][0], 'printf')
self.assertEqual(parsetree[0]['body'][1]['lvalue']['ident'][0], 'i')
self.assertEqual(len(parsetree[0]['body'][3]['args']), 3)
self.assertEqual(parsetree[0]['body'][3]['name']['ident'][0], 'printf')
def test_multiple_functions(self):
"""Make sure multiple functions are detected correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'c_multiple_functions.c']))
grammar = CGrammar(input_file)
definitions = grammar.get_method_definitions()
parsetree_first_function = next(definitions)
self.assertEqual(parsetree_first_function[0]['name']['ident'][0], 'add')
self.assertEqual(parsetree_first_function[0]['body'][1]['return_value'], 'c')
parsetree_second_function = next(definitions)
self.assertEqual(parsetree_second_function[0]['body'][1]['name']['ident'][0], 'printf')
self.assertEqual(len(parsetree_second_function[0]['body'][1]['args']), 2)
def test_identify_returns(self):
"""Make sure multiple returns are detected correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'c_multiple_returns.c']))
grammar = CGrammar(input_file)
returns = grammar.get_returns(input_file.line_to_column(2), input_file.line_to_column(4))
first_return = next(returns)
self.assertEqual(first_return[0]['return_value'], '1')
second_return = next(returns)
self.assertEqual(second_return[0]['return_value'], '0')
def test_assignments(self):
"""Make sure assignments are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'c_multiple_functions.c']))
grammar = CGrammar(input_file)
assignments = grammar.get_assignments(60, 166)
first = next(assignments)
self.assertEqual(first[0]['lvalue']['ident'][0], 'a')
self.assertEqual(first[0]['expression'][0]['name']['ident'][0], 'add')
self.assertEqual(first[0]['expression'][0]['args'][0][0], '2')
self.assertEqual(first[0]['expression'][0]['args'][1][0], '3')
second = next(assignments)
self.assertEqual(second[0]['lvalue']['ident'][0], 'a')
self.assertEqual(second[0]['expression'][0][0], '5')
def test_follow_variables(self):
"""Make sure that variables are followed correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'c_multiple_functions.c']))
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
method = Method(60, 167, {'ident': ['main']})
dataflow = analysis.follow_variables(method)
for variable, flow in dataflow.items():
if variable[1] == 'argc':
self.assertListEqual(flow, [[{'ident': ['argc'], 'lvalue': ['argc'], 'expression':
[{'name': {'ident': [None]}}]}, 0, 0]])
elif variable[1] == 'argv':
self.assertListEqual(flow, [[{'ident': ['argv'], 'lvalue': ['argv'], 'expression':
[{'name': {'ident': [None]}}]}, 0, 0]])
elif variable[1] == 'a':
self.assertEqual(len(flow), 3)
self.assertEqual(flow[1][0]['name']['ident'][0], 'printf')
else:
self.fail(f'Unknown variable: {variable[1]}')
def test_detect_parameters(self):
"""Make sure parameters and default values are detected correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'c_single_instruction.c']))
grammar = CGrammar(input_file)
parameters = grammar.get_parameters(0, 64)
for parameter, value in parameters.items():
if parameter['ident'][0] == 'argc':
self.assertEqual(value, None)
elif parameter['ident'][0] == 'argv':
self.assertEqual(value, None)
else:
self.fail('Unknown parameter.')
class TestPHPGrammar(unittest.TestCase):
"""Test cases for PHP grammar file for parsing PHP files."""
def test_detect_parameters(self):
"""Make sure parameters and default values are detected correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'php_default_parameters.php']))
grammar = PHPGrammar(input_file)
parameters = grammar.get_parameters(6, 94)
for parameter, value in parameters.items():
if parameter['ident'][0] == 'typ':
self.assertEqual(value, '"Cappuccino"')
else:
self.fail('Unknown parameter.')
def test_multiple_instructions(self):
"""Make sure multiple instructions in the method body are parsed correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser',
'php_multiple_instructions.php']))
grammar = PHPGrammar(input_file)
parsetree = next(grammar.get_method_definitions())
self.assertEqual(parsetree[0]['name']['ident'][0], 'divideNumbers')
self.assertEqual(len(parsetree[0]['args']), 2)
self.assertEqual(parsetree[0]['args'][1]['name']['ident'][0], 'divisor')
self.assertListEqual(parsetree[0]['body'][0]['expression'].asList(), [['$dividend',
'$divisor']])
self.assertEqual(parsetree[0]['body'][1]['lvalue']['ident'][0], 'array')
def test_find_classes(self):
"""Make sure classes are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'php_detect_classes.php']))
grammar = PHPGrammar(input_file)
self.assertDictEqual(grammar.get_class_definitions(), {'MyClass': 6, 'MyOtherClass': 136})
class TestPythonGrammar(unittest.TestCase):
"""Test cases for Python grammar file for parsing Python files."""
def test_detect_parameters(self):
"""Make sure parameters and default values are detected correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'python_default_parameters.py']))
grammar = PythonGrammar(input_file)
parameters = grammar.get_parameters(29, 73)
for parameter, value in parameters.items():
if parameter['ident'][0] == 'a':
self.assertEqual(value, '5')
else:
self.fail('Unknown parameter')
def test_multiple_instructions(self):
"""Make sure multiple instructions in the method body are parsed correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'python_multiple_functions.py']))
grammar = PythonGrammar(input_file)
defs = grammar.get_method_definitions()
parsetree_first_function = next(defs)
self.assertEqual(parsetree_first_function[0]['name']['ident'][0], 'test_function')
self.assertEqual(parsetree_first_function[0]['body'][0][1]['name']['ident'][0], 'print')
self.assertEqual(parsetree_first_function[0]['body'][0][3]['return_value'], "'a'")
parsetree_second_function = next(defs)
self.assertListEqual(parsetree_second_function[0]['body'][0][0]['args'][0][0].asList(),
['a', 'b', 'c'])
def test_find_classes(self):
"""Make sure classes are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'parser', 'python_detect_classes.py']))
grammar = PythonGrammar(input_file)
self.assertDictEqual(grammar.get_class_definitions(), {'ClassName': 48,
'Another_Class_Name': 113})
class TestRuleset(unittest.TestCase):
"""Test cases for rulesets."""
def test_load_sinks(self):
"""Make sure that sinks are loaded successfully."""
ruleset = Ruleset('c')
sinks = ruleset.sinks
for sink in sinks:
methods = sink.methods
for method in methods:
if method['Methodname'] == 'strcpy':
self.assertListEqual(method['Parameters'], [None, '$TAINT'])
self.assertNotEqual(method['Comment'], '')
break
def test_load_sanitizers(self):
"""Make sure that sanitizers are loaded successfully."""
ruleset = Ruleset('c')
replace_sink_rules(
ruleset,
[{None: {
'Methods': [{'Methodname': 'printf',
'Parameters': ['$TAINT'],
'Comment': 'Format string vulnerability.',
'Sanitizers': [{None: {'Methods': [{'Methodname': 'test',
'Parameters': [],
'Comment': 'For testing purposes '
'only.'},
{'Methodname': 'test2',
'Parameters': [None],
'Comment': 'For testing purposes '
'only.'}]}}]}]}}])
sinks = ruleset.sinks
for sink in sinks:
methods = sink.methods
for method in methods:
if method['Methodname'] == 'printf':
self.assertEqual(len(method['Sanitizers'][0].methods), 2)
self.assertEqual(method['Sanitizers'][0].methods[1]['Methodname'], 'test2')
break
def test_load_sources(self):
"""Make sure that sources are loaded successfully."""
ruleset = Ruleset('c')
sources = ruleset.sources
for source in sources:
methods = source.methods
for method in methods:
if method['Methodname'] == 'scanf':
self.assertEqual(len(method['Parameters']), 2)
break
class TestAnalysis(unittest.TestCase):
"""Test cases for analyses."""
def test_find_methods(self):
"""Make sure all methods are recognized correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'format-string.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
if method.method_name['ident'][0] == 'vuln':
self.assertEqual(method.start, 20)
self.assertEqual(method.end, 67)
elif method.method_name['ident'][0] == 'main':
self.assertEqual(method.start, 69)
self.assertEqual(method.end, 283)
else:
self.fail('Unknown method found.')
def test_find_variable_simple(self):
"""Make sure that simple variable sources are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'simple-source.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
method = Method(20, 286, {'ident': ['main']})
analysis.follow_variables(method)
trail = analysis.find_variable_source(method, None, 'userControlledToo', 202)
self.assertEqual(len(trail), 4)
self.assertEqual(trail[0][1], 38)
self.assertEqual(trail[1][1], 105)
self.assertEqual(trail[2][1], 138)
self.assertEqual(trail[3][1], 202)
def test_find_variable_source(self):
"""Make sure that variable sources over multiple functions are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis',
'variable-source-multiple-functions.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
method = Method(20, 114, {'ident': ['vuln']})
analysis.follow_variables(method)
trail = analysis.find_variable_source(method, None, 'userInputUsed', 93)
self.assertEqual(len(trail), 3)
self.assertEqual(trail[0][1], 0)
self.assertEqual(trail[1][1], 34)
self.assertEqual(trail[2][1], 72)
def test_find_sources(self):
"""Make sure that sources are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'format-string.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
replace_sink_rules(
analysis.ruleset,
[{None: {'Methods': [{'Methodname': 'printf',
'Parameters': ['$TAINT'],
'Comment': 'Format string vulnerability.',
'Sanitizers': [{None: {'Methods': [{'Methodname': 'test',
'Parameters': [],
'Comment': 'For testing '
'purposes only.'},
{'Methodname': 'test2',
'Parameters': [None],
'Comment': 'For testing '
'purposes '
'only.'}]}}]}]}}])
sources = analysis.find_sources(Method(69, 283, {'ident': ['main']}))
for source_, calls in sources.items():
if source_.object_name is None:
self.assertListEqual(source_.methods, [{'Methodname': 'scanf', 'Parameters':
[None, '$TAINT'], 'Comment':
'Reads formatted input from stdin'}])
self.assertEqual(calls[0][0]['name']['ident'][0], 'scanf')
self.assertEqual(calls[0][1], 105)
self.assertEqual(calls[0][2], 132)
else:
self.fail('Unknown source found.')
def test_find_sinks(self):
"""Make sure that sinks are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'format-string.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
replace_sink_rules(
analysis.ruleset,
[{None: {'Methods': [{'Methodname': 'printf',
'Parameters': ['$TAINT'],
'Comment': 'Format string vulnerability.',
'Sanitizers': [{None: {'Methods': [{'Methodname': 'test',
'Parameters': [],
'Comment': 'For testing '
'purposes only.'},
{'Methodname': 'test2',
'Parameters': [None],
'Comment': 'For testing '
'purposes '
'only.'}]}}]}]}}])
sinks = analysis.find_sinks(Method(20, 67, {'ident': ['vuln']}))
for sink_, calls in sinks.items():
if sink_.object_name is None:
for method in sink_.methods:
if method['Methodname'] == 'printf':
self.assertListEqual(method['Parameters'], ['$TAINT'])
self.assertEqual(calls[0][0]['name']['ident'][0], 'printf')
self.assertEqual(len(calls[0][0]['args']), 1)
self.assertEqual(calls[0][1], 30)
self.assertEqual(calls[0][2], 43)
continue
self.fail('Unknown sink found.')
def test_find_sanitizers(self):
"""Make sure that sanitizers are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'sanitizer.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
replace_sink_rules(
analysis.ruleset,
[{None: {'Methods': [{'Methodname': 'printf',
'Parameters': ['$TAINT'],
'Comment': 'Format string vulnerability.',
'Sanitizers': [{None: {'Methods': [{'Methodname': 'test',
'Parameters': [],
'Comment': 'For testing '
'purposes only.'},
{'Methodname': 'test2',
'Parameters': [None],
'Comment': 'For testing '
'purposes '
'only.'}]}}]}]}}])
method = Method(20, 79, {'ident': ['vuln']})
analysis.find_sinks(method)
sanitizers = analysis.find_sanitizers(method)
for sanitizer_, calls in sanitizers.items():
if sanitizer_.object_name is None:
self.assertEqual(calls[0][0]['name']['ident'][0], 'test')
self.assertEqual(calls[0][1], 30)
self.assertEqual(calls[0][2], 36)
def test_find_simple_taints(self):
"""Make sure taints are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'simple-taint.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
replace_sink_rules(
analysis.ruleset,
[{None: {'Methods': [{'Methodname': 'printf',
'Parameters': ['$TAINT'],
'Comment': 'Format string vulnerability.',
'Sanitizers': [{None: {'Methods': [{'Methodname': 'test',
'Parameters': [],
'Comment': 'For testing '
'purposes only.'},
{'Methodname': 'test2',
'Parameters': [None],
'Comment': 'For testing '
'purposes '
'only.'}]}}]}]}}])
method = Method(20, 286, {'ident': ['main']})
analysis.follow_variables(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
taints = method.taints
self.assertEqual(len(taints), 1)
for data in taints.values():
self.assertEqual(data[0]['Comment'], 'Format string vulnerability.')
self.assertEqual(data[0]['Position'], (205, 230))
def test_find_sanitized_simple_taints(self):
"""Make sure that sanitized sinks are also identified as taints with a reference to the
sanitizer.
"""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'sanitized-taint.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
replace_sink_rules(
analysis.ruleset,
[{None: {'Methods': [{'Methodname': 'printf',
'Parameters': ['$TAINT'],
'Comment': 'Format string vulnerability.',
'Sanitizers': [{None: {'Methods': [{'Methodname': 'test',
'Parameters': [],
'Comment': 'For testing '
'purposes only.'},
{'Methodname': 'test2',
'Parameters': [None],
'Comment': 'For testing '
'purposes '
'only.'}]}}]}]}}])
method = Method(20, 286, {'ident': ['main']})
analysis.follow_variables(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
taints = method.taints
self.assertEqual(len(taints), 1)
for sinks in taints.values():
self.assertEqual(sinks[0]['Sanitizer'].methods[0]['Methodname'], 'test')
def test_follow_sanitizers(self):
"""Make sure that sanitizers are followed across method calls."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis',
'follow-sanitizer-functions.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
replace_sink_rules(
analysis.ruleset,
[{None: {'Methods': [{'Methodname': 'printf',
'Parameters': ['$TAINT'],
'Comment': 'Format string vulnerability.',
'Sanitizers': [{None: {'Methods': [{'Methodname': 'test',
'Parameters': [],
'Comment': 'For testing '
'purposes only.'},
{'Methodname': 'test2',
'Parameters': [None],
'Comment': 'For testing '
'purposes '
'only.'}]}}]}]}}])
for method in analysis.methods:
analysis.follow_variables(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
# Sanitizer should have been added and no unsanitized taints should remain
self.assertEqual(len(analysis.methods[1].sanitizers), 1)
for method in analysis.methods:
if method.method_name['ident'][0] == 'sanitize':
self.assertDictEqual(method.taints, {})
else:
self.assertEqual(len(method.taints), 1)
for sinks in method.taints.values():
for sink in sinks:
self.assertNotEqual(sink.get('Sanitizer'), None)
self.assertEqual(sink['Sanitizer'].level, 1)
def test_follow_sources_parameters(self):
"""Make sure that methods which have parameters that are used as arguments to sources are
added as sources themselves.
"""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'follow-source-functions.c']),
'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.follow_variables(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[1].taints), 1)
def test_follow_sources_returns(self):
"""Make sure that methods which have returns based on sources are added as sources
themselves.
"""
input_file = InputFile(os.sep.join(['testfiles', 'analysis',
'follow-source-functions-return.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.follow_variables(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[1].taints), 1)
def test_follow_sources_direct_returns(self):
"""Make sure that methods which have returns based on sources are added as sources
themselves.
"""
input_file = InputFile(os.sep.join(['testfiles', 'analysis',
'follow-source-functions-direct-return.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.follow_variables(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[1].taints), 1)
def test_follow_taints(self):
"""Make sure that taints are followed across method calls."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'follow-taint-functions.c']),
'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
replace_sink_rules(
analysis.ruleset,
[{None: {'Methods': [{'Methodname': 'printf',
'Parameters': ['$TAINT'],
'Comment': 'Format string vulnerability.',
'Sanitizers': [{None: {'Methods': [{'Methodname': 'test',
'Parameters': [],
'Comment': 'For testing '
'purposes only.'},
{'Methodname': 'test2',
'Parameters': [None],
'Comment': 'For testing '
'purposes '
'only.'}]}}]}]}}])
for method in analysis.methods:
analysis.follow_variables(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[1].taints), 1)
def test_follow_taints_classes(self):
"""Make sure that taints are followed across classes."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'follow-taint-classes.php']),
'php')
grammar = PHPGrammar(input_file)
analysis = Analysis(grammar, Ruleset('php'))
replace_sink_rules(analysis.ruleset,
[{None: {
'Methods': [{'Methodname': 'eval',
'Parameters': ['$TAINT'],
'Comment': 'Arbitrary code execution.'}]}}])
for method in analysis.methods:
analysis.follow_variables(method)
analysis.fix_object_names(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[1].taints), 1)
def test_exclusive_path(self):
"""Make sure a single exclusive path is recognized."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'exclusive-path.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.find_paths_through(method)
self.assertListEqual(analysis.methods[0].paths, [
[(20, 59), (59, 90), (220, 236)],
[(20, 59), (90, 135), (220, 236)],
[(20, 59), (135, 182), (220, 236)],
[(20, 59), (182, 220), (220, 236)]])
def test_exclusive_paths(self):
"""Make sure exclusive paths are recognized."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'exclusive-paths.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.find_paths_through(method)
self.assertListEqual(analysis.methods[0].paths, [
[(20, 59), (59, 89), (89, 126), (126, 164), (206, 252), (252, 283), (386, 403)],
[(20, 59), (59, 89), (89, 126), (126, 164), (206, 252), (283, 320), (386, 403)],
[(20, 59), (59, 89), (89, 126), (126, 164), (206, 252), (320, 357), (386, 403)],
[(20, 59), (59, 89), (89, 126), (126, 164), (206, 252), (357, 386), (386, 403)],
[(20, 59), (59, 89), (89, 126), (164, 206), (206, 252), (252, 283), (386, 403)],
[(20, 59), (59, 89), (89, 126), (164, 206), (206, 252), (283, 320), (386, 403)],
[(20, 59), (59, 89), (89, 126), (164, 206), (206, 252), (320, 357), (386, 403)],
[(20, 59), (59, 89), (89, 126), (164, 206), (206, 252), (357, 386), (386, 403)],
[(20, 59), (59, 89), (89, 126), (206, 206), (206, 252), (252, 283), (386, 403)],
[(20, 59), (59, 89), (89, 126), (206, 206), (206, 252), (283, 320), (386, 403)],
[(20, 59), (59, 89), (89, 126), (206, 206), (206, 252), (320, 357), (386, 403)],
[(20, 59), (59, 89), (89, 126), (206, 206), (206, 252), (357, 386), (386, 403)],
[(20, 59), (89, 89), (89, 126), (126, 164), (206, 252), (252, 283), (386, 403)],
[(20, 59), (89, 89), (89, 126), (126, 164), (206, 252), (283, 320), (386, 403)],
[(20, 59), (89, 89), (89, 126), (126, 164), (206, 252), (320, 357), (386, 403)],
[(20, 59), (89, 89), (89, 126), (126, 164), (206, 252), (357, 386), (386, 403)],
[(20, 59), (89, 89), (89, 126), (164, 206), (206, 252), (252, 283), (386, 403)],
[(20, 59), (89, 89), (89, 126), (164, 206), (206, 252), (283, 320), (386, 403)],
[(20, 59), (89, 89), (89, 126), (164, 206), (206, 252), (320, 357), (386, 403)],
[(20, 59), (89, 89), (89, 126), (164, 206), (206, 252), (357, 386), (386, 403)],
[(20, 59), (89, 89), (89, 126), (206, 206), (206, 252), (252, 283), (386, 403)],
[(20, 59), (89, 89), (89, 126), (206, 206), (206, 252), (283, 320), (386, 403)],
[(20, 59), (89, 89), (89, 126), (206, 206), (206, 252), (320, 357), (386, 403)],
[(20, 59), (89, 89), (89, 126), (206, 206), (206, 252), (357, 386), (386, 403)]])
def test_mutually_exclusive_taint(self):
"""Make sure that mutually exclusive blocks are recognized during taint analysis."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'mutually-exclusive-taint.c']),
'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.follow_variables(method)
analysis.fix_object_names(method)
all_sources = analysis.find_sources(method)
all_sinks = analysis.find_sinks(method)
analysis.find_paths_through(method)
taints = set()
for path in method.paths:
method.sources = copy(all_sources)
method.sinks = copy(all_sinks)
taints.update(analysis.find_taints(method, path))
self.assertEqual(len(taints), 0)
def test_calculate_complexity(self):
"""Make sure that cyclomatic complexity is calculated correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'euclid-complexity.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.calculate_complexity(method)
self.assertEqual(analysis.methods[0].complexity, 5)
def test_two_sinks(self):
"""Make sure two sinks of the same type are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'two-sinks.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
sinks = []
for method in analysis.methods:
sinks.append(analysis.find_sinks(method))
count = 0
for sink in sinks:
for calls in sink.values():
for _ in calls:
count += 1
self.assertEqual(count, 2)
def test_getenv_sprintf(self):
"""Make sure that the combination of a returned source and a sink with the third parameter
vulnerable on only one path is detected correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'getenv-sprintf.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.follow_variables(method)
analysis.fix_object_names(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[0].taints), 1)
def test_subcall(self):
"""Make sure that calls inside expressions are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'subcall.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.follow_variables(method)
analysis.fix_object_names(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[0].taints), 1)
def test_subsubcall(self):
"""Make sure that subcalls inside expressions are identified correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'subsubcall.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.follow_variables(method)
analysis.fix_object_names(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[1].taints), 1)
def test_subsubcall_two_parameters(self):
"""Make sure that subcalls inside expressions at different positions are identified
correctly.
"""
input_file = InputFile(os.sep.join(['testfiles', 'analysis',
'subsubcall-two-parameters.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.follow_variables(method)
analysis.fix_object_names(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[2].taints), 1)
def test_subsubcall_harmless(self):
"""Make sure that harmless subcalls inside expressions are not identified as taints."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'subsubcall-harmless.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.follow_variables(method)
analysis.fix_object_names(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[2].taints), 0)
def test_complex_php(self):
"""Make sure that complex PHP files are analyzed correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'complex.php']), 'php')
grammar = PHPGrammar(input_file)
analysis = Analysis(grammar, Ruleset('php'))
try:
for method in analysis.methods:
analysis.follow_variables(method)
analysis.fix_object_names(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
except Exception:
self.fail("Had trouble with at least one of the steps.")
def test_global_taint(self):
"""Make sure globals are recognized correctly."""
input_file = InputFile(os.sep.join(['testfiles', 'analysis', 'global-taint.c']), 'c')
grammar = CGrammar(input_file)
analysis = Analysis(grammar, Ruleset('c'))
for method in analysis.methods:
analysis.follow_variables(method)
analysis.find_sources(method)
analysis.find_sinks(method)
analysis.find_sanitizers(method)
analysis.find_taints(method)
self.assertEqual(len(analysis.methods[1].taints), 1)
# We can't know whether global variables have been changed anywhere, so we always have to
# interpret them as user controlled
self.assertEqual(len(analysis.methods[2].taints), 1)
if __name__ == '__main__':
logging.getLogger('taintalyzing')
logging.basicConfig(level=logging.ERROR)
unittest.main()
``` |
{
"source": "JonasWard/ClayAdventures",
"score": 2
} |
#### File: clay_bricks/PatternBrickLibrary/base_mesh.py
```python
import Rhino.Geometry as rg
import vertexClass_v1 as vc
class MeshObject:
TRI = True
def __init__(self, plane, width, height, spacing):
self.pln = plane
self.w = width
self.h = height
self.s = spacing
self.x = int(width / spacing)
self.y = int(height / spacing)
self.front_pts = []
self._front_init = False
self.back_pts = []
def construct_pts(self):
b_pt = self.pln.Origin
x_a = self.pln.XAxis
y_a = self.pln.YAxis
z_a = self.pln.ZAxis
for i in range(self.x + 1):
for j in range(self.y + 1):
self.front_pts.append(b_pt + x_a * (i - .5 * self.x) * self.s + y_a * j * self.s )
for i in range(self.x + 1):
for j in range(self.y + 1):
self.back_pts.append(b_pt + x_a * (i - .5 * self.x) * self.s + y_a * j * self.s + z_a * self.s )
def add_front_pts(self, pts, inverse = False):
if inverse:
self.front_pts = self._invert_points(pts)
else:
self.front_pts = pts
self._front_init = True
def adjusting_front_pts(self):
# adjusting the front points based on their distance to the base plane
if self._front_init:
distances = []
b_pts = []
for pt in self.front_pts:
c_pt = self.pln.ClosestPoint(pt)
b_pts.append(c_pt)
distances.append(rg.Vector3d.Multiply(rg.Vector3d(pt - c_pt),self.pln.ZAxis))
self.front_pts = []
min_val = min(distances)
print(min_val)
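# rebuild the front points so that the point closest to the base plane ends up exactly on it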
for i, b_pt in enumerate(b_pts):
self.front_pts.append(b_pt + (distances[i] - min_val) * self.pln.ZAxis)
else:
print("Front has not been set")
def construct_layers(self):
layer_set = []
m = self.x + 1
n = self.y + 1
for i in range(self.y + 1):
y_val = i * self.s
local_layer = []
for j in range(self.x + 1):
x_val = j * self.s
loc_pt = self.front_pts[i + j * n]
loc_vertex = vc.Vertex(loc_pt, -self.pln.ZAxis, x_val, y_val)
local_layer.append(loc_vertex)
layer_set.append(local_layer)
return layer_set
def _invert_points(self, pt_list):
m = self.x + 1
inverse_pt_list = [[] for i in range(m)]
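# regroup the flat point list by its index modulo m, i.e. transpose the (m x n) grid ordering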
for idx, pt in enumerate(pt_list):
m_val = idx % m
n_val = int ( (idx - m_val) / m )
inverse_pt_list[m_val].append(pt)
n_pt_list = []
for pt_set in inverse_pt_list:
n_pt_list.extend(pt_set)
return n_pt_list
def construct_graph(self):
cnt = len(self.back_pts)
face_list = []
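# vertices are added front grid first, then back grid, so a back vertex index is its front index plus cnt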
if MeshObject.TRI:
for i in range(self.x):
id_x_a = i * (self.y + 1)
id_x_b = (i + 1) * (self.y + 1)
for j in range(self.y):
print(id_x_a + j, id_x_a + j + 1, id_x_b + j + 1, id_x_b + j )
face_list.append([id_x_a + j, id_x_a + j + 1, id_x_b + j + 1])
face_list.append([id_x_a + j, id_x_b + j + 1, id_x_b + j])
face_list.append([id_x_b + j + cnt, id_x_b + j + 1 + cnt, id_x_a + j + 1 + cnt, id_x_a + j + cnt])
else:
for i in range(self.x):
id_x_a = i * (self.y + 1)
id_x_b = (i + 1) * (self.y + 1)
for j in range(self.y):
print(id_x_a + j, id_x_a + j + 1, id_x_b + j + 1, id_x_b + j )
face_list.append([id_x_a + j, id_x_a + j + 1, id_x_b + j + 1, id_x_b + j])
face_list.append([id_x_b + j + cnt, id_x_b + j + 1 + cnt, id_x_a + j + 1 + cnt, id_x_a + j + cnt])
for i in range(self.x):
id_x_a = i * (self.y + 1)
id_x_b = (i + 1) * (self.y + 1)
face_list.append([id_x_a, id_x_b, cnt + id_x_b, cnt + id_x_a])
face_list.append([cnt + id_x_a + self.y, cnt + id_x_b + self.y, id_x_b + self.y, id_x_a + self.y])
for i in range(self.y):
face_list.append([cnt + i, cnt + i + 1, i + 1, i])
face_list.append([(self.y + 1) * self.x + i, (self.y + 1) * self.x + i + 1, (self.y + 1) * self.x + cnt + i + 1, (self.y + 1) * self.x + cnt + i])
return face_list
def construct_mesh(self):
msh = rg.Mesh()
if self._front_init:
# initializing the points
for pt in self.front_pts + self.back_pts:
msh.Vertices.Add(pt)
for f in self.construct_graph():
if len(f) == 4:
msh.Faces.AddFace(f[0], f[1], f[2], f[3])
elif len(f) == 3:
msh.Faces.AddFace(f[0], f[1], f[2])
return msh
else:
print("Front has not been set")
return msh
```
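A minimal usage sketch for `MeshObject` (hedged: it assumes the class is importable as `base_mesh`, that `Rhino.Geometry` is available, e.g. inside a GhPython component, and that the numeric arguments are only illustrative). Note that the front points must be registered through `add_front_pts` before `construct_mesh` will build anything:
```python
import Rhino.Geometry as rg
from base_mesh import MeshObject  # hypothetical import path

obj = MeshObject(rg.Plane.WorldXY, 40.0, 20.0, 2.0)  # plane, width, height, spacing
obj.construct_pts()                 # build the regular front and back point grids
obj.add_front_pts(obj.front_pts)    # register the (possibly displaced) front points
obj.adjusting_front_pts()           # optional: pull the closest point back onto the plane
msh = obj.construct_mesh()          # mesh spanning the front and back grids
```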
#### File: clay_bricks/PatternBrickLibrary/class_pass_testing.py
```python
class Pattern(object):
def __init__(self, a, b):
self.a = a
self.b = b
def operateOn(self, vertex):
vertex.x += self.a
vertex.y += self.b
class Vertex(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
str_set = [
'this is a vertex with x value:',
str(self.x),
'and y value:',
str(self.y),
]
return ' '.join(str_set)
def applyPattern(self, pattern):
pattern.operateOn(self)
ver = Vertex(1, 2)
pat = Pattern(.1, .2)
print(ver)
ver.applyPattern(pat)
print(ver)
```
#### File: PatternBrickLibrary/clusterFromCurves/vertex.py
```python
import Rhino.Geometry as rg
import math
def signed_vector_angle(a, b):
return math.atan2( a.X*b.Y - a.Y*b.X, a.X*b.X + a.Y*b.Y )
def positive_vector_angle(a, b):
angle = signed_vector_angle(a, b)
if angle > 0:
return angle
else:
return 6.2831853072 + angle
class Vertex:
MAX_A = 1.8
DIS = 4.0
def __init__(self, location):
self.location = rg.Point3d(location)
self.neighbours = []
self.vectors = []
self.n_locations = []
@property
def count(self):
return len(self.neighbours)
def add_neighbour(self, other_vertex):
self.neighbours.append(other_vertex)
def sort_neighbours(self):
angles = [n.angle(self) for n in self.neighbours]
print(angles)
print(self.neighbours)
angles, self.neighbours = list(zip(*sorted(zip(angles, self.neighbours) ) ) )
print(angles)
print(self.neighbours)
def angle(self, other):
n_vec = rg.Vector3d(self.location - other.location)
return positive_vector_angle(rg.Vector3d.XAxis, n_vec)
def construct_dir_vectors(self):
if self.count == 0:
b_vec = rg.Vector3d(Vertex.DIS, 0, 0)
self.interpolate_vectors_end(b_vec)
elif self.count == 1:
b_vec = rg.Vector3d(self.neighbours[0].location - self.location)
self.vectors.append(b_vec * .5)
self.interpolate_vectors_end(b_vec)
else:
for i in range(self.count):
v_0 = rg.Vector3d(.5 * (self.neighbours[i].location - self.location) )
v_1 = rg.Vector3d(.5 * (self.neighbours[(i + 1) % self.count].location - self.location) )
self.interpolate_vectors(v_0, v_1)
print("vector count : {}".format(len(self.vectors)))
def interpolate_vectors(self, v_0, v_1):
# initialize the first vectors of the two as a normal vector
self.vectors.append(rg.Vector3d(v_0) )
# getting the positive angle
angle = positive_vector_angle(v_0, v_1)
print(angle)
if angle > Vertex.MAX_A:
angle_count = math.ceil( angle / Vertex.MAX_A )
angle_delta = angle / angle_count
print("angle count : {}".format(angle_count) )
b_vec = rg.Vector3d(v_0)
b_vec.Unitize()
b_vec = b_vec * Vertex.DIS
for i in range(1, int(angle_count), 1):
r_matrix = rg.Transform.Rotation(i * angle_delta, self.location)
n_vec = rg.Vector3d(b_vec)
n_vec.Transform(r_matrix)
self.vectors.append(n_vec)
print("added a vector")
def interpolate_vectors_end(self, b_vec):
angle_count = math.ceil( math.pi * 2.0 / Vertex.MAX_A )
angle_delta = math.pi * 2.0 / angle_count
loc_v = rg.Vector3d(b_vec)
loc_v.Unitize()
loc_v = loc_v * Vertex.DIS
for i in range(1, int(angle_count), 1):
r_matrix = rg.Transform.Rotation(i * angle_delta, self.location)
n_vec = rg.Vector3d(loc_v)
n_vec.Transform(r_matrix)
self.vectors.append(n_vec)
def new_location(self):
for i in range(len(self.vectors) ):
self.n_locations.append(rg.Point3d(self.location + self.vectors[i] + self.vectors[(i + 1) % len(self.vectors) ]) )
def curve_representation(self):
return rg.Polyline(self.n_locations + [self.n_locations[0]] ).ToNurbsCurve()
def line_representation(self):
return [rg.Line(self.location, n_l) for n_l in self.n_locations]
```
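A short, hedged sketch of how this cluster `Vertex` might be driven (hypothetical import path, illustrative coordinates): each vertex is told about its neighbours, fans out direction vectors that are at most `Vertex.MAX_A` radians apart, and then exposes its offset outline:
```python
import Rhino.Geometry as rg
from vertex import Vertex  # hypothetical import path

a = Vertex(rg.Point3d(0, 0, 0))
b = Vertex(rg.Point3d(10, 0, 0))
a.add_neighbour(b)
b.add_neighbour(a)
for v in (a, b):
    v.construct_dir_vectors()   # fan of direction vectors around the vertex
    v.new_location()            # offset corner points from consecutive vector pairs
outlines = [v.curve_representation() for v in (a, b)]
links = [v.line_representation() for v in (a, b)]
```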
#### File: clay_bricks/PatternBrickLibrary/patternGenv2_2_1.py
```python
import Rhino.Geometry as rg
import math
import random
from vertexClass import Vertex
from copy import deepcopy as dc
class PatternMap:
DOT_MAP_UV_SHIFT=(0.0,0.0)
DOT_MAP_SHIFT=False
DOT_MAP_HARD_EASING=False
DOT_MAP_RND=False
DOT_MAP_RND_SEED=0
Y_SPACING_FACTOR=1.0
def __init__(self, layer_set, set_tuples, periodic = False, other_set = None):
# set_tuples = (crv_l, div_l, lay_h)
self.pattern_set = layer_set
if not(other_set == None):
self.add_other_set = True
self.other_set = other_set
else:
self.add_other_set = False
self.length, _, self.lay_h = set_tuples
self.div_c = len(self.pattern_set[0])
self.lay_c = len(self.pattern_set)
self.height = self.lay_c * self.lay_h
self.periodic = periodic
self.closed = True
self.curved = False
self.rnd_inout = False
self.rnd_cull = False
self.rnd_radius = False
self.rnd_n_val = False
self.dir = 1.0
def build(self):
self.surface_set = []
for pattern_layer in self.pattern_set:
self.surface_set.extend(pattern_layer)
def set_random_inout(self, percentage = .5, direction = 1.0):
self.rnd_inout = True
self.rnd_inout_percentage = percentage
def random_inout(self):
if self.rnd_inout_percentage < random.random():
return -1.0
else:
return 1.0
def set_random_cull(self, cull_val):
self.rnd_cull = True
self.rnd_cull_val = cull_val
def set_random_radius(self, min_radius, max_radius):
self.rnd_radius = True
self.r_min, self.r_delta = min_radius, max_radius - min_radius
def random_rad(self):
return self.r_min + random.random() * self.r_delta
def set_random_n_val(self, min_n_val, max_n_val):
self.rnd_n_val = True
self.n_min, self.n_delta = min_n_val, max_n_val - min_n_val
def random_n_val(self):
return self.n_min + random.random() * self.n_delta
def edgeEasing(self, zero_length, normal_length):
ease_delta = normal_length - zero_length
for pt in self.surface_set:
if pt.x_val < zero_length or pt.x_val > self.length - zero_length:
pt.n_scale = 0.0
elif pt.x_val < normal_length:
pt.n_scale = abs(pt.x_val - zero_length) / ease_delta
elif pt.x_val > self.length - normal_length:
pt.n_scale = abs(pt.x_val - (self.length - zero_length)) / ease_delta
def sinWarp(self, period, amplitude, phase_shift, direction = True):
if self.periodic:
print("sin periodicizing")
print("updating the period")
print("old period: %s" %period)
period_count = math.ceil( self.length / ( 2 * math.pi * period ) )
period = self.length / (period_count * 2 * math.pi)
print("new period: %s" %period)
for pt in self.surface_set:
local_phase = pt.y_val / self.lay_h * phase_shift
scale_val = math.sin(pt.x_val / period + local_phase) * amplitude
pt.warp_pt(scale_val * self.dir)
def patternGeneration(self, pattern_set, spacing):
if self.periodic:
print("pattern periodicizing")
print("updating the spacing")
print("old spacing: %s" %spacing)
scaling_int_val = math.ceil(self.length / spacing)
spacing = self.length / scaling_int_val
print("new spacing: %s" %spacing)
else:
spacing = spacing
# pattern_map (start, step, count)
# only have to consider x distance
layer_set = []
layer_count = 0
layer_length_vals = []
for pattern in pattern_set:
start, step, count = pattern[0], pattern[1], pattern[2]
if count < 1:
count = 1
layer_vertexes = []
length_vals = []
x_val = start
x_delta = step * spacing
while x_val < self.length:
layer_vertexes.append(Vertex(x_val = x_val))
length_vals.append(x_val)
x_val += x_delta
for i in range(count):
layer_set.append(layer_vertexes)
layer_length_vals.append(length_vals)
layer_count += count
return layer_set, layer_length_vals, layer_count
def curveSplitAtPoints(self, radius, length_vals):
pass
def specialLayerMap(self, spacing, pattern_set, radius, max_val, direction = True):
_, layer_length_vals, layer_count = self.patternGeneration(pattern_set, spacing)
def modBasedDotMap(self, x_spacing = 20.0, y_spacing = 20.0, max_val = 10.0, ellipsoid = True, direction = True, layer_shift = 2.0, shift_a = 0.0, shift_b = 0.0, rot_alfa = 0.0):
if self.periodic:
x_scale_val = 1.0
y_scale_val = 1.0
else:
x_scale_val = 1.0
y_scale_val = 1.0
for pt in self.surface_set:
# distance = pt.distance_function(x_spacing, y_spacing, layer_shift, rot_alfa, x_scale_val, y_scale_val)
distance = pt.distance_function(x_spacing, y_spacing, layer_shift, rot_alfa)
# applying shift values if necessary
if not(shift_a == 0.0):
distance *= shift_a
if not(shift_b == 0.0):
distance += shift_b
# curtaling the distances
if distance < 0.0:
scale_val = 0.0
elif distance > 1.0:
scale_val = max_val
else:
if ellipsoid:
distance = (1 - (1 - distance) ** 2.0) ** .5
scale_val = max_val * distance
pt.warp_pt(scale_val * self.dir)
def layerMap(self, spacing, pattern_set, radius, max_val, direction = True):
# pattern_map (start, step, count)
# only have to consider x distance
layer_set, _, layer_count = self.patternGeneration(pattern_set, spacing)
# subdividing in layers
for pt_i, pt in enumerate(self.surface_set):
layer_index = (pt_i - pt_i % self.div_c) / self.div_c
pattern_layer_index = int(layer_index % layer_count)
dots = layer_set[pattern_layer_index]
dis_set = []
for dot in dots:
dis_set.append(pt.x_distance(dot))
distance = min(dis_set)
if distance < radius:
scale_val = (1 - (distance / radius) ** 2.0) ** .5 * max_val
else:
scale_val = 0.0
pt.warp_pt(scale_val * self.dir)
def dotGen(self, spacing, y_spacing = None, feature_spacing=0.0):
random.seed(PatternMap.DOT_MAP_RND_SEED)
count = 0
x_spacing = 2.0 ** .5 * spacing
if self.periodic:
print("dotGen periodicizing")
print("updating the x_spacing")
print("old x_spacing: %s" %x_spacing)
x_spacing_int = math.ceil(self.length / (x_spacing * 2.0))
x_spacing = (self.length / x_spacing_int) / 2.0
print("new x_spacing: %s" %x_spacing)
if y_spacing == None:
y_spacing = x_spacing * PatternMap.Y_SPACING_FACTOR
dots = []
# setting the start and end conditions of the dot_map
if PatternMap.DOT_MAP_SHIFT:
# print("shifting the pattern in the code as well")
x_shift=PatternMap.DOT_MAP_UV_SHIFT[0] % x_spacing
y_shift=PatternMap.DOT_MAP_UV_SHIFT[1] % y_spacing
x_start=x_shift if x_shift < .5 * x_spacing else x_shift-x_spacing
y_start=y_shift if y_shift < .5 * y_spacing else y_shift-y_spacing
else:
x_start, y_start = 0.0, 0.0
x_end, y_end=self.length + .5 * x_spacing, self.height + .5 * y_spacing
if PatternMap.DOT_MAP_HARD_EASING:
while (x_start < feature_spacing):
x_start += x_spacing
while (x_end > self.length - feature_spacing):
x_end -= x_spacing
x_val=x_start
y_val=y_start
while y_val < y_end:
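# stagger every second row by half the x spacing so the dots pack in a diamond grid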
if count % 2 == 1:
x_val = x_spacing * .5 + x_start
else:
x_val = x_start
while x_val < x_end + .1:
loc_vertex = Vertex(x_val = x_val, y_val = y_val)
dots.append(loc_vertex)
x_val += x_spacing
y_val += y_spacing
count += 1
if self.rnd_cull:
new_dots = []
for dot in dots:
if random.random() < self.rnd_cull_val:
new_dots.append(dot)
dots = new_dots
if self.rnd_inout:
self.in_out_multi = [self.random_inout() for i in range(len(dots))]
if self.rnd_radius:
self.radii = [self.random_rad() for i in range(len(dots))]
if self.rnd_n_val:
self.n_vals = [self.random_n_val() for i in range(len(dots))]
return dots
def dotMap(self, spacing, radius, max_val, direction = None):
dots = self.dotGen(spacing, feature_spacing=radius)
for pt in self.surface_set:
distance_set = []
for dot in dots:
distance = pt.numeric_distance(dot)
distance_set.append(distance)
distance = min(distance_set)
v_i = distance_set.index(distance)
if self.rnd_radius:
radius = self.radii[v_i]
if self.rnd_n_val:
max_val = radius * self.n_vals[v_i]
distance -= radius
if distance < 0:
scale_val = abs(distance) / radius * max_val
else:
scale_val = 0.0
if self.rnd_inout:
scale_val *= self.in_out_multi[v_i]
pt.warp_pt(scale_val * self.dir)
def ellipsoidBumpMap(self, spacing, radius, max_val, direction = None):
dots = self.dotGen(spacing, feature_spacing=radius)
for pt in self.surface_set:
distance_set = []
for dot in dots:
distance = pt.numeric_distance(dot)
distance_set.append(distance)
distance = min(distance_set)
v_i = distance_set.index(distance)
if self.rnd_radius:
radius = self.radii[v_i]
if self.rnd_n_val:
max_val = radius * self.n_vals[v_i]
if distance < radius:
scale_val = (1 - (distance / radius) ** 2.0) ** .5 * max_val
else:
scale_val = 0.0
if self.rnd_inout:
scale_val *= self.in_out_multi[v_i]
pt.warp_pt(scale_val * self.dir)
def getKey(self, item):
return item[0]
def cylinderMap(self, spacing, height, radius, max_val, radius_bot = None):
radius_bot=radius if radius_bot == None else radius_bot
radius_delta = radius - radius_bot
radius_f = 1.0 - radius_delta / radius
dots = self.dotGen(spacing, feature_spacing=max([radius, radius_bot]))
for pt in self.surface_set:
# get closest dot
distance_set = []
for dot in dots:
distance = pt.numeric_distance(dot)
distance_set.append((distance, dot))
# pick the closest dot by its numeric distance (avoids comparing Vertex objects on ties)
distance, dot = min(distance_set, key=self.getKey)
v_i = dots.index(dot)
if self.rnd_radius:
radius = self.radii[v_i]
radius_bot = radius * radius_f
if self.rnd_n_val:
max_val = radius * self.n_vals[v_i]
# y_distance calculation
y_distance = pt.y_val - dot.y_val
# x_distance calculation
x_distance = abs(pt.x_val - dot.x_val)
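# only points inside the cylinder's vertical band get a usable radius; -1 disables the bump there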
if abs(y_distance) + .01 < height * .5 :
local_radius = radius_bot + radius_delta * (y_distance / height + .5)
else:
local_radius = -1
if x_distance < local_radius:
scale_val = (1 - (x_distance / local_radius) ** 2.0) ** .5 * max_val
else:
scale_val = 0.0
if self.rnd_inout:
scale_val *= self.in_out_multi[v_i]
pt.warp_pt(scale_val * self.dir)
def makeCurves(self):
self.pts_set = []
crv_set = []
for layer_i, layer_set in enumerate(self.pattern_set):
pt_set = [vertex.v for vertex in layer_set]
if self.add_other_set:
pt_set = pt_set + self.other_set[layer_i][1:-1] + [pt_set[0]]
if self.periodic:
# print("I am giving you a closed polyline")
pt_set = pt_set + [pt_set[0]]
self.pts_set.append(pt_set)
crv = rg.Polyline(pt_set)
crv_set.append(crv)
self.curved = True
return crv_set
def makeMesh(self, quad = False):
if not(self.curved):
self.makeCurves()
srf_mesh = rg.Mesh()
lay_c = len(self.pts_set)
pt_c = len(self.pts_set[0][:-1])
# adding the vertices
[srf_mesh.Vertices.Add(pt) for pt in self.pts_set[0][:-1]]
for i in range(1, lay_c, 1):
y = i - 1
for pt_i, pt in enumerate(self.pts_set[i][:-1]):
x = pt_i
srf_mesh.Vertices.Add(pt)
v_a = y * pt_c + (x - 1)%pt_c
v_b = (y + 1) * pt_c + (x - 1)%pt_c
v_c = (y + 1) * pt_c + x
v_d = y * pt_c + x
if quad:
srf_mesh.Faces.AddFace(v_a, v_b, v_c, v_d)
else:
srf_mesh.Faces.AddFace(v_a, v_b, v_d)
srf_mesh.Faces.AddFace(v_b, v_c, v_d)
# adding the faces
return srf_mesh
```
#### File: clay_bricks/PatternBrickLibrary/pinLinkingBis.py
```python
import Rhino.Geometry as rg
from copy import deepcopy as dc
from generalFunctions import *
from brickSlicer import Pin
from layerFunctions import LineSet
def lineInterpolate(pt_0, pt_2, count):
delta_pt = (pt_2 - pt_0) / float(count + 1.0)
return [pt_0 + delta_pt * i for i in range(1, count + 1, 1)]
def linkGen(pt_a, pt_b):
distance = pt_a.DistanceTo(pt_b)
end_pt = rg.Point3d((pt_b - pt_a) / distance + pt_a)
return rg.Line(pt_a, end_pt), end_pt
def copyTransformSet(geo_set, trans_matrix):
new_set = []
for geo in geo_set:
tmp_geo = dc(geo)
tmp_geo.Transform(trans_matrix)
new_set.append(tmp_geo)
return new_set
def startEnd(pt, pt_bis, count, alfa):
pt_set = []
link_lines = []
if count > 1:
alfa_delta = ( 6.2831853072 - 2 * alfa ) / (count - 1)
for i in range(count):
loc_angle = alfa + alfa_delta * i
rot_m = rg.Transform.Rotation(loc_angle, pt)
loc_pt = dc(pt_bis)
loc_pt.Transform(rot_m)
pt_set.append(loc_pt)
link_lines.append(rg.Line(pt, loc_pt))
elif count == 0:
pass
else:
# case will consider count == 1
loc_angle = 3.1415927
rot_m = rg.Transform.Rotation(loc_angle, pt)
loc_pt = dc(pt_bis)
loc_pt.Transform(rot_m)
pt_set.append(loc_pt)
link_lines.append(rg.Line(pt, loc_pt))
return link_lines, pt_set
def start(pt, pt_1, count, alfa = .5):
# _, pt_a = linkGen(pt, pt_0)
main_link, pt_b = linkGen(pt, pt_1)
# main_link = rg.Line(pt, pt_1)
link_lines, pt_set = startEnd(pt, pt_b, count, alfa)
pt_set = [pt_b] + pt_set
return main_link, link_lines, pt_set
def end(pt_0, pt, count, alfa = .5):
_, pt_a = linkGen(pt, pt_0)
# main_link, pt_b = linkGen(pt, pt_1)
link_lines, pt_set = startEnd(pt, pt_a, count, alfa)
pt_set = [pt_a] + pt_set
main_link = None
return main_link, link_lines, pt_set
def mid(pt_0, pt, pt_1, count, min_alfa = .25):
_, pt_a = linkGen(pt, pt_0)
main_link, pt_b = linkGen(pt, pt_1)
# main_link = rg.Line(pt, pt_1)
link_lines, pt_set = midDivide(pt_a, pt, pt_b, items=count, cutoff=min_alfa)
pt_set = [pt_a, pt_b] + pt_set
return main_link, link_lines, pt_set
def midDivide(pt_0, pt, pt_1, items = 1, cutoff = .25):
_, pt_0 = linkGen(pt, pt_0)
_, pt_1 = linkGen(pt, pt_1)
circle = rg.Circle(pt, 1.0).ToNurbsCurve()
crvs, _ = layer2ptIntersect(circle, [pt_0, pt_1])
crv_0 = crvs[0]
crv_1 = crvs[1]
crv_0_l = crv_0.GetLength()
crv_1_l = crv_1.GetLength()
if crv_0_l < crv_1_l:
double_split_crv = crv_1
double_crv_l = crv_1_l
single_split_crv = crv_0
else:
double_split_crv = crv_0
double_crv_l = crv_0_l
single_split_crv = crv_1
if items < 2:
double_split_crv.Domain = rg.Interval(0.0, 1.0)
pt_2 = double_split_crv.PointAt(.5)
line_set = [rg.Line(pt, pt_2)]
pt_set = [pt_2]
elif items == 2:
single_split_crv.Domain = rg.Interval(0,1)
pt_2 = single_split_crv.PointAt(.5)
double_split_crv.Domain = rg.Interval(0,1)
pt_3 = double_split_crv.PointAt(.5)
line_set = [rg.Line(pt, pt_2), rg.Line(pt, pt_3)]
pt_set = [pt_2, pt_3]
else:
# when the longer arc is barely more than a half circle (pi ~ 3.1415927),
# fall back to the two-point split
if double_crv_l < 3.1415927 + cutoff:
line_set, pt_set = midDivide(pt_0, pt, pt_1, items = 2)
else:
double_split_crv.Domain = rg.Interval(0,double_crv_l)
single_split_crv.Domain = rg.Interval(0,1.0)
pt_2 = single_split_crv.PointAt(.5)
pt_3 = double_split_crv.PointAt(.5 * 3.1415927)
pt_4 = double_split_crv.PointAt(double_crv_l - .5 * 3.1415927)
line_set = [rg.Line(pt, pt_2), rg.Line(pt, pt_3), rg.Line(pt, pt_4)]
pt_set = [pt_2, pt_3, pt_4]
return line_set, pt_set
def longestSegment(pin_point, pin_pt_set, second_one = False):
c = rg.Circle(pin_point, 1.0).ToNurbsCurve()
crvs, pts = layer2ptIntersect(c, pin_pt_set)
crv_lengths = [crv.GetLength() for crv in crvs]
crv_lengths, crvs = zip(*sorted(zip(crv_lengths, crvs)))
print("this is the list of crv lengths")
print(crv_lengths)
longest_crv = crvs[-1]
second_longest_crv = crvs[-2]
longest_crv.Domain = rg.Interval(0, 1.0)
longest_mid_pt = longest_crv.PointAt(.5)
if second_one:
second_longest_crv.Domain = rg.Interval(0, 1.0)
second_longest_mid_pt = second_longest_crv.PointAt(.5)
split_lines = [
rg.Line(pin_point, second_longest_mid_pt),
rg.Line(pin_point, longest_mid_pt)
]
return split_lines
else:
split_line = rg.Line(pin_point, longest_mid_pt)
return split_line
def createLinks(pin_pts, connection_count = 2, max_main_line = 100.0, start_end_alfa = .5, mid_alfa = .25):
pin_pts = [rg.Point3d(pt.X, pt.Y, 0.0) for pt in pin_pts]
main_links, link_lines, pin_pt_sets = [], [], []
split_lines = []
pin_count = len(pin_pts)
for pt_i, pt in enumerate(pin_pts):
if pt_i == 0:
main_link, link_line_set, pt_set = start(pt, pin_pts[pt_i + 1], connection_count, start_end_alfa)
main_links.append(main_link)
link_lines.extend(link_line_set)
pin_pt_sets.append(pt_set)
elif pt_i == pin_count - 1:
_, link_line_set, pt_set = end(pin_pts[pt_i - 1], pt, connection_count, start_end_alfa)
# main_links.append(main_link)
link_lines.extend(link_line_set)
pin_pt_sets.append(pt_set)
else:
main_link, link_line_set, pt_set = mid(pin_pts[pt_i - 1], pt, pin_pts[pt_i + 1], connection_count, mid_alfa)
main_links.append(main_link)
link_lines.extend(link_line_set)
pin_pt_sets.append(pt_set)
# generating extra line division in case the main lines are too long
b_mid_pts = []
mid_links = []
for i in range(pin_count - 1):
pt_0 = pin_pts[i]
pt_1 = pin_pts[i + 1]
distance = pt_0.DistanceTo(pt_1)
if distance > max_main_line:
_, loc_link_set, _ = start(pt_0, pt_1, 2, alfa = 1.570796)
split_count = int(round(distance / max_main_line))
b_pts = lineInterpolate(pt_0, pt_1, split_count)
loc_mid_pts = []
if len(b_pts) > 1:
# generating mid points between the base points if necessary
for j in range(len(b_pts) - 1):
loc_mid_pts.append(rg.Point3d((b_pts[j] + b_pts[j + 1]) * .5))
for b_pt in b_pts:
trans_m = rg.Transform.Translation(rg.Vector3d(b_pt - pt_0))
mid_links.extend(copyTransformSet(loc_link_set, trans_m))
b_mid_pts.append(loc_mid_pts)
# generating the split lines
for i, pin_pt_set in enumerate(pin_pt_sets):
split_lines.append(longestSegment(pin_pts[i], pin_pt_set))
return main_links, link_lines, split_lines, pin_pt_sets, mid_links, b_mid_pts
def pinMaker(pin_pts, pin_height, bot_rad, top_rad, resolution = 16):
return [Pin(pt, pin_height, bot_rad, top_rad, resolution) for pt in pin_pts]
def extendTrim(inner_crv, outer_crv, line_set):
line_set.trimLines(outer_crv, True)
line_set.trimLines(inner_crv, False)
def linkClosestPoints(link, pts):
extra_pt_set = []
lines = link.createLines()
for line in lines:
for pt in pts:
extra_pt_set.append( line.ClosestPoint(pt, False) )
return extra_pt_set
def joinShape(prev_crv, next_crv, line_set, extend_trim = True, crossing = False):
if extend_trim:
extendTrim(prev_crv, next_crv, line_set)
result_crvs_0, _ = layer2ptIntersect(prev_crv, line_set.Start)
main_crv = curveToPolyline(longestCurve(result_crvs_0))
result_crvs_1, _ = layer2ptIntersect(next_crv, line_set.End)
next_crv = curveToPolyline(longestCurve(result_crvs_1))
main_crv, next_crv = list(main_crv), list(next_crv)
if not(crossing):
pt_set = main_crv + next_crv
else:
crossing_distance = 4.0
start0, start1 = main_crv[0], main_crv[-1]
end0, end1 = next_crv[-1], next_crv[0]
distance0 = start0.DistanceTo(end0)
distance1 = start1.DistanceTo(end1)
exPt0_0 = crossing_distance
exPt0_1 = distance0 - crossing_distance
exPt1_0 = crossing_distance
exPt1_1 = distance1 - crossing_distance
# swapping & reversing set 1
pt0_0 = interpolatePts(start0, end0, exPt0_0)
pt0_1 = interpolatePts(start0, end0, exPt0_1)
pt1_1 = interpolatePts(start1, end1, exPt1_0)
pt1_0 = interpolatePts(start1, end1, exPt1_1)
pt_set = main_crv + [pt0_0, pt0_1] + next_crv + [pt1_0, pt1_1]
return pt_set + [pt_set[0]]
def linkSetGeneration(lines, spacing, amount = 2):
link_set = []
for line in lines:
loc_line_set = LineSet(line, spacing, amount)
loc_line_set.createLines()
link_set.append(loc_line_set)
return link_set
def makeMainShape(pins, main_link_set, height, bottom_shift = None):
main_crv = pins[0].createSlice(height).ToPolylineCurve()
if bottom_shift is not None:
print("I am offsetting the bottom layer with %s" % bottom_shift)
main_crv = offsetCurveSet(main_crv, bottom_shift, "outside", count = 2)[1].ToNurbsCurve()
inner_crvs = [dc(main_crv)]
mv_m = rg.Transform.Translation(rg.Vector3d(0,0,height))
main_links = []
current_pt_set = [] # guard for the single-pin case, where main_link_set is empty
for i, main_link in enumerate(main_link_set):
con_line_set = dc(main_link)
con_line_set.Transform(mv_m)
next_crv = pins[i + 1].createSlice(height).ToPolylineCurve()
if bottom_shift is not None:
print("I am offsetting the bottom layer with %s" % bottom_shift)
next_crv = offsetCurveSet(next_crv, bottom_shift, "outside", count = 2)[1].ToNurbsCurve()
inner_crvs.append(dc(next_crv))
current_pt_set = joinShape(main_crv, next_crv, con_line_set)
main_crv = rg.Polyline(current_pt_set).ToPolylineCurve()
main_links.append(con_line_set)
return main_crv, current_pt_set, main_links, inner_crvs
def addLink(polycrv, side_link, open_end = True, start_pts = False, safety_dis = True, start_dis = (20.0, 15.0)):
result_crvs, _ = layer2ptIntersect(polycrv, side_link.Start)
pt_set = list(curveToPolyline(longestCurve(result_crvs)).ToArray())
if pt_set[0].DistanceTo(side_link.Start[1]) < .001:
# start_pts = list(side_link.Start).reverse()
end_pts = list(side_link.End)
end_pts.reverse()
else:
# start_pts = list(side_link.Start)
end_pts = list(side_link.End)
if open_end:
pt_set = [end_pts[1]] + pt_set + [end_pts[0]]
elif start_pts:
if safety_dis:
start_dis = [start_dis[0], start_dis[1], start_dis[1], start_dis[0]]
vecs = [
rg.Vector3d(pt_set[-2] - pt_set[-1]),
rg.Vector3d(end_pts[0] - pt_set[-1]),
rg.Vector3d(end_pts[1] - pt_set[0]),
rg.Vector3d(pt_set[1] - pt_set[0])
]
[vec.Unitize() for vec in vecs]
vecs = [vec * start_dis[vec_i] for vec_i, vec in enumerate(vecs)]
b_pts = [pt_set[-1], pt_set[-1], pt_set[0], pt_set[0]]
ex_pts = [rg.Point3d(b_pt + vecs[i]) for i, b_pt in enumerate(b_pts)]
ex_pts = ex_pts[:2] + end_pts + ex_pts[2:]
print(len(ex_pts))
pt_set = pt_set[1:-1] + ex_pts # + [pt_set[1]]
else:
pt_set = pt_set[1:-1] + end_pts + [pt_set[1]]
else:
# pt_set = pt_set + side_link.joinLines(False) + [pt_set[0]]
pt_set = pt_set + end_pts + [pt_set[0]]
return pt_set
def makeSideLinks(main_crv, exterior_crv, link_set, height):
mv_m = rg.Transform.Translation(rg.Vector3d(0,0,height))
side_link_set = []
main_crv_pt_set = []
for side_link in link_set:
loc_side_link = dc(side_link)
loc_side_link.Transform(mv_m)
extendTrim(main_crv, exterior_crv, loc_side_link)
side_link_set.append(loc_side_link)
main_crv_pt_set = addLink(main_crv, loc_side_link, False)
main_crv = rg.Polyline(main_crv_pt_set).ToPolylineCurve()
return main_crv_pt_set, side_link_set
def makeInnerCurve(inner_crvs, main_links, side_links, end_link = None, loc_extra_pts = None, mid_links = None, diamond_settings = (False, False, (15.0, 10.0))):
inner_crv_count = len(inner_crvs)
main_crv = dc(inner_crvs[0])
# reading in the diamond settings
start_mid_pts, safety_dis, start_dis = diamond_settings
if inner_crv_count > 1:
print("I have to do more!")
for i in range(inner_crv_count - 1):
current_pt_set = joinShape(main_crv, inner_crvs[i + 1], main_links[i], False, True)
main_crv = rg.Polyline(current_pt_set).ToPolylineCurve()
if loc_extra_pts is not None:
for pt in loc_extra_pts:
_, t_val = main_crv.ClosestPoint(pt)
main_crv.ChangeClosedCurveSeam(t_val)
for side_link in side_links:
print("I am adding a side_link!")
pt_set = addLink(main_crv, side_link, open_end = False)
main_crv = rg.Polyline(pt_set + [pt_set[0]]).ToPolylineCurve()
for mid_link in (mid_links or []): # mid_links defaults to None
print("I am here?")
pt_set = addLink(main_crv, mid_link, open_end = False, start_pts = start_mid_pts, safety_dis = safety_dis, start_dis = start_dis)
main_crv = rg.Polyline(pt_set + [pt_set[0]]).ToPolylineCurve()
if end_link is not None:
print("I am adding the end_link!")
pt_set = addLink(main_crv, end_link, True)
main_crv = rg.Polyline(pt_set).ToPolylineCurve()
return main_crv
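# Rough intended flow of the helpers above (inferred from the signatures, not
# documented by the author): pinMaker builds the Pin geometry for each pin point,
# createLinks lays out the connection lines between pin centres, linkSetGeneration
# turns those lines into LineSet objects, and makeMainShape / makeSideLinks /
# makeInnerCurve then join the per-pin slices and the link offsets into one
# closed print path per layer.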
# def createLinkChain(pins, height, main_links, link_lines, split_lines, outer_part, ):
```
#### File: PatternBrickLibrary/pure_python_library/pp_distance_functions.py
```python
import math
class NoneFunction():
def get_val(self, _=None):
return 1.0
class SimpleSine():
def __init__(self, period_x=1.0, period_y=None, period_z=None, amplitude = 0.0, warp=None):
# note: the warp argument is accepted but not used by this class
self.p_x=period_x
self.a=amplitude
if period_y is None:
self.v_attribute="1D"
else:
self.p_y=period_y
self.v_attribute="2D"
if period_y is not None and period_z is not None:
self.v_attribute="3D"
self.p_z=period_z
def get_val(self, vertex):
if self.v_attribute=="1D":
return math.sin(vertex.numeric1D / self.p_x) * self.a
elif self.v_attribute=="2D":
u, v = vertex.numeric2D.tuples
return math.sin(u / self.p_x + v / self.p_y) * self.a
elif self.v_attribute=="3D":
x, y, z = vertex.numeric3D
return math.sin(x / self.p_x + y / self.p_y + z / self.p_z) * self.a
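# --- Minimal usage sketch for SimpleSine (assumption: vertex objects expose a
# `numeric1D` attribute, which is exactly what get_val reads in the 1D branch;
# `_Vertex1D` below is a hypothetical stand-in, not part of the library).
import collections
_Vertex1D = collections.namedtuple("_Vertex1D", "numeric1D")
_demo_sine = SimpleSine(period_x=5.0, amplitude=2.0)
_demo_offset = _demo_sine.get_val(_Vertex1D(1.25)) # == 2.0 * math.sin(1.25 / 5.0)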
```
#### File: 2.92/bl_ui/space_sequencer.py
```python
import sys
import typing
import bpy_types
import bl_ui.space_toolsystem_common
import bl_ui.properties_grease_pencil_common
import rna_prop_ui
class SEQUENCER_HT_header(bpy_types.Header, bpy_types._GenericUI):
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_HT_tool_header(bpy_types.Header, bpy_types._GenericUI):
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_tool_settings(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_add(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
bl_translation_context = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_add_effect(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_add_empty(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_add_transitions(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_change(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_context_menu(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_editor_menus(bpy_types.Menu, bpy_types._GenericUI):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_marker(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_navigation(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_preview_zoom(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_proxy(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_range(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_select(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_select_channel(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_select_handle(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_select_linked(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_effect(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_image_transform(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_input(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_lock_mute(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_movie(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_transform(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_view(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_view_cache(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_active_tool(
bl_ui.space_toolsystem_common.ToolActivePanelHelper, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_overlay(bpy_types.Panel, bpy_types._GenericUI):
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
bl_ui_units_x = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_preview_overlay(bpy_types.Panel, bpy_types._GenericUI):
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_sequencer_overlay(bpy_types.Panel, bpy_types._GenericUI):
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SequencerButtonsPanel:
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def has_sequencer(self, context):
'''
'''
pass
def poll(self, context):
'''
'''
pass
class SequencerButtonsPanel_Output:
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def has_preview(self, context):
'''
'''
pass
def poll(self, context):
'''
'''
pass
class SEQUENCER_PT_adjust_color(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_comp(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_crop(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_sound(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_transform(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_video(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_cache_settings(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_custom_props(SequencerButtonsPanel,
rna_prop_ui.PropertyPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_order = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_effect(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_effect_text_layout(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_effect_text_style(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_mask(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_modifiers(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_proxy_settings(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_scene(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_sound(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_source(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_strip(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_strip_cache(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_strip_proxy(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_time(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header_preset(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_annotation(
SequencerButtonsPanel_Output,
bl_ui.properties_grease_pencil_common.AnnotationDataPanel,
bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def draw_layers(self, context, layout, gpd):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_annotation_onion(
SequencerButtonsPanel_Output,
bl_ui.properties_grease_pencil_common.AnnotationOnionSkin,
bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_frame_overlay(SequencerButtonsPanel_Output, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_preview(SequencerButtonsPanel_Output, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_view(SequencerButtonsPanel_Output, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_view_safe_areas(SequencerButtonsPanel_Output,
bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_view_safe_areas_center_cut(
SequencerButtonsPanel_Output, bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
def act_strip(context):
'''
'''
pass
def draw_color_balance(layout, color_balance):
'''
'''
pass
def selected_sequences_len(context):
'''
'''
pass
```
#### File: bpy/ops/clip.py
```python
import sys
import typing
import bpy.types
def add_marker(location: typing.List[float] = (0.0, 0.0)):
''' Place new marker at specified location
:param location: Location, Location of marker on frame
:type location: typing.List[float]
'''
pass
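def _example_add_marker():
    # Illustrative sketch, not part of the generated stubs: assumes Blender is
    # running with a movie clip loaded and that the call is made from a Movie
    # Clip Editor context. The location is in normalized frame coordinates.
    import bpy
    bpy.ops.clip.add_marker(location=(0.5, 0.5))  # place a marker at the frame center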
def add_marker_at_click():
''' Place new marker at the desired (clicked) position
'''
pass
def add_marker_move(CLIP_OT_add_marker=None, TRANSFORM_OT_translate=None):
''' Add new marker and move it on movie
:param CLIP_OT_add_marker: Add Marker, Place new marker at specified location
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def add_marker_slide(CLIP_OT_add_marker=None, TRANSFORM_OT_translate=None):
''' Add new marker and slide it with mouse until mouse button release
:param CLIP_OT_add_marker: Add Marker, Place new marker at specified location
:param TRANSFORM_OT_translate: Move, Move selected items
'''
pass
def apply_solution_scale(distance: float = 0.0):
    ''' Apply scale on the solution itself so that the distance between the selected tracks equals the desired value
:param distance: Distance, Distance between selected tracks
:type distance: float
'''
pass
def bundles_to_mesh():
    ''' Create vertex cloud using coordinates of reconstructed tracks (source: startup/bl_operators/clip.py:299, https://developer.blender.org/diffusion/B/browse/master/release/scripts/startup/bl_operators/clip.py$299)
'''
pass
def camera_preset_add(name: str = "",
remove_name: bool = False,
remove_active: bool = False,
use_focal_length: bool = True):
''' Add or remove a Tracking Camera Intrinsics Preset
:param name: Name, Name of the preset, used to make the path name
:type name: str
:param remove_name: remove_name
:type remove_name: bool
:param remove_active: remove_active
:type remove_active: bool
:param use_focal_length: Include Focal Length, Include focal length into the preset
:type use_focal_length: bool
'''
pass
def change_frame(frame: int = 0):
''' Interactively change the current frame number
:param frame: Frame
:type frame: int
'''
pass
def clean_tracks(frames: int = 0,
error: float = 0.0,
action: typing.Union[int, str] = 'SELECT'):
''' Clean tracks with high error values or few frames
:param frames: Tracked Frames, Effect on tracks which are tracked less than specified amount of frames
:type frames: int
:param error: Reprojection Error, Effect on tracks which have got larger reprojection error
:type error: float
:param action: Action, Cleanup action to execute * SELECT Select, Select unclean tracks. * DELETE_TRACK Delete Track, Delete unclean tracks. * DELETE_SEGMENTS Delete Segments, Delete unclean segments of tracks.
:type action: typing.Union[int, str]
'''
pass
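def _example_clean_tracks():
    # Illustrative sketch, not part of the generated stubs: deletes tracks that
    # were tracked for fewer than 10 frames or whose reprojection error exceeds
    # 2.0; the thresholds are arbitrary example values.
    import bpy
    bpy.ops.clip.clean_tracks(frames=10, error=2.0, action='DELETE_TRACK')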
def clear_solution():
''' Clear all calculated data
'''
pass
def clear_track_path(action: typing.Union[int, str] = 'REMAINED',
clear_active: bool = False):
''' Clear tracks after/before current position or clear the whole track
:param action: Action, Clear action to execute * UPTO Clear Up To, Clear path up to current frame. * REMAINED Clear Remained, Clear path at remaining frames (after current). * ALL Clear All, Clear the whole path.
:type action: typing.Union[int, str]
:param clear_active: Clear Active, Clear active track only instead of all selected tracks
:type clear_active: bool
'''
pass
def constraint_to_fcurve():
    ''' Create F-Curves for object which will copy object's movement caused by this constraint (source: startup/bl_operators/clip.py:543, https://developer.blender.org/diffusion/B/browse/master/release/scripts/startup/bl_operators/clip.py$543)
'''
pass
def copy_tracks():
''' Copy selected tracks to clipboard
'''
pass
def create_plane_track():
''' Create new plane track out of selected point tracks
'''
pass
def cursor_set(location: typing.List[float] = (0.0, 0.0)):
''' Set 2D cursor location
:param location: Location, Cursor location in normalized clip coordinates
:type location: typing.List[float]
'''
pass
def delete_marker():
''' Delete marker for current frame from selected tracks
'''
pass
def delete_proxy():
    ''' Delete movie clip proxy files from the hard drive (source: startup/bl_operators/clip.py:369, https://developer.blender.org/diffusion/B/browse/master/release/scripts/startup/bl_operators/clip.py$369)
'''
pass
def delete_track():
''' Delete selected tracks
'''
pass
def detect_features(placement: typing.Union[int, str] = 'FRAME',
margin: int = 16,
threshold: float = 0.5,
min_distance: int = 120):
''' Automatically detect features and place markers to track
:param placement: Placement, Placement for detected features * FRAME Whole Frame, Place markers across the whole frame. * INSIDE_GPENCIL Inside Annotated Area, Place markers only inside areas outlined with the Annotation tool. * OUTSIDE_GPENCIL Outside Annotated Area, Place markers only outside areas outlined with the Annotation tool.
:type placement: typing.Union[int, str]
:param margin: Margin, Only features further than margin pixels from the image edges are considered
:type margin: int
:param threshold: Threshold, Threshold level to consider feature good enough for tracking
:type threshold: float
:param min_distance: Distance, Minimal distance accepted between two features
:type min_distance: int
'''
pass
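def _example_detect_features():
    # Illustrative sketch, not part of the generated stubs: auto-places markers
    # across the whole frame, keeping them 16 px from the image edges and at
    # least 120 px apart (the defaults above, spelled out for clarity).
    import bpy
    bpy.ops.clip.detect_features(placement='FRAME', margin=16,
                                 threshold=0.5, min_distance=120)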
def disable_markers(action: typing.Union[int, str] = 'DISABLE'):
''' Disable/enable selected markers
:param action: Action, Disable action to execute * DISABLE Disable, Disable selected markers. * ENABLE Enable, Enable selected markers. * TOGGLE Toggle, Toggle disabled flag for selected markers.
:type action: typing.Union[int, str]
'''
pass
def dopesheet_select_channel(location: typing.List[float] = (0.0, 0.0),
extend: bool = False):
''' Select movie tracking channel
:param location: Location, Mouse location to select channel
:type location: typing.List[float]
:param extend: Extend, Extend selection rather than clearing the existing selection
:type extend: bool
'''
pass
def dopesheet_view_all():
''' Reset viewable area to show full keyframe range
'''
pass
def filter_tracks(track_threshold: float = 5.0):
    ''' Filter tracks which have weird-looking spikes in their motion curves
:param track_threshold: Track Threshold, Filter Threshold to select problematic tracks
:type track_threshold: float
'''
pass
def frame_jump(position: typing.Union[int, str] = 'PATHSTART'):
''' Jump to special frame
:param position: Position, Position to jump to * PATHSTART Path Start, Jump to start of current path. * PATHEND Path End, Jump to end of current path. * FAILEDPREV Previous Failed, Jump to previous failed frame. * FAILNEXT Next Failed, Jump to next failed frame.
:type position: typing.Union[int, str]
'''
pass
def graph_center_current_frame():
''' Scroll view so current frame would be centered
'''
pass
def graph_delete_curve():
''' Delete track corresponding to the selected curve
'''
pass
def graph_delete_knot():
''' Delete curve knots
'''
pass
def graph_disable_markers(action: typing.Union[int, str] = 'DISABLE'):
''' Disable/enable selected markers
:param action: Action, Disable action to execute * DISABLE Disable, Disable selected markers. * ENABLE Enable, Enable selected markers. * TOGGLE Toggle, Toggle disabled flag for selected markers.
:type action: typing.Union[int, str]
'''
pass
def graph_select(location: typing.List[float] = (0.0, 0.0),
extend: bool = False):
''' Select graph curves
:param location: Location, Mouse location to select nearest entity
:type location: typing.List[float]
:param extend: Extend, Extend selection rather than clearing the existing selection
:type extend: bool
'''
pass
def graph_select_all_markers(action: typing.Union[int, str] = 'TOGGLE'):
''' Change selection of all markers of active track
:param action: Action, Selection action to execute * TOGGLE Toggle, Toggle selection for all elements. * SELECT Select, Select all elements. * DESELECT Deselect, Deselect all elements. * INVERT Invert, Invert selection of all elements.
:type action: typing.Union[int, str]
'''
pass
def graph_select_box(xmin: int = 0,
xmax: int = 0,
ymin: int = 0,
ymax: int = 0,
wait_for_input: bool = True,
deselect: bool = False,
extend: bool = True):
''' Select curve points using box selection
:param xmin: X Min
:type xmin: int
:param xmax: X Max
:type xmax: int
:param ymin: Y Min
:type ymin: int
:param ymax: Y Max
:type ymax: int
:param wait_for_input: Wait for Input
:type wait_for_input: bool
:param deselect: Deselect, Deselect rather than select items
:type deselect: bool
:param extend: Extend, Extend selection instead of deselecting everything first
:type extend: bool
'''
pass
def graph_view_all():
''' View all curves in editor
'''
pass
def hide_tracks(unselected: bool = False):
''' Hide selected tracks
:param unselected: Unselected, Hide unselected tracks
:type unselected: bool
'''
pass
def hide_tracks_clear():
''' Clear hide selected tracks
'''
pass
def join_tracks():
''' Join selected tracks
'''
pass
def keyframe_delete():
''' Delete a keyframe from selected tracks at current frame
'''
pass
def keyframe_insert():
''' Insert a keyframe to selected tracks at current frame
'''
pass
def lock_selection_toggle():
''' Toggle Lock Selection option of the current clip editor
'''
pass
def lock_tracks(action: typing.Union[int, str] = 'LOCK'):
''' Lock/unlock selected tracks
:param action: Action, Lock action to execute * LOCK Lock, Lock selected tracks. * UNLOCK Unlock, Unlock selected tracks. * TOGGLE Toggle, Toggle locked flag for selected tracks.
:type action: typing.Union[int, str]
'''
pass
def mode_set(mode: typing.Union[int, str] = 'TRACKING'):
''' Set the clip interaction mode
:param mode: Mode * TRACKING Tracking, Show tracking and solving tools. * MASK Mask, Show mask editing tools.
:type mode: typing.Union[int, str]
'''
pass
def open(directory: str = "",
files: typing.
Union[typing.Dict[str, 'bpy.types.OperatorFileListElement'], typing.
List['bpy.types.OperatorFileListElement'],
'bpy_prop_collection'] = None,
hide_props_region: bool = True,
filter_blender: bool = False,
filter_backup: bool = False,
filter_image: bool = True,
filter_movie: bool = True,
filter_python: bool = False,
filter_font: bool = False,
filter_sound: bool = False,
filter_text: bool = False,
filter_archive: bool = False,
filter_btx: bool = False,
filter_collada: bool = False,
filter_alembic: bool = False,
filter_usd: bool = False,
filter_volume: bool = False,
filter_folder: bool = True,
filter_blenlib: bool = False,
filemode: int = 9,
relative_path: bool = True,
show_multiview: bool = False,
use_multiview: bool = False,
display_type: typing.Union[int, str] = 'DEFAULT',
sort_method: typing.Union[int, str] = ''):
''' Load a sequence of frames or a movie file
:param directory: Directory, Directory of the file
:type directory: str
:param files: Files
:type files: typing.Union[typing.Dict[str, 'bpy.types.OperatorFileListElement'], typing.List['bpy.types.OperatorFileListElement'], 'bpy_prop_collection']
:param hide_props_region: Hide Operator Properties, Collapse the region displaying the operator settings
:type hide_props_region: bool
:param filter_blender: Filter .blend files
:type filter_blender: bool
:param filter_backup: Filter .blend files
:type filter_backup: bool
:param filter_image: Filter image files
:type filter_image: bool
:param filter_movie: Filter movie files
:type filter_movie: bool
:param filter_python: Filter python files
:type filter_python: bool
:param filter_font: Filter font files
:type filter_font: bool
:param filter_sound: Filter sound files
:type filter_sound: bool
:param filter_text: Filter text files
:type filter_text: bool
:param filter_archive: Filter archive files
:type filter_archive: bool
:param filter_btx: Filter btx files
:type filter_btx: bool
:param filter_collada: Filter COLLADA files
:type filter_collada: bool
:param filter_alembic: Filter Alembic files
:type filter_alembic: bool
:param filter_usd: Filter USD files
:type filter_usd: bool
:param filter_volume: Filter OpenVDB volume files
:type filter_volume: bool
:param filter_folder: Filter folders
:type filter_folder: bool
:param filter_blenlib: Filter Blender IDs
:type filter_blenlib: bool
:param filemode: File Browser Mode, The setting for the file browser mode to load a .blend file, a library or a special file
:type filemode: int
:param relative_path: Relative Path, Select the file relative to the blend file
:type relative_path: bool
:param show_multiview: Enable Multi-View
:type show_multiview: bool
:param use_multiview: Use Multi-View
:type use_multiview: bool
:param display_type: Display Type * DEFAULT Default, Automatically determine display type for files. * LIST_VERTICAL Short List, Display files as short list. * LIST_HORIZONTAL Long List, Display files as a detailed list. * THUMBNAIL Thumbnails, Display files as thumbnails.
:type display_type: typing.Union[int, str]
:param sort_method: File sorting mode
:type sort_method: typing.Union[int, str]
'''
pass
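def _example_open_clip():
    # Illustrative sketch, not part of the generated stubs: the directory and
    # file name are placeholders, and a Clip Editor context is assumed.
    import bpy
    bpy.ops.clip.open(directory="//footage/",
                      files=[{"name": "shot_010.mov"}],
                      relative_path=True)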
def paste_tracks():
''' Paste tracks from clipboard
'''
pass
def prefetch():
''' Prefetch frames from disk for faster playback/tracking
'''
pass
def rebuild_proxy():
''' Rebuild all selected proxies and timecode indices in the background
'''
pass
def refine_markers(backwards: bool = False):
    ''' Refine selected markers' positions by running the tracker from the track's reference to the current frame
:param backwards: Backwards, Do backwards tracking
:type backwards: bool
'''
pass
def reload():
''' Reload clip
'''
pass
def select(extend: bool = False,
deselect_all: bool = False,
location: typing.List[float] = (0.0, 0.0)):
''' Select tracking markers
:param extend: Extend, Extend selection rather than clearing the existing selection
:type extend: bool
:param deselect_all: Deselect On Nothing, Deselect all when nothing under the cursor
:type deselect_all: bool
:param location: Location, Mouse location in normalized coordinates, 0.0 to 1.0 is within the image bounds
:type location: typing.List[float]
'''
pass
def select_all(action: typing.Union[int, str] = 'TOGGLE'):
''' Change selection of all tracking markers
:param action: Action, Selection action to execute * TOGGLE Toggle, Toggle selection for all elements. * SELECT Select, Select all elements. * DESELECT Deselect, Deselect all elements. * INVERT Invert, Invert selection of all elements.
:type action: typing.Union[int, str]
'''
pass
def select_box(xmin: int = 0,
xmax: int = 0,
ymin: int = 0,
ymax: int = 0,
wait_for_input: bool = True,
mode: typing.Union[int, str] = 'SET'):
''' Select markers using box selection
:param xmin: X Min
:type xmin: int
:param xmax: X Max
:type xmax: int
:param ymin: Y Min
:type ymin: int
:param ymax: Y Max
:type ymax: int
:param wait_for_input: Wait for Input
:type wait_for_input: bool
:param mode: Mode * SET Set, Set a new selection. * ADD Extend, Extend existing selection. * SUB Subtract, Subtract existing selection.
:type mode: typing.Union[int, str]
'''
pass
def select_circle(x: int = 0,
y: int = 0,
radius: int = 25,
wait_for_input: bool = True,
mode: typing.Union[int, str] = 'SET'):
''' Select markers using circle selection
:param x: X
:type x: int
:param y: Y
:type y: int
:param radius: Radius
:type radius: int
:param wait_for_input: Wait for Input
:type wait_for_input: bool
:param mode: Mode * SET Set, Set a new selection. * ADD Extend, Extend existing selection. * SUB Subtract, Subtract existing selection.
:type mode: typing.Union[int, str]
'''
pass
def select_grouped(group: typing.Union[int, str] = 'ESTIMATED'):
''' Select all tracks from specified group
    :param group: Group, Group of tracks to select * KEYFRAMED Keyframed Tracks, Select all keyframed tracks. * ESTIMATED Estimated Tracks, Select all estimated tracks. * TRACKED Tracked Tracks, Select all tracked tracks. * LOCKED Locked Tracks, Select all locked tracks. * DISABLED Disabled Tracks, Select all disabled tracks. * COLOR Tracks with Same Color, Select all tracks with same color as active track. * FAILED Failed Tracks, Select all tracks which failed to be reconstructed.
:type group: typing.Union[int, str]
'''
pass
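def _example_select_failed_tracks():
    # Illustrative sketch, not part of the generated stubs: clears the current
    # selection and then selects only the tracks whose reconstruction failed,
    # e.g. as a prelude to deleting or re-tracking them.
    import bpy
    bpy.ops.clip.select_all(action='DESELECT')
    bpy.ops.clip.select_grouped(group='FAILED')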
def select_lasso(path: typing.Union[
typing.Dict[str, 'bpy.types.OperatorMousePath'], typing.
List['bpy.types.OperatorMousePath'], 'bpy_prop_collection'] = None,
mode: typing.Union[int, str] = 'SET'):
''' Select markers using lasso selection
:param path: Path
:type path: typing.Union[typing.Dict[str, 'bpy.types.OperatorMousePath'], typing.List['bpy.types.OperatorMousePath'], 'bpy_prop_collection']
:param mode: Mode * SET Set, Set a new selection. * ADD Extend, Extend existing selection. * SUB Subtract, Subtract existing selection.
:type mode: typing.Union[int, str]
'''
pass
def set_active_clip():
    ''' Undocumented, consider contributing (https://developer.blender.org/T51061). (source: startup/bl_operators/clip.py:228, https://developer.blender.org/diffusion/B/browse/master/release/scripts/startup/bl_operators/clip.py$228)
'''
pass
def set_axis(axis: typing.Union[int, str] = 'X'):
''' Set direction of scene axis rotating camera (or its parent if present) and assume selected track lies on real axis, joining it with the origin
    :param axis: Axis, Axis to use to align bundle along * X X, Align bundle along X axis. * Y Y, Align bundle along Y axis.
:type axis: typing.Union[int, str]
'''
pass
def set_center_principal():
''' Set optical center to center of footage
'''
pass
def set_origin(use_median: bool = False):
''' Set active marker as origin by moving camera (or its parent if present) in 3D space
:param use_median: Use Median, Set origin to median point of selected bundles
:type use_median: bool
'''
pass
def set_plane(plane: typing.Union[int, str] = 'FLOOR'):
''' Set plane based on 3 selected bundles by moving camera (or its parent if present) in 3D space
:param plane: Plane, Plane to be used for orientation * FLOOR Floor, Set floor plane. * WALL Wall, Set wall plane.
:type plane: typing.Union[int, str]
'''
pass
def set_scale(distance: float = 0.0):
''' Set scale of scene by scaling camera (or its parent if present)
:param distance: Distance, Distance between selected tracks
:type distance: float
'''
pass
def set_scene_frames():
''' Set scene's start and end frame to match clip's start frame and length
'''
pass
def set_solution_scale(distance: float = 0.0):
''' Set object solution scale using distance between two selected tracks
:param distance: Distance, Distance between selected tracks
:type distance: float
'''
pass
def set_solver_keyframe(keyframe: typing.Union[int, str] = 'KEYFRAME_A'):
''' Set keyframe used by solver
:param keyframe: Keyframe, Keyframe to set
:type keyframe: typing.Union[int, str]
'''
pass
def set_viewport_background():
    ''' Set current movie clip as a camera background in 3D Viewport (works only when a 3D Viewport is visible) (source: startup/bl_operators/clip.py:433, https://developer.blender.org/diffusion/B/browse/master/release/scripts/startup/bl_operators/clip.py$433)
'''
pass
def setup_tracking_scene():
    ''' Prepare scene for compositing 3D objects into this footage (source: startup/bl_operators/clip.py:997, https://developer.blender.org/diffusion/B/browse/master/release/scripts/startup/bl_operators/clip.py$997)
'''
pass
def slide_marker(offset: typing.List[float] = (0.0, 0.0)):
''' Slide marker areas
:param offset: Offset, Offset in floating-point units, 1.0 is the width and height of the image
:type offset: typing.List[float]
'''
pass
def slide_plane_marker():
''' Slide plane marker areas
'''
pass
def solve_camera():
''' Solve camera motion from tracks
'''
pass
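def _example_solve_camera():
    # Illustrative sketch, not part of the generated stubs: a typical call order
    # once enough markers have been tracked. Assumes an active movie clip and a
    # Movie Clip Editor context.
    import bpy
    bpy.ops.clip.set_solver_keyframe(keyframe='KEYFRAME_A')
    bpy.ops.clip.solve_camera()  # reconstruct the camera motion from the tracks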
def stabilize_2d_add():
''' Add selected tracks to 2D translation stabilization
'''
pass
def stabilize_2d_remove():
''' Remove selected track from translation stabilization
'''
pass
def stabilize_2d_rotation_add():
''' Add selected tracks to 2D rotation stabilization
'''
pass
def stabilize_2d_rotation_remove():
''' Remove selected track from rotation stabilization
'''
pass
def stabilize_2d_rotation_select():
''' Select tracks which are used for rotation stabilization
'''
pass
def stabilize_2d_select():
''' Select tracks which are used for translation stabilization
'''
pass
def track_color_preset_add(name: str = "",
remove_name: bool = False,
remove_active: bool = False):
''' Add or remove a Clip Track Color Preset
:param name: Name, Name of the preset, used to make the path name
:type name: str
:param remove_name: remove_name
:type remove_name: bool
:param remove_active: remove_active
:type remove_active: bool
'''
pass
def track_copy_color():
''' Copy color to all selected tracks
'''
pass
def track_markers(backwards: bool = False, sequence: bool = False):
''' Track selected markers
:param backwards: Backwards, Do backwards tracking
:type backwards: bool
:param sequence: Track Sequence, Track marker during image sequence rather than single image
:type sequence: bool
'''
pass
def track_settings_as_default():
''' Copy tracking settings from active track to default settings :file: startup/bl_operators/clip.py\:1028 <https://developer.blender.org/diffusion/B/browse/master/release/scripts/startup/bl_operators/clip.py$1028> _
'''
pass
def track_settings_to_track():
''' Copy tracking settings from active track to selected tracks :file: startup/bl_operators/clip.py\:1076 <https://developer.blender.org/diffusion/B/browse/master/release/scripts/startup/bl_operators/clip.py$1076> _
'''
pass
def track_to_empty():
''' Create an Empty object which will be copying movement of active track :file: startup/bl_operators/clip.py\:275 <https://developer.blender.org/diffusion/B/browse/master/release/scripts/startup/bl_operators/clip.py$275> _
'''
pass
def tracking_object_new():
''' Add new object for tracking
'''
pass
def tracking_object_remove():
''' Remove object for tracking
'''
pass
def tracking_settings_preset_add(name: str = "",
remove_name: bool = False,
remove_active: bool = False):
''' Add or remove a motion tracking settings preset
:param name: Name, Name of the preset, used to make the path name
:type name: str
:param remove_name: remove_name
:type remove_name: bool
:param remove_active: remove_active
:type remove_active: bool
'''
pass
def view_all(fit_view: bool = False):
''' View whole image with markers
:param fit_view: Fit View, Fit frame to the viewport
:type fit_view: bool
'''
pass
def view_center_cursor():
''' Center the view so that the cursor is in the middle of the view
'''
pass
def view_ndof():
''' Use a 3D mouse device to pan/zoom the view
'''
pass
def view_pan(offset: typing.List[float] = (0.0, 0.0)):
''' Pan the view
:param offset: Offset, Offset in floating-point units, 1.0 is the width and height of the image
:type offset: typing.List[float]
'''
pass
def view_selected():
''' View all selected elements
'''
pass
def view_zoom(factor: float = 0.0, use_cursor_init: bool = True):
''' Zoom in/out the view
:param factor: Factor, Zoom factor, values higher than 1.0 zoom in, lower values zoom out
:type factor: float
:param use_cursor_init: Use Mouse Position, Allow the initial mouse position to be used
:type use_cursor_init: bool
'''
pass
def view_zoom_in(location: typing.List[float] = (0.0, 0.0)):
''' Zoom in the view
:param location: Location, Cursor location in screen coordinates
:type location: typing.List[float]
'''
pass
def view_zoom_out(location: typing.List[float] = (0.0, 0.0)):
''' Zoom out the view
:param location: Location, Cursor location in normalized (0.0 to 1.0) coordinates
:type location: typing.List[float]
'''
pass
def view_zoom_ratio(ratio: float = 0.0):
''' Set the zoom ratio (based on clip size)
:param ratio: Ratio, Zoom ratio, 1.0 is 1:1, higher is zoomed in, lower is zoomed out
:type ratio: float
'''
pass
```
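The tracking operators above are normally chained together from the Movie Clip Editor. Below is a minimal, hedged sketch of that workflow; it assumes an active movie clip with tracks already placed and selected, and a Clip Editor context (context overrides are omitted for brevity).

```python
import bpy

# Hedged sketch of a typical motion-tracking sequence; assumes a movie clip is
# loaded, markers are selected, and the calls run in a Movie Clip Editor context.
bpy.ops.clip.track_markers(backwards=False, sequence=True)  # track markers through the sequence
bpy.ops.clip.solve_camera()                                 # solve camera motion from the tracks
bpy.ops.clip.set_scene_frames()                             # match the scene frame range to the clip
bpy.ops.clip.setup_tracking_scene()                         # prepare the scene for compositing
```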
#### File: 2.92/freestyle/types.py
```python
import sys
import typing
import bpy.types
import mathutils
class AdjacencyIterator:
''' Class hierarchy: Iterator > AdjacencyIterator Class for representing adjacency iterators used in the chaining process. An AdjacencyIterator is created in the increment() and decrement() methods of a ChainingIterator and passed to the traverse() method of the ChainingIterator.
'''
is_incoming: bool = None
''' True if the current ViewEdge is coming towards the iteration vertex, and False otherwise.
:type: bool
'''
object: 'ViewEdge' = None
''' The ViewEdge object currently pointed to by this iterator.
:type: 'ViewEdge'
'''
def __init__(self):
''' __init__(brother) __init__(vertex, restrict_to_selection=True, restrict_to_unvisited=True) Builds an AdjacencyIterator using the default constructor, copy constructor or the overloaded constructor.
:param brother: An AdjacencyIterator object.
:type brother: 'AdjacencyIterator'
:param vertex: The vertex which is the next crossing.
:type vertex: 'ViewVertex'
:param restrict_to_selection: Indicates whether to force the chaining to stay within the set of selected ViewEdges or not.
:type restrict_to_selection: bool
:param restrict_to_unvisited: Indicates whether a ViewEdge that has already been chained must be ignored or not.
:type restrict_to_unvisited: bool
'''
pass
class BBox:
''' Class for representing a bounding box.
'''
def __init__(self):
''' Default constructor.
'''
pass
class BinaryPredicate0D:
''' Base class for binary predicates working on Interface0D objects. A BinaryPredicate0D is typically an ordering relation between two Interface0D objects. The predicate evaluates a relation between the two Interface0D instances and returns a boolean value (true or false). It is used by invoking the __call__() method.
'''
name: str = None
''' The name of the binary 0D predicate.
:type: str
'''
def __init__(self):
''' Default constructor.
'''
pass
def __call__(self, inter1: 'Interface0D', inter2: 'Interface0D') -> bool:
''' Must be overloaded by inherited classes. It evaluates a relation between two Interface0D objects.
:param inter1: The first Interface0D object.
:type inter1: 'Interface0D'
:param inter2: The second Interface0D object.
:type inter2: 'Interface0D'
:rtype: bool
:return: True or false.
'''
pass
class BinaryPredicate1D:
''' Base class for binary predicates working on Interface1D objects. A BinaryPredicate1D is typically an ordering relation between two Interface1D objects. The predicate evaluates a relation between the two Interface1D instances and returns a boolean value (true or false). It is used by invoking the __call__() method.
'''
name: str = None
''' The name of the binary 1D predicate.
:type: str
'''
def __init__(self):
''' Default constructor.
'''
pass
def __call__(self, inter1: 'Interface1D', inter2: 'Interface1D') -> bool:
''' Must be overloaded by inherited classes. It evaluates a relation between two Interface1D objects.
:param inter1: The first Interface1D object.
:type inter1: 'Interface1D'
:param inter2: The second Interface1D object.
:type inter2: 'Interface1D'
:rtype: bool
:return: True or false.
'''
pass
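# A hedged sketch (not part of the stub): a user-defined BinaryPredicate1D that
# orders two Interface1D objects by their 2D length, e.g. for use with
# Operators.sort(). The class name is made up for illustration.
class Length2DLowerThanBP1D(BinaryPredicate1D):
    def __call__(self, inter1, inter2):
        # True when the first element is shorter (in 2D) than the second one
        return inter1.length_2d < inter2.length_2d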
class Chain:
''' Class hierarchy: Interface1D > Curve > Chain Class to represent a 1D elements issued from the chaining process. A Chain is the last step before the Stroke and is used in the Splitting and Creation processes.
'''
def __init__(self):
''' __init__(brother) __init__(id) Builds a Chain using the default constructor, copy constructor or from an Id .
:param brother: A Chain object.
:type brother: 'Chain'
:param id: An Id object.
:type id: 'Id'
'''
pass
def push_viewedge_back(self, viewedge: 'ViewEdge', orientation: bool):
''' Adds a ViewEdge at the end of the Chain.
:param viewedge: The ViewEdge that must be added.
:type viewedge: 'ViewEdge'
:param orientation: The orientation with which the ViewEdge must be processed.
:type orientation: bool
'''
pass
def push_viewedge_front(self, viewedge: 'ViewEdge', orientation: bool):
''' Adds a ViewEdge at the beginning of the Chain.
:param viewedge: The ViewEdge that must be added.
:type viewedge: 'ViewEdge'
:param orientation: The orientation with which the ViewEdge must be processed.
:type orientation: bool
'''
pass
class ChainingIterator:
''' Class hierarchy: Iterator > ViewEdgeIterator > ChainingIterator Base class for chaining iterators. This class is designed to be overloaded in order to describe chaining rules. It makes the description of chaining rules easier. The two main methods that need to be overloaded are traverse() and init(). traverse() tells which ViewEdge to follow, among the adjacent ones. If you specify restriction rules (such as "Chain only ViewEdges of the selection"), they will be included in the adjacency iterator (i.e., the adjacent iterator will only stop on "valid" edges).
'''
is_incrementing: bool = None
''' True if the current iteration is an incrementation.
:type: bool
'''
next_vertex: 'ViewVertex' = None
''' The ViewVertex that is the next crossing.
:type: 'ViewVertex'
'''
object: 'ViewEdge' = None
''' The ViewEdge object currently pointed by this iterator.
:type: 'ViewEdge'
'''
def __init__(self,
restrict_to_selection: bool = True,
restrict_to_unvisited: bool = True,
begin: 'ViewEdge' = None,
orientation: bool = True):
''' __init__(brother) Builds a Chaining Iterator from the first ViewEdge used for iteration and its orientation or by using the copy constructor.
:param restrict_to_selection: Indicates whether to force the chaining to stay within the set of selected ViewEdges or not.
:type restrict_to_selection: bool
:param restrict_to_unvisited: Indicates whether a ViewEdge that has already been chained must be ignored or not.
:type restrict_to_unvisited: bool
:param begin: The ViewEdge from which to start the chain.
:type begin: 'ViewEdge'
:param orientation: The direction to follow to explore the graph. If true, the direction indicated by the first ViewEdge is used.
:type orientation: bool
:param brother:
:type brother: 'ChainingIterator'
'''
pass
def init(self):
''' Initializes the iterator context. This method is called each time a new chain is started. It can be used to reset some history information that you might want to keep.
'''
pass
def traverse(self, it: 'AdjacencyIterator') -> 'ViewEdge':
''' This method iterates over the potential next ViewEdges and returns the one that will be followed next. Returns the next ViewEdge to follow or None when the end of the chain is reached.
:param it: The iterator over the ViewEdges adjacent to the end vertex of the current ViewEdge. The adjacency iterator reflects the restriction rules by only iterating over the valid ViewEdges.
:type it: 'AdjacencyIterator'
:rtype: 'ViewEdge'
:return: Returns the next ViewEdge to follow, or None if chaining ends.
'''
pass
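# A hedged sketch (not part of the stub): a minimal ChainingIterator subclass
# that simply follows the first valid adjacent ViewEdge and stops when there is
# none. The class name is made up for illustration; real style modules usually
# derive from the richer iterators in freestyle.chainingiterators instead.
class FollowFirstEdgeChainingIterator(ChainingIterator):
    def init(self):
        # nothing to reset between chains in this simple example
        pass

    def traverse(self, it):
        # 'it' is an AdjacencyIterator over the candidate ViewEdges
        if it.is_end:
            return None  # end of the chain
        return it.object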
class Curve:
''' Class hierarchy: Interface1D > Curve Base class for curves made of CurvePoints. SVertex is the type of the initial curve vertices. A Chain is a specialization of a Curve.
'''
is_empty: bool = None
''' True if the Curve doesn't have any Vertex yet.
:type: bool
'''
segments_size: int = None
''' The number of segments in the polyline constituting the Curve.
:type: int
'''
def __init__(self):
''' __init__(brother) __init__(id) Builds a FrsCurve using a default constructor, copy constructor or from an Id .
:param brother: A Curve object.
:type brother: 'bpy.types.Curve'
:param id: An Id object.
:type id: 'Id'
'''
pass
def push_vertex_back(self, vertex: typing.Union['CurvePoint', 'SVertex']):
''' Adds a single vertex at the end of the Curve.
:param vertex: A vertex object.
:type vertex: typing.Union['CurvePoint', 'SVertex']
'''
pass
def push_vertex_front(self, vertex: typing.Union['CurvePoint', 'SVertex']):
''' Adds a single vertex at the front of the Curve.
:param vertex: A vertex object.
:type vertex: typing.Union['CurvePoint', 'SVertex']
'''
pass
class CurvePoint:
''' Class hierarchy: Interface0D > CurvePoint Class to represent a point of a curve. A CurvePoint can be any point of a 1D curve (it doesn't have to be a vertex of the curve). Any Interface1D is built upon ViewEdges, themselves built upon FEdges. Therefore, a curve is basically a polyline made of a list of SVertex objects. Thus, a CurvePoint is built by linearly interpolating two SVertex instances. CurvePoint can be used as virtual points while querying 0D information along a curve at a given resolution.
'''
fedge: 'FEdge' = None
''' Gets the FEdge for the two SVertices that given CurvePoints consists out of. A shortcut for CurvePoint.first_svertex.get_fedge(CurvePoint.second_svertex).
:type: 'FEdge'
'''
first_svertex: 'SVertex' = None
''' The first SVertex upon which the CurvePoint is built.
:type: 'SVertex'
'''
second_svertex: 'SVertex' = None
''' The second SVertex upon which the CurvePoint is built.
:type: 'SVertex'
'''
t2d: float = None
''' The 2D interpolation parameter.
:type: float
'''
def __init__(self):
''' __init__(brother) __init__(first_vertex, second_vertex, t2d) __init__(first_point, second_point, t2d) Builds a CurvePoint using the default constructor, copy constructor, or one of the overloaded constructors. The overloaded constructors can take either two SVertex or two CurvePoint objects and an interpolation parameter.
:param brother: A CurvePoint object.
:type brother: 'CurvePoint'
:param first_vertex: The first SVertex.
:type first_vertex: 'SVertex'
:param second_vertex: The second SVertex.
:type second_vertex: 'SVertex'
:param first_point: The first CurvePoint.
:type first_point: 'CurvePoint'
:param second_point: The second CurvePoint.
:type second_point: 'CurvePoint'
:param t2d: A 2D interpolation parameter used to linearly interpolate first_vertex and second_vertex or first_point and second_point.
:type t2d: float
'''
pass
class CurvePointIterator:
''' Class hierarchy: Iterator > CurvePointIterator Class representing an iterator on a curve. Allows iterating outside the initial vertices. A CurvePoint is instantiated and returned through the .object attribute.
'''
object: 'CurvePoint' = None
''' The CurvePoint object currently pointed by this iterator.
:type: 'CurvePoint'
'''
t: float = None
''' The curvilinear abscissa of the current point.
:type: float
'''
u: float = None
''' The point parameter at the current point in the stroke (0 <= u <= 1).
:type: float
'''
def __init__(self):
''' __init__(brother) __init__(step=0.0) Builds a CurvePointIterator object using either the default constructor, copy constructor, or the overloaded constructor.
:param brother: A CurvePointIterator object.
:type brother: 'CurvePointIterator'
:param step: A resampling resolution with which the curve is resampled. If zero, no resampling is done (i.e., the iterator iterates over initial vertices).
:type step: float
'''
pass
class FEdge:
''' Class hierarchy: Interface1D > FEdge Base Class for feature edges. This FEdge can represent a silhouette, a crease, a ridge/valley, a border or a suggestive contour. For silhouettes, the FEdge is oriented so that the visible face lies on the left of the edge. For borders, the FEdge is oriented so that the face lies on the left of the edge. An FEdge can represent an initial edge of the mesh or runs across a face of the initial mesh depending on the smoothness or sharpness of the mesh. This class is specialized into a smooth and a sharp version since their properties slightly vary from one to the other.
'''
first_svertex: 'SVertex' = None
''' The first SVertex constituting this FEdge.
:type: 'SVertex'
'''
id: 'Id' = None
''' The Id of this FEdge.
:type: 'Id'
'''
is_smooth: bool = None
''' True if this FEdge is a smooth FEdge.
:type: bool
'''
nature: 'Nature' = None
''' The nature of this FEdge.
:type: 'Nature'
'''
next_fedge: 'FEdge' = None
''' The FEdge following this one in the ViewEdge. The value is None if this FEdge is the last of the ViewEdge.
:type: 'FEdge'
'''
previous_fedge: 'FEdge' = None
''' The FEdge preceding this one in the ViewEdge. The value is None if this FEdge is the first one of the ViewEdge.
:type: 'FEdge'
'''
second_svertex: 'SVertex' = None
''' The second SVertex constituting this FEdge.
:type: 'SVertex'
'''
viewedge: 'ViewEdge' = None
''' The ViewEdge to which this FEdge belongs to.
:type: 'ViewEdge'
'''
def __init__(self):
''' __init__(brother) __init__(first_vertex, second_vertex) Builds an FEdge using the default constructor, copy constructor, or between two SVertex objects.
:param brother: An FEdge object.
:type brother: 'FEdge'
:param first_vertex: The first SVertex.
:type first_vertex: 'SVertex'
:param second_vertex: The second SVertex.
:type second_vertex: 'SVertex'
'''
pass
class FEdgeSharp:
''' Class hierarchy: Interface1D > FEdge > FEdgeSharp Class defining a sharp FEdge. A Sharp FEdge corresponds to an initial edge of the input mesh. It can be a silhouette, a crease or a border. If it is a crease edge, then it is bordered by two faces of the mesh. Face a lies on its right whereas Face b lies on its left. If it is a border edge, then it doesn't have any face on its right, and thus Face a is None.
'''
face_mark_left: bool = None
''' The face mark of the face lying on the left of the FEdge.
:type: bool
'''
face_mark_right: bool = None
''' The face mark of the face lying on the right of the FEdge. If this FEdge is a border, it has no face on the right and thus this property is set to false.
:type: bool
'''
material_index_left: int = None
''' The index of the material of the face lying on the left of the FEdge.
:type: int
'''
material_index_right: int = None
''' The index of the material of the face lying on the right of the FEdge. If this FEdge is a border, it has no Face on its right and therefore no material.
:type: int
'''
material_left: 'bpy.types.Material' = None
''' The material of the face lying on the left of the FEdge.
:type: 'bpy.types.Material'
'''
material_right: 'bpy.types.Material' = None
''' The material of the face lying on the right of the FEdge. If this FEdge is a border, it has no Face on its right and therefore no material.
:type: 'bpy.types.Material'
'''
normal_left: 'mathutils.Vector' = None
''' The normal to the face lying on the left of the FEdge.
:type: 'mathutils.Vector'
'''
normal_right: 'mathutils.Vector' = None
''' The normal to the face lying on the right of the FEdge. If this FEdge is a border, it has no Face on its right and therefore no normal.
:type: 'mathutils.Vector'
'''
def __init__(self):
''' __init__(brother) __init__(first_vertex, second_vertex) Builds an FEdgeSharp using the default constructor, copy constructor, or between two SVertex objects.
:param brother: An FEdgeSharp object.
:type brother: 'FEdgeSharp'
:param first_vertex: The first SVertex object.
:type first_vertex: 'SVertex'
:param second_vertex: The second SVertex object.
:type second_vertex: 'SVertex'
'''
pass
class FEdgeSmooth:
''' Class hierarchy: Interface1D > FEdge > FEdgeSmooth Class defining a smooth edge. This kind of edge typically runs across a face of the input mesh. It can be a silhouette, a ridge or valley, a suggestive contour.
'''
face_mark: bool = None
''' The face mark of the face that this FEdge is running across.
:type: bool
'''
material: 'bpy.types.Material' = None
''' The material of the face that this FEdge is running across.
:type: 'bpy.types.Material'
'''
material_index: int = None
''' The index of the material of the face that this FEdge is running across.
:type: int
'''
normal: 'mathutils.Vector' = None
''' The normal of the face that this FEdge is running across.
:type: 'mathutils.Vector'
'''
def __init__(self):
''' __init__(brother) __init__(first_vertex, second_vertex) Builds an FEdgeSmooth using the default constructor, copy constructor, or between two SVertex .
:param brother: An FEdgeSmooth object.
:type brother: 'FEdgeSmooth'
:param first_vertex: The first SVertex object.
:type first_vertex: 'SVertex'
:param second_vertex: The second SVertex object.
:type second_vertex: 'SVertex'
'''
pass
class Id:
''' Class for representing an object Id.
'''
first: int = None
''' The first number constituting the Id.
:type: int
'''
second: int = None
''' The second number constituting the Id.
:type: int
'''
def __init__(self, brother: 'Id'):
''' __init__(first=0, second=0) Build the Id from two numbers or another Id using the copy constructor.
:param brother: An Id object.
:type brother: 'Id'
:param first:
:type first: int
:param second: The second number.
:type second: int
'''
pass
class IntegrationType:
''' Class hierarchy: int > IntegrationType Different integration methods that can be invoked to integrate into a single value the set of values obtained from each 0D element of a 1D element: * IntegrationType.MEAN: The value computed for the 1D element is the mean of the values obtained for the 0D elements. * IntegrationType.MIN: The value computed for the 1D element is the minimum of the values obtained for the 0D elements. * IntegrationType.MAX: The value computed for the 1D element is the maximum of the values obtained for the 0D elements. * IntegrationType.FIRST: The value computed for the 1D element is the first of the values obtained for the 0D elements. * IntegrationType.LAST: The value computed for the 1D element is the last of the values obtained for the 0D elements.
'''
pass
class Interface0D:
''' Base class for any 0D element.
'''
id: 'Id' = None
''' The Id of this 0D element.
:type: 'Id'
'''
name: str = None
''' The string of the name of this 0D element.
:type: str
'''
nature: 'Nature' = None
''' The nature of this 0D element.
:type: 'Nature'
'''
point_2d: 'mathutils.Vector' = None
''' The 2D point of this 0D element.
:type: 'mathutils.Vector'
'''
point_3d: 'mathutils.Vector' = None
''' The 3D point of this 0D element.
:type: 'mathutils.Vector'
'''
projected_x: float = None
''' The X coordinate of the projected 3D point of this 0D element.
:type: float
'''
projected_y: float = None
''' The Y coordinate of the projected 3D point of this 0D element.
:type: float
'''
projected_z: float = None
''' The Z coordinate of the projected 3D point of this 0D element.
:type: float
'''
def __init__(self):
''' Default constructor.
'''
pass
def get_fedge(self, inter: 'Interface0D') -> 'FEdge':
''' Returns the FEdge that lies between this 0D element and the 0D element given as the argument.
:param inter: A 0D element.
:type inter: 'Interface0D'
:rtype: 'FEdge'
:return: The FEdge lying between the two 0D elements.
'''
pass
class Interface0DIterator:
''' Class hierarchy: Iterator > Interface0DIterator Class defining an iterator over Interface0D elements. An instance of this iterator is always obtained from a 1D element.
'''
at_last: bool = None
''' True if the iterator points to the last valid element. For its counterpart (pointing to the first valid element), use it.is_begin.
:type: bool
'''
object: 'Interface0D' = None
''' The 0D object currently pointed to by this iterator. Note that the object may be an instance of an Interface0D subclass. For example if the iterator has been created from the vertices_begin() method of the Stroke class, the .object property refers to a StrokeVertex object.
:type: 'Interface0D'
'''
t: float = None
''' The curvilinear abscissa of the current point.
:type: float
'''
u: float = None
''' The point parameter at the current point in the 1D element (0 <= u <= 1).
:type: float
'''
def __init__(self, brother: 'Interface0DIterator'):
''' __init__(it) Construct a nested Interface0DIterator using either the copy constructor or the constructor that takes an iterator to be nested, as used for the argument of a Function0D.
:param brother: An Interface0DIterator object.
:type brother: 'Interface0DIterator'
:param it: An iterator object to be nested.
:type it: typing.Union['StrokeVertexIterator', 'SVertexIterator', 'CurvePointIterator']
'''
pass
class Interface1D:
''' Base class for any 1D element.
'''
id: 'Id' = None
''' The Id of this Interface1D.
:type: 'Id'
'''
length_2d: float = None
''' The 2D length of this Interface1D.
:type: float
'''
name: str = None
''' The string of the name of the 1D element.
:type: str
'''
nature: 'Nature' = None
''' The nature of this Interface1D.
:type: 'Nature'
'''
time_stamp: int = None
''' The time stamp of the 1D element, mainly used for selection.
:type: int
'''
def __init__(self):
''' Default constructor.
'''
pass
def points_begin(self, t: float = 0.0) -> 'Interface0DIterator':
''' Returns an iterator over the Interface1D points, pointing to the first point. The difference with vertices_begin() is that here we can iterate over points of the 1D element at any given sampling. Indeed, for each iteration, a virtual point is created.
:param t: A sampling with which we want to iterate over points of this 1D element.
:type t: float
:rtype: 'Interface0DIterator'
:return: An Interface0DIterator pointing to the first point.
'''
pass
def points_end(self, t: float = 0.0) -> 'Interface0DIterator':
''' Returns an iterator over the Interface1D points, pointing after the last point. The difference with vertices_end() is that here we can iterate over points of the 1D element at a given sampling. Indeed, for each iteration, a virtual point is created.
:param t: A sampling with which we want to iterate over points of this 1D element.
:type t: float
:rtype: 'Interface0DIterator'
:return: An Interface0DIterator pointing after the last point.
'''
pass
def vertices_begin(self) -> 'Interface0DIterator':
''' Returns an iterator over the Interface1D vertices, pointing to the first vertex.
:rtype: 'Interface0DIterator'
:return: An Interface0DIterator pointing to the first vertex.
'''
pass
def vertices_end(self) -> 'Interface0DIterator':
''' Returns an iterator over the Interface1D vertices, pointing after the last vertex.
:rtype: 'Interface0DIterator'
:return: An Interface0DIterator pointing after the last vertex.
'''
pass
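# A hedged usage sketch (not part of the stub): walking the 0D elements of an
# Interface1D with the iterator protocol documented above.
def dump_vertices(inter1d: 'Interface1D'):
    it = inter1d.vertices_begin()
    while not it.is_end:
        # each element is an Interface0D exposing id and 2D/3D point data
        print(it.object.id, it.object.point_2d)
        it.increment()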
class Iterator:
''' Base class to define iterators.
'''
is_begin: bool = None
''' True if the iterator points to the first element.
:type: bool
'''
is_end: bool = None
''' True if the iterator points to the last element.
:type: bool
'''
name: str = None
''' The string of the name of this iterator.
:type: str
'''
def __init__(self):
''' Default constructor.
'''
pass
def decrement(self):
''' Makes the iterator point the previous element.
'''
pass
def increment(self):
''' Makes the iterator point the next element.
'''
pass
class Material:
''' Class defining a material.
'''
ambient: 'mathutils.Color' = None
''' RGBA components of the ambient color of the material.
:type: 'mathutils.Color'
'''
diffuse: 'mathutils.Vector' = None
''' RGBA components of the diffuse color of the material.
:type: 'mathutils.Vector'
'''
emission: 'mathutils.Color' = None
''' RGBA components of the emissive color of the material.
:type: 'mathutils.Color'
'''
line: 'mathutils.Vector' = None
''' RGBA components of the line color of the material.
:type: 'mathutils.Vector'
'''
priority: int = None
''' Line color priority of the material.
:type: int
'''
shininess: float = None
''' Shininess coefficient of the material.
:type: float
'''
specular: 'mathutils.Vector' = None
''' RGBA components of the specular color of the material.
:type: 'mathutils.Vector'
'''
def __init__(self):
''' __init__(brother) __init__(line, diffuse, ambient, specular, emission, shininess, priority) Creates a FrsMaterial using either the default constructor, copy constructor, or an overloaded constructor
:param brother: A Material object to be used as a copy constructor.
:type brother: 'bpy.types.Material'
:param line: The line color.
:type line: typing.Union[typing.List[float], typing.List['mathutils.Vector']]
:param diffuse: The diffuse color.
:type diffuse: typing.Union[typing.List[float], typing.List['mathutils.Vector']]
:param ambient: The ambient color.
:type ambient: typing.Union[typing.List[float], typing.List['mathutils.Vector']]
:param specular: The specular color.
:type specular: typing.Union[typing.List[float], typing.List['mathutils.Vector']]
:param emission: The emissive color.
:type emission: typing.Union[typing.List[float], typing.List['mathutils.Vector']]
:param shininess: The shininess coefficient.
:type shininess: float
:param priority: The line color priority.
:type priority: int
'''
pass
class MediumType:
''' Class hierarchy: int > MediumType The different blending modes available to simulate the interaction between the drawing medium and the canvas: * Stroke.DRY_MEDIUM: To simulate a dry medium such as Pencil or Charcoal. * Stroke.HUMID_MEDIUM: To simulate ink painting (color subtraction blending). * Stroke.OPAQUE_MEDIUM: To simulate an opaque medium (oil, spray...).
'''
pass
class Nature:
''' Class hierarchy: int > Nature Different possible natures of 0D and 1D elements of the ViewMap. Vertex natures: * Nature.POINT: True for any 0D element. * Nature.S_VERTEX: True for SVertex. * Nature.VIEW_VERTEX: True for ViewVertex. * Nature.NON_T_VERTEX: True for NonTVertex. * Nature.T_VERTEX: True for TVertex. * Nature.CUSP: True for CUSP. Edge natures: * Nature.NO_FEATURE: True for non feature edges (always false for 1D elements of the ViewMap). * Nature.SILHOUETTE: True for silhouettes. * Nature.BORDER: True for borders. * Nature.CREASE: True for creases. * Nature.RIDGE: True for ridges. * Nature.VALLEY: True for valleys. * Nature.SUGGESTIVE_CONTOUR: True for suggestive contours. * Nature.MATERIAL_BOUNDARY: True for edges at material boundaries. * Nature.EDGE_MARK: True for edges having user-defined edge marks.
'''
pass
class Noise:
''' Class to provide Perlin noise functionalities. Undocumented, consider contributing <https://developer.blender.org/T51061> __.
'''
def __init__(self, seed: int = -1):
''' Builds a Noise object. Seed is an optional argument. The seed value is used as a seed for random number generation if it is equal to or greater than zero; otherwise, time is used as a seed.
:param seed: Seed for random number generation.
:type seed: int
'''
pass
def smoothNoise1(self, v: float) -> float:
''' Returns a smooth noise value for a 1D element.
:param v: One-dimensional sample point.
:type v: float
:rtype: float
:return: A smooth noise value.
'''
pass
def smoothNoise2(self, v: typing.List['mathutils.Vector']) -> float:
''' Returns a smooth noise value for a 2D element.
:param v: Two-dimensional sample point.
:type v: typing.List['mathutils.Vector']
:rtype: float
:return: A smooth noise value.
'''
pass
def smoothNoise3(self, v: typing.List['mathutils.Vector']) -> float:
''' Returns a smooth noise value for a 3D element.
:param v: Three-dimensional sample point.
:type v: typing.List['mathutils.Vector']
:rtype: float
:return: A smooth noise value.
'''
pass
def turbulence1(self, v: float, freq: float, amp: float,
oct: int = 4) -> float:
''' Returns a noise value for a 1D element.
:param v: One-dimensional sample point.
:type v: float
:param freq: Noise frequency.
:type freq: float
:param amp: Amplitude.
:type amp: float
:param oct: Number of octaves.
:type oct: int
:rtype: float
:return: A noise value.
'''
pass
def turbulence2(self,
v: typing.List['mathutils.Vector'],
freq: float,
amp: float,
oct: int = 4) -> float:
''' Returns a noise value for a 2D element.
:param v: Two-dimensional sample point.
:type v: typing.List['mathutils.Vector']
:param freq: Noise frequency.
:type freq: float
:param amp: Amplitude.
:type amp: float
:param oct: Number of octaves.
:type oct: int
:rtype: float
:return: A noise value.
'''
pass
def turbulence3(self,
v: typing.List['mathutils.Vector'],
freq: float,
amp: float,
oct: int = 4) -> float:
''' Returns a noise value for a 3D element.
:param v: Three-dimensional sample point.
:type v: typing.List['mathutils.Vector']
:param freq: Noise frequency.
:type freq: float
:param amp: Amplitude.
:type amp: float
:param oct: Number of octaves.
:type oct: int
:rtype: float
:return: A noise value.
'''
pass
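# A hedged usage sketch (not part of the stub): sampling 1D Perlin noise along
# a parameter, e.g. to perturb stroke thickness inside a shader.
def sample_noise(count: int = 10):
    noise = Noise(seed=1)
    return [noise.turbulence1(i * 0.1, freq=4.0, amp=1.0, oct=4) for i in range(count)]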
class NonTVertex:
''' Class hierarchy: Interface0D > ViewVertex > NonTVertex View vertex for corners, cusps, etc. associated to a single SVertex. Can be associated to 2 or more view edges.
'''
svertex: 'SVertex' = None
''' The SVertex on top of which this NonTVertex is built.
:type: 'SVertex'
'''
def __init__(self):
''' __init__(svertex) Builds a NonTVertex using the default constructor or a SVertex .
:param svertex: An SVertex object.
:type svertex: 'SVertex'
'''
pass
class Operators:
''' Class defining the operators used in a style module. There are five types of operators: Selection, chaining, splitting, sorting and creation. All these operators are user controlled through functors, predicates and shaders that are taken as arguments.
'''
@staticmethod
def bidirectional_chain(it: 'ChainingIterator', pred: 'UnaryPredicate1D'):
''' bidirectional_chain(it) Builds a set of chains from the current set of ViewEdges. Each ViewEdge of the current list potentially starts a new chain. The chaining operator then iterates over the ViewEdges of the ViewMap using the user specified iterator. This operator iterates both using the increment and decrement operators and is therefore bidirectional. This operator works with a ChainingIterator which contains the chaining rules. It is this last one which can be told to chain only edges that belong to the selection or not to process twice a ViewEdge during the chaining. Each time a ViewEdge is added to a chain, its chaining time stamp is incremented. This allows you to keep track of the number of chains to which a ViewEdge belongs.
:param it: The ChainingIterator on the ViewEdges of the ViewMap. It contains the chaining rule.
:type it: 'ChainingIterator'
:param pred: The predicate on the ViewEdge that expresses the stopping condition. This parameter is optional; you may not want to pass a stopping criterion when the stopping criterion is already contained in the iterator definition.
:type pred: 'UnaryPredicate1D'
'''
pass
@staticmethod
def chain(it: 'ViewEdgeIterator', pred: 'UnaryPredicate1D',
modifier: 'UnaryFunction1DVoid'):
''' chain(it, pred) Builds a set of chains from the current set of ViewEdges. Each ViewEdge of the current list starts a new chain. The chaining operator then iterates over the ViewEdges of the ViewMap using the user specified iterator. This operator only iterates using the increment operator and is therefore unidirectional.
:param it: The iterator on the ViewEdges of the ViewMap. It contains the chaining rule.
:type it: 'ViewEdgeIterator'
:param pred: The predicate on the ViewEdge that expresses the stopping condition.
:type pred: 'UnaryPredicate1D'
:param modifier: A function that takes a ViewEdge as argument and that is used to modify the processed ViewEdge state (the timestamp incrementation is a typical illustration of such a modifier). If this argument is not given, the time stamp is automatically managed.
:type modifier: 'UnaryFunction1DVoid'
'''
pass
@staticmethod
def create(pred: 'UnaryPredicate1D', shaders: typing.List['StrokeShader']):
''' Creates and shades the strokes from the current set of chains. A predicate can be specified to make a selection pass on the chains.
:param pred: The predicate that a chain must verify in order to be transform as a stroke.
:type pred: 'UnaryPredicate1D'
:param shaders: The list of shaders used to shade the strokes.
:type shaders: typing.List['StrokeShader']
'''
pass
@staticmethod
def get_chain_from_index(i: int) -> 'Chain':
''' Returns the Chain at the index in the current set of Chains.
:param i: index (0 <= i < Operators.get_chains_size()).
:type i: int
:rtype: 'Chain'
:return: The Chain object.
'''
pass
@staticmethod
def get_chains_size() -> int:
''' Returns the number of Chains.
:rtype: int
:return: The number of Chains.
'''
pass
@staticmethod
def get_stroke_from_index(i: int) -> 'Stroke':
''' Returns the Stroke at the index in the current set of Strokes.
:param i: index (0 <= i < Operators.get_strokes_size()).
:type i: int
:rtype: 'Stroke'
:return: The Stroke object.
'''
pass
@staticmethod
def get_strokes_size() -> int:
''' Returns the number of Strokes.
:rtype: int
:return: The number of Strokes.
'''
pass
@staticmethod
def get_view_edges_size() -> int:
''' Returns the number of ViewEdges.
:rtype: int
:return: The number of ViewEdges.
'''
pass
@staticmethod
def get_viewedge_from_index(i: int) -> 'ViewEdge':
''' Returns the ViewEdge at the index in the current set of ViewEdges.
:param i: index (0 <= i < Operators.get_view_edges_size()).
:type i: int
:rtype: 'ViewEdge'
:return: The ViewEdge object.
'''
pass
@staticmethod
def recursive_split(func: 'UnaryFunction0DDouble',
pred_1d: 'UnaryPredicate1D',
sampling: float = 0.0):
''' recursive_split(func, pred_0d, pred_1d, sampling=0.0) Splits the current set of chains in a recursive way. We process the points of each chain (with a specified sampling) to find the point minimizing a specified function. The chain is split in two at this point and the two new chains are processed in the same way. The recursivity level is controlled through a predicate 1D that expresses a stopping condition on the chain that is about to be processed. The user can also specify a 0D predicate to make a first selection on the points that can potentially be split. A point that doesn't verify the 0D predicate won't be candidate in realizing the min.
:param func: The Unary Function evaluated at each point of the chain. The splitting point is the point minimizing this function.
:type func: 'UnaryFunction0DDouble'
:param pred_0d: The Unary Predicate 0D used to select the candidate points where the split can occur. For example, it is very likely that you would rather have your chain split around its middle point than around one of its extremities. A 0D predicate working on the curvilinear abscissa allows you to add this kind of constraint.
:type pred_0d: 'UnaryPredicate0D'
:param pred_1d: The Unary Predicate expressing the recursivity stopping condition. This predicate is evaluated for each curve before it actually gets split. If pred_1d(chain) is true, the curve won't be split anymore.
:type pred_1d: 'UnaryPredicate1D'
:param sampling: The resolution used to sample the chain for the predicates evaluation. (The chain is not actually resampled; a virtual point only progresses along the curve using this resolution.)
:type sampling: float
'''
pass
@staticmethod
def reset(delete_strokes: bool = True):
''' Resets the line stylization process to the initial state. The results of stroke creation are accumulated if **delete_strokes** is set to False.
:param delete_strokes: Delete the strokes that are currently stored.
:type delete_strokes: bool
'''
pass
@staticmethod
def select(pred: 'UnaryPredicate1D'):
''' Selects the ViewEdges of the ViewMap verifying a specified condition.
:param pred: The predicate expressing this condition.
:type pred: 'UnaryPredicate1D'
'''
pass
@staticmethod
def sequential_split(starting_pred: 'UnaryPredicate0D',
stopping_pred: 'UnaryPredicate0D',
sampling: float = 0.0):
''' sequential_split(pred, sampling=0.0) Splits each chain of the current set of chains in a sequential way. The points of each chain are processed (with a specified sampling) sequentially. The first point of the initial chain is the first point of one of the resulting chains. The splitting ends when no more chain can start.
:param starting_pred: The predicate on a point that expresses the starting condition. Each time this condition is verified, a new chain begins
:type starting_pred: 'UnaryPredicate0D'
:param stopping_pred: The predicate on a point that expresses the stopping condition. The chain ends as soon as this predicate is verified.
:type stopping_pred: 'UnaryPredicate0D'
:param pred: The predicate on a point that expresses the splitting condition. Each time the condition is verified, the chain is split into two chains. The resulting set of chains is a partition of the initial chain
:type pred: 'UnaryPredicate0D'
:param sampling: The resolution used to sample the chain for the predicates evaluation. (The chain is not actually resampled; a virtual point only progresses along the curve using this resolution.)
:type sampling: float
'''
pass
@staticmethod
def sort(pred: 'BinaryPredicate1D'):
''' Sorts the current set of chains (or viewedges) according to the comparison predicate given as argument.
:param pred: The binary predicate used for the comparison.
:type pred: 'BinaryPredicate1D'
'''
pass
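# A hedged sketch (not part of the stub): the classic select / chain / create
# sequence of a Freestyle style module, wrapped in a function so it does not
# run on import. The predicate, chaining-iterator and shader classes are
# imported from the companion freestyle.* modules, which are not defined here.
def example_style_module():
    from freestyle.predicates import QuantitativeInvisibilityUP1D, NotUP1D, TrueUP1D
    from freestyle.chainingiterators import ChainSilhouetteIterator
    from freestyle.shaders import ConstantThicknessShader, ConstantColorShader

    # keep only visible view edges, chain them along silhouettes, then shade
    Operators.select(QuantitativeInvisibilityUP1D(0))
    Operators.bidirectional_chain(ChainSilhouetteIterator(),
                                  NotUP1D(QuantitativeInvisibilityUP1D(0)))
    Operators.create(TrueUP1D(),
                     [ConstantThicknessShader(3.0), ConstantColorShader(0.0, 0.0, 0.0)])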
class SShape:
''' Class to define a feature shape. It is the gathering of feature elements from an identified input shape.
'''
bbox: 'BBox' = None
''' The bounding box of the SShape.
:type: 'BBox'
'''
edges: typing.List['FEdge'] = None
''' The list of edges constituting this SShape.
:type: typing.List['FEdge']
'''
id: 'Id' = None
''' The Id of this SShape.
:type: 'Id'
'''
name: str = None
''' The name of the SShape.
:type: str
'''
vertices: typing.List['SVertex'] = None
''' The list of vertices constituting this SShape.
:type: typing.List['SVertex']
'''
def __init__(self):
''' __init__(brother) Creates a SShape class using either a default constructor or copy constructor.
:param brother: An SShape object.
:type brother: 'SShape'
'''
pass
def add_edge(self, edge: 'FEdge'):
''' Adds an FEdge to the list of FEdges.
:param edge: An FEdge object.
:type edge: 'FEdge'
'''
pass
def add_vertex(self, vertex: 'SVertex'):
''' Adds an SVertex to the list of SVertex of this Shape. The SShape attribute of the SVertex is also set to this SShape.
:param vertex: An SVertex object.
:type vertex: 'SVertex'
'''
pass
def compute_bbox(self):
''' Compute the bbox of the SShape.
'''
pass
class SVertex:
''' Class hierarchy: Interface0D > SVertex Class to define a vertex of the embedding.
'''
curvatures: tuple = None
''' Curvature information expressed in the form of a seven-element tuple (K1, e1, K2, e2, Kr, er, dKr), where K1 and K2 are scalar values representing the first (maximum) and second (minimum) principal curvatures at this SVertex, respectively; e1 and e2 are three-dimensional vectors representing the first and second principal directions, i.e. the directions of the normal plane where the curvature takes its maximum and minimum values, respectively; and Kr, er and dKr are the radial curvature, radial direction, and the derivative of the radial curvature at this SVertex, respectively.
:type: tuple
'''
id: 'Id' = None
''' The Id of this SVertex.
:type: 'Id'
'''
normals: typing.List['mathutils.Vector'] = None
''' The normals for this Vertex as a list. In a sharp surface, an SVertex has exactly one normal. In a smooth surface, an SVertex can have any number of normals.
:type: typing.List['mathutils.Vector']
'''
normals_size: int = None
''' The number of different normals for this SVertex.
:type: int
'''
point_2d: 'mathutils.Vector' = None
''' The projected 3D coordinates of the SVertex.
:type: 'mathutils.Vector'
'''
point_3d: 'mathutils.Vector' = None
''' The 3D coordinates of the SVertex.
:type: 'mathutils.Vector'
'''
viewvertex: 'ViewVertex' = None
''' If this SVertex is also a ViewVertex, this property refers to the ViewVertex, and None otherwise.
:type: 'ViewVertex'
'''
def __init__(self):
''' __init__(brother) __init__(point_3d, id) Builds a SVertex using the default constructor, copy constructor or the overloaded constructor which builds a SVertex from 3D coordinates and an Id.
:param brother: A SVertex object.
:type brother: 'SVertex'
:param point_3d: A three-dimensional vector.
:type point_3d: 'mathutils.Vector'
:param id: An Id object.
:type id: 'Id'
'''
pass
def add_fedge(self, fedge: 'FEdge'):
''' Add an FEdge to the list of edges emanating from this SVertex.
:param fedge: An FEdge.
:type fedge: 'FEdge'
'''
pass
def add_normal(self, normal: typing.List['mathutils.Vector']):
''' Adds a normal to the SVertex's set of normals. If the same normal is already in the set, nothing changes.
:param normal: A three-dimensional vector.
:type normal: typing.List['mathutils.Vector']
'''
pass
class SVertexIterator:
''' Class hierarchy: Iterator > SVertexIterator Class representing an iterator over SVertex of a ViewEdge . An instance of an SVertexIterator can be obtained from a ViewEdge by calling verticesBegin() or verticesEnd().
'''
object: 'SVertex' = None
''' The SVertex object currently pointed by this iterator.
:type: 'SVertex'
'''
t: float = None
''' The curvilinear abscissa of the current point.
:type: float
'''
u: float = None
''' The point parameter at the current point in the 1D element (0 <= u <= 1).
:type: float
'''
def __init__(self):
''' __init__(brother) __init__(vertex, begin, previous_edge, next_edge, t) Build an SVertexIterator using either the default constructor, copy constructor, or the overloaded constructor that starts iteration from an SVertex object vertex.
:param brother: An SVertexIterator object.
:type brother: 'SVertexIterator'
:param vertex: The SVertex from which the iterator starts iteration.
:type vertex: 'SVertex'
:param begin: The first SVertex of a ViewEdge.
:type begin: 'SVertex'
:param previous_edge: The previous FEdge coming to vertex.
:type previous_edge: 'FEdge'
:param next_edge: The next FEdge going out from vertex.
:type next_edge: 'FEdge'
:param t: The curvilinear abscissa at vertex.
:type t: float
'''
pass
class Stroke:
''' Class hierarchy: Interface1D > Stroke Class to define a stroke. A stroke is made of a set of 2D vertices ( StrokeVertex ), regularly spaced out. This set of vertices defines the stroke's backbone geometry. Each of these stroke vertices defines the stroke's shape and appearance at this vertex position.
'''
id: 'Id' = None
''' The Id of this Stroke.
:type: 'Id'
'''
length_2d: float = None
''' The 2D length of the Stroke.
:type: float
'''
medium_type: 'MediumType' = None
''' The MediumType used for this Stroke.
:type: 'MediumType'
'''
texture_id: int = None
''' The ID of the texture used to simulate the marks system for this Stroke.
:type: int
'''
tips: bool = None
''' True if this Stroke uses a texture with tips, and false otherwise.
:type: bool
'''
def __init__(self):
''' __init__(brother) Creates a Stroke using the default constructor or the copy constructor.
'''
pass
def compute_sampling(self, n: int) -> float:
''' Compute the sampling needed to get N vertices. If the specified number of vertices is less than the actual number of vertices, the actual sampling value is returned. (To remove Vertices, use the RemoveVertex() method of this class.)
:param n: The number of stroke vertices we eventually want in our Stroke.
:type n: int
:rtype: float
:return: The sampling that must be used in the Resample(float) method.
'''
pass
def insert_vertex(self, vertex: 'StrokeVertex',
next: 'StrokeVertexIterator'):
''' Inserts the StrokeVertex given as argument into the Stroke before the point specified by next. The length and curvilinear abscissa are updated consequently.
:param vertex: The StrokeVertex to insert in the Stroke.
:type vertex: 'StrokeVertex'
:param next: A StrokeVertexIterator pointing to the StrokeVertex before which vertex must be inserted.
:type next: 'StrokeVertexIterator'
'''
pass
def remove_all_vertices(self):
''' Removes all vertices from the Stroke.
'''
pass
def remove_vertex(self, vertex: 'StrokeVertex'):
''' Removes the StrokeVertex given as argument from the Stroke. The length and curvilinear abscissa are updated consequently.
:param vertex: the StrokeVertex to remove from the Stroke.
:type vertex: 'StrokeVertex'
'''
pass
def resample(self, n: int):
''' resample(sampling) Resamples the stroke using one of two methods, with the goal of creating a stroke with fewer points and the same shape.
:param n: Resamples the stroke so that it eventually has N points. That means it is going to add N-vertices_size, where vertices_size is the number of points we already have. If vertices_size >= N, no resampling is done.
:type n: int
:param sampling: Resamples the stroke with a given sampling value. If the sampling is smaller than the actual sampling value, no resampling is done.
:type sampling: float
'''
pass
def stroke_vertices_begin(self, t: float = 0.0) -> 'StrokeVertexIterator':
''' Returns a StrokeVertexIterator pointing on the first StrokeVertex of the Stroke. One can specify a sampling value to re-sample the Stroke on the fly if needed.
:param t: The resampling value with which we want our Stroke to be resampled. If 0 is specified, no resampling is done.
:type t: float
:rtype: 'StrokeVertexIterator'
:return: A StrokeVertexIterator pointing on the first StrokeVertex.
'''
pass
def stroke_vertices_end(self) -> 'StrokeVertexIterator':
''' Returns a StrokeVertexIterator pointing after the last StrokeVertex of the Stroke.
:rtype: 'StrokeVertexIterator'
:return: A StrokeVertexIterator pointing after the last StrokeVertex.
'''
pass
def stroke_vertices_size(self) -> int:
''' Returns the number of StrokeVertex constituting the Stroke.
:rtype: int
:return: The number of stroke vertices.
'''
pass
def update_length(self):
''' Updates the 2D length of the Stroke.
'''
pass
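# A hedged usage sketch (not part of the stub): thinning a stroke down to at
# most a target number of vertices before further processing.
def thin_stroke(stroke: 'Stroke', target: int = 32):
    if stroke.stroke_vertices_size() > target:
        stroke.resample(target)
        stroke.update_length()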
class StrokeAttribute:
''' Class to define a set of attributes associated with a StrokeVertex . The attribute set stores the color, alpha and thickness values for a Stroke Vertex.
'''
alpha: float = None
''' Alpha component of the stroke color.
:type: float
'''
color: 'mathutils.Color' = None
''' RGB components of the stroke color.
:type: 'mathutils.Color'
'''
thickness: 'mathutils.Vector' = None
''' Right and left components of the stroke thickness. The right (left) component is the thickness on the right (left) of the vertex when following the stroke.
:type: 'mathutils.Vector'
'''
visible: bool = None
''' The visibility flag. True if the StrokeVertex is visible.
:type: bool
'''
def __init__(self):
''' __init__(brother) __init__(red, green, blue, alpha, thickness_right, thickness_left) __init__(attribute1, attribute2, t) Creates a StrokeAttribute object using either a default constructor, copy constructor, overloaded constructor, or an interpolation constructor to interpolate between two StrokeAttribute objects.
:param brother: A StrokeAttribute object to be used as a copy constructor.
:type brother: 'StrokeAttribute'
:param red: Red component of a stroke color.
:type red: float
:param green: Green component of a stroke color.
:type green: float
:param blue: Blue component of a stroke color.
:type blue: float
:param alpha: Alpha component of a stroke color.
:type alpha: float
:param thickness_right: Stroke thickness on the right.
:type thickness_right: float
:param thickness_left: Stroke thickness on the left.
:type thickness_left: float
:param attribute1: The first StrokeAttribute object.
:type attribute1: 'StrokeAttribute'
:param attribute2: The second StrokeAttribute object.
:type attribute2: 'StrokeAttribute'
:param t: The interpolation parameter (0 <= t <= 1).
:type t: float
'''
pass
def get_attribute_real(self, name: str) -> float:
''' Returns an attribute of float type.
:param name: The name of the attribute.
:type name: str
:rtype: float
:return: The attribute value.
'''
pass
def get_attribute_vec2(self, name: str) -> 'mathutils.Vector':
''' Returns an attribute of two-dimensional vector type.
:param name: The name of the attribute.
:type name: str
:rtype: 'mathutils.Vector'
:return: The attribute value.
'''
pass
def get_attribute_vec3(self, name: str) -> 'mathutils.Vector':
''' Returns an attribute of three-dimensional vector type.
:param name: The name of the attribute.
:type name: str
:rtype: 'mathutils.Vector'
:return: The attribute value.
'''
pass
def has_attribute_real(self, name: str) -> bool:
''' Checks whether the attribute name of float type is available.
:param name: The name of the attribute.
:type name: str
:rtype: bool
:return: True if the attribute is available.
'''
pass
def has_attribute_vec2(self, name: str) -> bool:
''' Checks whether the attribute name of two-dimensional vector type is available.
:param name: The name of the attribute.
:type name: str
:rtype: bool
:return: True if the attribute is available.
'''
pass
def has_attribute_vec3(self, name: str) -> bool:
''' Checks whether the attribute name of three-dimensional vector type is available.
:param name: The name of the attribute.
:type name: str
:rtype: bool
:return: True if the attribute is available.
'''
pass
def set_attribute_real(self, name: str, value: float):
''' Adds a user-defined attribute of float type. If there is no attribute of the given name, it is added. Otherwise, the new value replaces the old one.
:param name: The name of the attribute.
:type name: str
:param value: The attribute value.
:type value: float
'''
pass
def set_attribute_vec2(self, name: str,
value: typing.List['mathutils.Vector']):
''' Adds a user-defined attribute of two-dimensional vector type. If there is no attribute of the given name, it is added. Otherwise, the new value replaces the old one.
:param name: The name of the attribute.
:type name: str
:param value: The attribute value.
:type value: typing.List['mathutils.Vector']
'''
pass
def set_attribute_vec3(self, name: str,
value: typing.List['mathutils.Vector']):
''' Adds a user-defined attribute of three-dimensional vector type. If there is no attribute of the given name, it is added. Otherwise, the new value replaces the old one.
:param name: The name of the attribute.
:type name: str
:param value: The attribute value.
:type value: typing.List['mathutils.Vector']
'''
pass
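# A hedged usage sketch (not part of the stub): storing and reading back a
# user-defined float attribute on a StrokeAttribute ("my_weight" is a made-up name).
def tag_weight(attr: 'StrokeAttribute', weight: float) -> float:
    attr.set_attribute_real("my_weight", weight)
    if attr.has_attribute_real("my_weight"):
        return attr.get_attribute_real("my_weight")
    return 0.0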
class StrokeShader:
''' Base class for stroke shaders. Any stroke shader must inherit from this class and overload the shade() method. A StrokeShader is designed to modify stroke attributes such as thickness, color, geometry, texture, blending mode, and so on. The basic way for this operation is to iterate over the stroke vertices of the Stroke and to modify the StrokeAttribute of each vertex. Here is a code example of such an iteration::

        it = ioStroke.strokeVerticesBegin()
        while not it.is_end:
            att = it.object.attribute
            ## perform here any attribute modification
            it.increment()
'''
name: str = None
''' The name of the stroke shader.
:type: str
'''
def __init__(self):
''' Default constructor.
'''
pass
def shade(self, stroke: 'Stroke'):
''' The shading method. Must be overloaded by inherited classes.
:param stroke: A Stroke object.
:type stroke: 'Stroke'
'''
pass
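# A hedged sketch (not part of the stub): a custom shader that fades the stroke
# alpha from head to tail using the documented vertex-iteration pattern. The
# class name is made up for illustration.
class FadingAlphaShader(StrokeShader):
    def shade(self, stroke):
        it = stroke.stroke_vertices_begin()
        while not it.is_end:
            # it.u runs from 0.0 at the first vertex to 1.0 at the last one
            it.object.attribute.alpha = 1.0 - it.u
            it.increment()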
class StrokeVertex:
''' Class hierarchy: Interface0D > CurvePoint > StrokeVertex Class to define a stroke vertex.
'''
attribute: 'StrokeAttribute' = None
''' StrokeAttribute for this StrokeVertex.
:type: 'StrokeAttribute'
'''
curvilinear_abscissa: float = None
''' Curvilinear abscissa of this StrokeVertex in the Stroke.
:type: float
'''
point: 'mathutils.Vector' = None
''' 2D point coordinates.
:type: 'mathutils.Vector'
'''
stroke_length: float = None
''' Stroke length (it is only a value retained by the StrokeVertex, and it won't change the real stroke length).
:type: float
'''
u: float = None
''' Curvilinear abscissa of this StrokeVertex in the Stroke.
:type: float
'''
def __init__(self):
''' __init__(brother) __init__(first_vertex, second_vertex, t3d) __init__(point) __init__(svertex) __init__(svertex, attribute) Builds a StrokeVertex using the default constructor, copy constructor, from 2 StrokeVertex and an interpolation parameter, from a CurvePoint, from a SVertex, or a SVertex and a StrokeAttribute object.
:param brother: A StrokeVertex object.
:type brother: 'StrokeVertex'
:param first_vertex: The first StrokeVertex.
:type first_vertex: 'StrokeVertex'
:param second_vertex: The second StrokeVertex.
:type second_vertex: 'StrokeVertex'
:param t3d: An interpolation parameter.
:type t3d: float
:param point: A CurvePoint object.
:type point: 'CurvePoint'
:param svertex: An SVertex object.
:type svertex: 'SVertex'
:param attribute: A StrokeAttribute object.
:type attribute: 'StrokeAttribute'
'''
pass
class StrokeVertexIterator:
''' Class hierarchy: Iterator > StrokeVertexIterator Class defining an iterator designed to iterate over the StrokeVertex of a Stroke . An instance of a StrokeVertexIterator can be obtained from a Stroke by calling iter(), stroke_vertices_begin() or stroke_vertices_end(). It iterates over the same vertices as an Interface0DIterator . The difference resides in the object access: an Interface0DIterator only allows access to an Interface0D while one might need to access the specialized StrokeVertex type. In this case, one should use a StrokeVertexIterator. To call functions of the UnaryFunction0D type, a StrokeVertexIterator can be converted to an Interface0DIterator by calling Interface0DIterator(it).
'''
at_last: bool = None
''' True if the iterator points to the last valid element. For its counterpart (pointing to the first valid element), use it.is_begin.
:type: bool
'''
object: 'StrokeVertex' = None
''' The StrokeVertex object currently pointed to by this iterator.
:type: 'StrokeVertex'
'''
t: float = None
''' The curvilinear abscissa of the current point.
:type: float
'''
u: float = None
''' The point parameter at the current point in the stroke (0 <= u <= 1).
:type: float
'''
def __init__(self):
''' __init__(brother) Creates a StrokeVertexIterator using either the default constructor or the copy constructor.
:param brother: A StrokeVertexIterator object.
:type brother: 'StrokeVertexIterator'
'''
pass
def decremented(self) -> 'StrokeVertexIterator':
''' Returns a copy of a decremented StrokeVertexIterator.
:rtype: 'StrokeVertexIterator'
:return: A StrokeVertexIterator pointing the previous StrokeVertex.
'''
pass
def incremented(self) -> 'StrokeVertexIterator':
''' Returns a copy of an incremented StrokeVertexIterator.
:rtype: 'StrokeVertexIterator'
:return: A StrokeVertexIterator pointing the next StrokeVertex.
'''
pass
def reversed(self) -> 'StrokeVertexIterator':
''' Returns a StrokeVertexIterator that traverses stroke vertices in the reversed order.
:rtype: 'StrokeVertexIterator'
:return: A StrokeVertexIterator traversing stroke vertices backward.
'''
pass
class TVertex:
''' Class hierarchy: Interface0D > ViewVertex > TVertex Class to define a T vertex, i.e. an intersection between two edges. It points towards two SVertex and four ViewEdges. Among the ViewEdges, two are front and the other two are back. Basically a front edge hides part of a back edge. So, among the back edges, one is of invisibility N and the other of invisibility N+1.
'''
back_svertex: 'SVertex' = None
''' The SVertex that is further away from the viewpoint.
:type: 'SVertex'
'''
front_svertex: 'SVertex' = None
''' The SVertex that is closer to the viewpoint.
:type: 'SVertex'
'''
id: 'Id' = None
''' The Id of this TVertex.
:type: 'Id'
'''
def __init__(self):
''' Default constructor.
'''
pass
def get_mate(self, viewedge: 'ViewEdge') -> 'ViewEdge':
''' Returns the mate edge of the ViewEdge given as argument. If the ViewEdge is frontEdgeA, frontEdgeB is returned. If the ViewEdge is frontEdgeB, frontEdgeA is returned. Same for back edges.
:param viewedge: A ViewEdge object.
:type viewedge: 'ViewEdge'
:rtype: 'ViewEdge'
:return: The mate edge of the given ViewEdge.
'''
pass
def get_svertex(self, fedge: 'FEdge') -> 'SVertex':
''' Returns the SVertex (among the 2) belonging to the given FEdge.
:param fedge: An FEdge object.
:type fedge: 'FEdge'
:rtype: 'SVertex'
:return: The SVertex belonging to the given FEdge.
'''
pass
class UnaryFunction0D:
''' Base class for Unary Functions (functors) working on Interface0DIterator . A unary function will be used by invoking __call__() on an Interface0DIterator. In Python, several different subclasses of UnaryFunction0D are used depending on the types of functors' return values. For example, you would inherit from a UnaryFunction0DDouble if you wish to define a function that returns a double value. Available UnaryFunction0D subclasses are: * UnaryFunction0DDouble * UnaryFunction0DEdgeNature * UnaryFunction0DFloat * UnaryFunction0DId * UnaryFunction0DMaterial * UnaryFunction0DUnsigned * UnaryFunction0DVec2f * UnaryFunction0DVec3f * UnaryFunction0DVectorViewShape * UnaryFunction0DViewShape
'''
name: str = None
''' The name of the unary 0D function.
:type: str
'''
class UnaryFunction0DDouble:
''' Class hierarchy: UnaryFunction0D > UnaryFunction0DDouble Base class for unary functions (functors) that work on Interface0DIterator and return a float value.
'''
def __init__(self):
''' Default constructor.
'''
pass
class UnaryFunction0DEdgeNature:
''' Class hierarchy: UnaryFunction0D > UnaryFunction0DEdgeNature Base class for unary functions (functors) that work on Interface0DIterator and return a Nature object.
'''
def __init__(self):
''' Default constructor.
'''
pass
class UnaryFunction0DFloat:
''' Class hierarchy: UnaryFunction0D > UnaryFunction0DFloat Base class for unary functions (functors) that work on Interface0DIterator and return a float value.
'''
def __init__(self):
''' Default constructor.
'''
pass
class UnaryFunction0DId:
''' Class hierarchy: UnaryFunction0D > UnaryFunction0DId Base class for unary functions (functors) that work on Interface0DIterator and return an Id object.
'''
def __init__(self):
''' Default constructor.
'''
pass
class UnaryFunction0DMaterial:
''' Class hierarchy: UnaryFunction0D > UnaryFunction0DMaterial Base class for unary functions (functors) that work on Interface0DIterator and return a Material object.
'''
def __init__(self):
''' Default constructor.
'''
pass
class UnaryFunction0DUnsigned:
''' Class hierarchy: UnaryFunction0D > UnaryFunction0DUnsigned Base class for unary functions (functors) that work on Interface0DIterator and return an int value.
'''
def __init__(self):
''' Default constructor.
'''
pass
class UnaryFunction0DVec2f:
''' Class hierarchy: UnaryFunction0D > UnaryFunction0DVec2f Base class for unary functions (functors) that work on Interface0DIterator and return a 2D vector.
'''
def __init__(self):
''' Default constructor.
'''
pass
class UnaryFunction0DVec3f:
''' Class hierarchy: UnaryFunction0D > UnaryFunction0DVec3f Base class for unary functions (functors) that work on Interface0DIterator and return a 3D vector.
'''
def __init__(self):
''' Default constructor.
'''
pass
class UnaryFunction0DVectorViewShape:
''' Class hierarchy: UnaryFunction0D > UnaryFunction0DVectorViewShape Base class for unary functions (functors) that work on Interface0DIterator and return a list of ViewShape objects.
'''
def __init__(self):
''' Default constructor.
'''
pass
class UnaryFunction0DViewShape:
''' Class hierarchy: UnaryFunction0D > UnaryFunction0DViewShape Base class for unary functions (functors) that work on Interface0DIterator and return a ViewShape object.
'''
def __init__(self):
''' Default constructor.
'''
pass
class UnaryFunction1D:
''' Base class for Unary Functions (functors) working on Interface1D . A unary function will be used by invoking __call__() on an Interface1D. In Python, several different subclasses of UnaryFunction1D are used depending on the types of functors' return values. For example, you would inherit from a UnaryFunction1DDouble if you wish to define a function that returns a double value. Available UnaryFunction1D subclasses are: * UnaryFunction1DDouble * UnaryFunction1DEdgeNature * UnaryFunction1DFloat * UnaryFunction1DUnsigned * UnaryFunction1DVec2f * UnaryFunction1DVec3f * UnaryFunction1DVectorViewShape * UnaryFunction1DVoid
'''
name: str = None
''' The name of the unary 1D function.
:type: str
'''
class UnaryFunction1DDouble:
''' Class hierarchy: UnaryFunction1D > UnaryFunction1DDouble Base class for unary functions (functors) that work on Interface1D and return a float value.
'''
integration_type: 'IntegrationType' = None
''' The integration method.
:type: 'IntegrationType'
'''
def __init__(self):
''' __init__(integration_type) Builds a unary 1D function using the default constructor or the integration method given as an argument.
:param integration_type: An integration method.
:type integration_type: 'IntegrationType'
'''
pass
class UnaryFunction1DEdgeNature:
''' Class hierarchy: UnaryFunction1D > UnaryFunction1DEdgeNature Base class for unary functions (functors) that work on Interface1D and return a Nature object.
'''
integration_type: 'IntegrationType' = None
''' The integration method.
:type: 'IntegrationType'
'''
def __init__(self):
''' __init__(integration_type) Builds a unary 1D function using the default constructor or the integration method given as an argument.
:param integration_type: An integration method.
:type integration_type: 'IntegrationType'
'''
pass
class UnaryFunction1DFloat:
''' Class hierarchy: UnaryFunction1D > UnaryFunction1DFloat Base class for unary functions (functors) that work on Interface1D and return a float value.
'''
integration_type: 'IntegrationType' = None
''' The integration method.
:type: 'IntegrationType'
'''
def __init__(self):
''' __init__(integration_type) Builds a unary 1D function using the default constructor or the integration method given as an argument.
:param integration_type: An integration method.
:type integration_type: 'IntegrationType'
'''
pass
class UnaryFunction1DUnsigned:
''' Class hierarchy: UnaryFunction1D > UnaryFunction1DUnsigned Base class for unary functions (functors) that work on Interface1D and return an int value.
'''
integration_type: 'IntegrationType' = None
''' The integration method.
:type: 'IntegrationType'
'''
def __init__(self):
''' __init__(integration_type) Builds a unary 1D function using the default constructor or the integration method given as an argument.
:param integration_type: An integration method.
:type integration_type: 'IntegrationType'
'''
pass
class UnaryFunction1DVec2f:
''' Class hierarchy: UnaryFunction1D > UnaryFunction1DVec2f Base class for unary functions (functors) that work on Interface1D and return a 2D vector.
'''
integration_type: 'IntegrationType' = None
''' The integration method.
:type: 'IntegrationType'
'''
def __init__(self):
''' __init__(integration_type) Builds a unary 1D function using the default constructor or the integration method given as an argument.
:param integration_type: An integration method.
:type integration_type: 'IntegrationType'
'''
pass
class UnaryFunction1DVec3f:
''' Class hierarchy: UnaryFunction1D > UnaryFunction1DVec3f Base class for unary functions (functors) that work on Interface1D and return a 3D vector.
'''
integration_type: 'IntegrationType' = None
''' The integration method.
:type: 'IntegrationType'
'''
def __init__(self):
''' __init__(integration_type) Builds a unary 1D function using the default constructor or the integration method given as an argument.
:param integration_type: An integration method.
:type integration_type: 'IntegrationType'
'''
pass
class UnaryFunction1DVectorViewShape:
''' Class hierarchy: UnaryFunction1D > UnaryFunction1DVectorViewShape Base class for unary functions (functors) that work on Interface1D and return a list of ViewShape objects.
'''
integration_type: 'IntegrationType' = None
''' The integration method.
:type: 'IntegrationType'
'''
def __init__(self):
''' __init__(integration_type) Builds a unary 1D function using the default constructor or the integration method given as an argument.
:param integration_type: An integration method.
:type integration_type: 'IntegrationType'
'''
pass
class UnaryFunction1DVoid:
''' Class hierarchy: UnaryFunction1D > UnaryFunction1DVoid Base class for unary functions (functors) working on Interface1D .
'''
integration_type: 'IntegrationType' = None
''' The integration method.
:type: 'IntegrationType'
'''
def __init__(self):
''' __init__(integration_type) Builds a unary 1D function using either a default constructor or the integration method given as an argument.
:param integration_type: An integration method.
:type integration_type: 'IntegrationType'
'''
pass
class UnaryPredicate0D:
''' Base class for unary predicates that work on Interface0DIterator . A UnaryPredicate0D is a functor that evaluates a condition on an Interface0DIterator and returns true or false depending on whether this condition is satisfied or not. The UnaryPredicate0D is used by invoking its __call__() method. Any inherited class must overload the __call__() method.
'''
name: str = None
''' The name of the unary 0D predicate.
:type: str
'''
def __init__(self):
''' Default constructor.
'''
pass
def __call__(self, it: 'Interface0DIterator') -> bool:
''' Must be overloaded by inherited classes.
:param it: The Interface0DIterator pointing onto the Interface0D at which we wish to evaluate the predicate.
:type it: 'Interface0DIterator'
:rtype: bool
:return: True if the condition is satisfied, false otherwise.
'''
pass
class UnaryPredicate1D:
''' Base class for unary predicates that work on Interface1D . A UnaryPredicate1D is a functor that evaluates a condition on a Interface1D and returns true or false depending on whether this condition is satisfied or not. The UnaryPredicate1D is used by invoking its __call__() method. Any inherited class must overload the __call__() method.
'''
name: str = None
''' The name of the unary 1D predicate.
:type: str
'''
def __init__(self):
''' Default constructor.
'''
pass
def __call__(self, inter: 'Interface1D') -> bool:
''' Must be overloaded by inherited classes.
:param inter: The Interface1D on which we wish to evaluate the predicate.
:type inter: 'Interface1D'
:rtype: bool
:return: True if the condition is satisfied, false otherwise.
'''
pass
class ViewEdge:
''' Class hierarchy: Interface1D > ViewEdge Class defining a ViewEdge. A ViewEdge is an edge of the image graph. It connects two ViewVertex objects. It is made by connecting a set of FEdges.
'''
chaining_time_stamp: int = None
''' The time stamp of this ViewEdge.
:type: int
'''
first_fedge: 'FEdge' = None
''' The first FEdge that constitutes this ViewEdge.
:type: 'FEdge'
'''
first_viewvertex: 'ViewVertex' = None
''' The first ViewVertex.
:type: 'ViewVertex'
'''
id: 'Id' = None
''' The Id of this ViewEdge.
:type: 'Id'
'''
is_closed: bool = None
''' True if this ViewEdge forms a closed loop.
:type: bool
'''
last_fedge: 'FEdge' = None
''' The last FEdge that constitutes this ViewEdge.
:type: 'FEdge'
'''
last_viewvertex: 'ViewVertex' = None
''' The second ViewVertex.
:type: 'ViewVertex'
'''
nature: 'Nature' = None
''' The nature of this ViewEdge.
:type: 'Nature'
'''
occludee: 'ViewShape' = None
''' The shape that is occluded by the ViewShape to which this ViewEdge belongs. If no object is occluded, this property is set to None.
:type: 'ViewShape'
'''
qi: int = None
''' The quantitative invisibility.
:type: int
'''
viewshape: 'ViewShape' = None
''' The ViewShape to which this ViewEdge belongs.
:type: 'ViewShape'
'''
def __init__(self):
''' __init__(brother) Builds a ViewEdge using the default constructor or the copy constructor.
:param brother: A ViewEdge object.
:type brother: 'ViewEdge'
'''
pass
def update_fedges(self):
''' Sets Viewedge to this for all embedded fedges.
'''
pass
class ViewEdgeIterator:
''' Class hierarchy: Iterator > ViewEdgeIterator Base class for iterators over ViewEdges of the ViewMap Graph. Basically the increment() operator of this class should be able to take the decision of "where" (on which ViewEdge) to go when pointing on a given ViewEdge.
'''
begin: 'ViewEdge' = None
''' The first ViewEdge used for the iteration.
:type: 'ViewEdge'
'''
current_edge: 'ViewEdge' = None
''' The ViewEdge object currently pointed by this iterator.
:type: 'ViewEdge'
'''
object: 'ViewEdge' = None
''' The ViewEdge object currently pointed by this iterator.
:type: 'ViewEdge'
'''
orientation: bool = None
''' The orientation of the pointed ViewEdge in the iteration. If true, the iterator looks for the next ViewEdge among those ViewEdges that surround the ending ViewVertex of the "begin" ViewEdge. If false, the iterator searches over the ViewEdges surrounding the ending ViewVertex of the "begin" ViewEdge.
:type: bool
'''
def __init__(self, begin: 'ViewEdge' = None, orientation: bool = True):
''' __init__(brother) Builds a ViewEdgeIterator from a starting ViewEdge and its orientation or the copy constructor.
:param begin: The ViewEdge from where to start the iteration.
:type begin: 'ViewEdge'
:param orientation: If true, we'll look for the next ViewEdge among the ViewEdges that surround the ending ViewVertex of begin. If false, we'll search over the ViewEdges surrounding the ending ViewVertex of begin.
:type orientation: bool
:param brother: A ViewEdgeIterator object.
:type brother: 'ViewEdgeIterator'
'''
pass
def change_orientation(self):
''' Changes the current orientation.
'''
pass
class ViewMap:
''' Class defining the ViewMap.
'''
scene_bbox: 'BBox' = None
''' The 3D bounding box of the scene.
:type: 'BBox'
'''
def __init__(self):
''' Default constructor.
'''
pass
def get_closest_fedge(self, x: float, y: float) -> 'FEdge':
''' Gets the FEdge nearest to the 2D point specified as arguments.
:param x: X coordinate of a 2D point.
:type x: float
:param y: Y coordinate of a 2D point.
:type y: float
:rtype: 'FEdge'
:return: The FEdge nearest to the specified 2D point.
'''
pass
def get_closest_viewedge(self, x: float, y: float) -> 'ViewEdge':
''' Gets the ViewEdge nearest to the 2D point specified as arguments.
:param x: X coordinate of a 2D point.
:type x: float
:param y: Y coordinate of a 2D point.
:type y: float
:rtype: 'ViewEdge'
:return: The ViewEdge nearest to the specified 2D point.
'''
pass
class ViewShape:
''' Class gathering the elements of the ViewMap (i.e., ViewVertex and ViewEdge ) that are issued from the same input shape.
'''
edges: typing.List['ViewEdge'] = None
''' The list of ViewEdge objects contained in this ViewShape.
:type: typing.List['ViewEdge']
'''
id: 'Id' = None
''' The Id of this ViewShape.
:type: 'Id'
'''
library_path: typing.Union[str, 'ViewShape'] = None
''' The library path of the ViewShape.
:type: typing.Union[str, 'ViewShape']
'''
name: str = None
''' The name of the ViewShape.
:type: str
'''
sshape: 'SShape' = None
''' The SShape on top of which this ViewShape is built.
:type: 'SShape'
'''
vertices: typing.List['ViewVertex'] = None
''' The list of ViewVertex objects contained in this ViewShape.
:type: typing.List['ViewVertex']
'''
def __init__(self):
''' __init__(brother) __init__(sshape) Builds a ViewShape using the default constructor, copy constructor, or from a SShape .
:param brother: A ViewShape object.
:type brother: 'ViewShape'
:param sshape: An SShape object.
:type sshape: 'SShape'
'''
pass
def add_edge(self, edge: 'ViewEdge'):
''' Adds a ViewEdge to the list of ViewEdge objects.
:param edge: A ViewEdge object.
:type edge: 'ViewEdge'
'''
pass
def add_vertex(self, vertex: 'ViewVertex'):
''' Adds a ViewVertex to the list of the ViewVertex objects.
:param vertex: A ViewVertex object.
:type vertex: 'ViewVertex'
'''
pass
class ViewVertex:
''' Class hierarchy: Interface0D > ViewVertex Class to define a view vertex. A view vertex is a feature vertex corresponding to a point of the image graph, where the characteristics of an edge (e.g., nature and visibility) might change. A ViewVertex can be of two kinds: A TVertex when it corresponds to the intersection between two ViewEdges or a NonTVertex when it corresponds to a vertex of the initial input mesh (it is the case for vertices such as corners for example). Thus, this class can be specialized into two classes, the TVertex class and the NonTVertex class.
'''
nature: 'Nature' = None
''' The nature of this ViewVertex.
:type: 'Nature'
'''
def edges_begin(self) -> 'orientedViewEdgeIterator':
''' Returns an iterator over the ViewEdges that go to or come from this ViewVertex, pointing to the first ViewEdge of the list. The orientedViewEdgeIterator makes it possible to iterate in CCW order over these ViewEdges and to get the orientation of each ViewEdge (incoming/outgoing).
:rtype: 'orientedViewEdgeIterator'
:return: An orientedViewEdgeIterator pointing to the first ViewEdge.
'''
pass
def edges_end(self) -> 'orientedViewEdgeIterator':
''' Returns an orientedViewEdgeIterator over the ViewEdges around this ViewVertex, pointing after the last ViewEdge.
:rtype: 'orientedViewEdgeIterator'
:return: An orientedViewEdgeIterator pointing after the last ViewEdge.
'''
pass
def edges_iterator(self, edge: 'ViewEdge') -> 'orientedViewEdgeIterator':
''' Returns an orientedViewEdgeIterator pointing to the ViewEdge given as argument.
:param edge: A ViewEdge object.
:type edge: 'ViewEdge'
:rtype: 'orientedViewEdgeIterator'
:return: An orientedViewEdgeIterator pointing to the given ViewEdge.
'''
pass
class orientedViewEdgeIterator:
''' Class hierarchy: Iterator > orientedViewEdgeIterator Class representing an iterator over oriented ViewEdges around a ViewVertex . This iterator allows a CCW iteration (in the image plane). An instance of an orientedViewEdgeIterator can only be obtained from a ViewVertex by calling edges_begin() or edges_end().
'''
object: typing.Union[bool, 'ViewEdge'] = None
''' The oriented ViewEdge (i.e., a tuple of the pointed ViewEdge and a boolean value) currently pointed to by this iterator. If the boolean value is true, the ViewEdge is incoming.
:type: typing.Union[bool, 'ViewEdge']
'''
def __init__(self):
''' __init__(iBrother) Creates an orientedViewEdgeIterator using either the default constructor or the copy constructor.
:param iBrother: An orientedViewEdgeIterator object.
:type iBrother: 'orientedViewEdgeIterator'
'''
pass
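# Hedged usage sketch (assumption: this runs inside a Freestyle style module,
# where `stroke` is a Stroke object; increment()/is_end come from the Iterator
# base class these iterators derive from):
# it = stroke.stroke_vertices_begin()
# while not it.is_end:
#     sv = it.object                       # a StrokeVertex
#     sv.attribute.thickness = (1.0, 1.0)  # tweak its StrokeAttribute
#     it.increment()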
```
#### File: src/blender/simple_add_on.py
```python
bl_info = {
"name": "Move X Axis",
"blender": (2, 80, 0),
"category": "Object",
}
import bpy
class ObjectMoveX(bpy.types.Operator):
"""My Object Moving Script""" # Use this as a tooltip for menu items and buttons.
bl_idname = "object.move_x" # Unique identifier for buttons and menu items to reference.
bl_label = "Move X by One" # Display name in the interface.
bl_options = {'REGISTER', 'UNDO'} # Enable undo for the operator.
def execute(self, context): # execute() is called when running the operator.
# The original script
scene = context.scene
for obj in scene.objects:
obj.location.x += 1.0
return {'FINISHED'} # Lets Blender know the operator finished successfully.
def register():
bpy.utils.register_class(ObjectMoveX)
def unregister():
bpy.utils.unregister_class(ObjectMoveX)
# This allows you to run the script directly from Blender's Text editor
# to test the add-on without having to install it.
if __name__ == "__main__":
register()
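# Hedged usage note: once register() has run inside Blender, the operator can be
# invoked from the Python console or another script via its bl_idname:
# bpy.ops.object.move_x()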
```
#### File: ghPython/clean_code/patterns.py
```python
from math import sin, pi, ceil
import random
import Rhino.Geometry as rg
class Pattern():
def apply(self, v):
return v
class InOut():
def __init__(self, percentage, seed = 0):
self.p = percentage
random.seed(seed)
self.seed_dict = {}
def get_direction(self, index):
if index not in self.seed_dict:
self.seed_dict[index] = random.random() < self.p
return self.seed_dict[index]
class DotMap():
def __init__(self, x_spacing = 10., y_spacing = None, half_spacing = False, b_pt = rg.Point3d.Origin, in_out_class = None, direction = True):
self.x_spacing = x_spacing
self.y_spacing = y_spacing if y_spacing is not None else x_spacing  # assumption: None means square spacing
self.o = b_pt
self._h_spacing = half_spacing
self.h_spacing_x = x_spacing * .5
self.h_spacing_y = self.y_spacing * .5
self.direction = direction
self._random_function = in_out_class
print(self._h_spacing)
def position_based(self, pt):
loc_pt = pt - self.o
if self._h_spacing:
p_x = loc_pt.X % self.x_spacing
p_y = loc_pt.Y % self.y_spacing
y_i = int((loc_pt.Y - p_y) / self.y_spacing)
if y_i % 2 == 0:
p_x += self.h_spacing_x
p_x %= self.x_spacing
else:
p_x = loc_pt.X % self.x_spacing
p_y = loc_pt.Y % self.y_spacing
y_i = int((loc_pt.Y - p_y) / self.y_spacing)
output_pt = rg.Point3d(
p_x - self.h_spacing_x,
p_y - self.h_spacing_y,
0.
)
if not(isinstance(self._random_function, type(None))):
x_i = int((loc_pt.X - p_x) / self.x_spacing)
seed_val = x_i + y_i * 100
direction = self._random_function.get_direction(seed_val)
else:
direction = True
direction = direction if self.direction else not(direction)
if direction:
return output_pt, 1.
else:
return output_pt, -1.
class EdgeEasing():
def __init__(self, zero_length, normal_length, total_length):
self.min = zero_length
self.max = normal_length
self.d = self.max - self.min
self.h_length = .5 * total_length
def scale_val(self, d_x):
d_x = self.h_length - abs(d_x - self.h_length)
if d_x >= self.max:
return 1.0
elif d_x > self.min:
return (d_x - self.min) / self.d
else:
return 0.
class SinWave(Pattern):
def __init__(self, period, amplitude, dir_angle, b_pt = rg.Point3d.Origin, edge_easing = None):
self.p = period * .5 / pi
self.a = amplitude
self.t_m = rg.Transform.Rotation(dir_angle, rg.Point3d.Origin)
self.b_pt = b_pt
self.ee = edge_easing
print(self.ee)
def apply(self, v):
tmp_pt = v.vec_version - self.b_pt
tmp_pt.Transform(self.t_m)
if not(isinstance(self.ee, type(None))):
amp = self.a * self.ee.scale_val(v.vec_version.X)
else:
amp = self.a
scale_val = sin(tmp_pt.X / self.p) * amp
return rg.Point3d(v.o + v.n * scale_val)
class PyramidPattern(Pattern):
def __init__(self, radius, amplitude, y_scale = 1., dot_map = None, edge_easing = None):
self.r = radius
self.a = amplitude
self.y_scale = y_scale
self.dot_map = DotMap if isinstance(dot_map, type(None)) else dot_map
self.ee = edge_easing
print(self.ee)
def apply(self, v):
loc, direction = self.dot_map.position_based(v.vec_version)
l = (loc.X ** 2. + (loc.Y * self.y_scale) ** 2.) ** .5
if not(isinstance(self.ee, type(None))):
amp = self.a * self.ee.scale_val(v.vec_version.X)
else:
amp = self.a
if l > self.r:
return v.v
else:
scale_val = direction * amp * (1. - l/self.r)
return rg.Point3d(v.o + v.n * scale_val)
class EllipsoidPattern(Pattern):
def __init__(self, radius, amplitude, y_scale=1., dot_map=None, edge_easing=None):
self.r = radius
self.a = amplitude
self.y_scale = y_scale
self.dot_map = DotMap if isinstance(dot_map, type(None)) else dot_map
self.ee = edge_easing
print(self.ee)
def apply(self, v):
loc, direction = self.dot_map.position_based(v.vec_version)
l = (loc.X ** 2. + (loc.Y * self.y_scale) ** 2.) ** .5
if not(isinstance(self.ee, type(None))):
amp = self.a * self.ee.scale_val(v.vec_version.X)
else:
amp = self.a
if l > self.r:
return v.v
else:
h = (1 - (l / self.r) ** 2.) ** .5
scale_val = direction * amp * h
return rg.Point3d(v.o + v.n * scale_val)
class CylinderPattern(Pattern):
def __init__(self, radius_a, radius_b, height, amplitude, dot_map=None, edge_easing=None):
self.r_a = radius_a
self.r_b = radius_b
self.r_d = self.r_b - self.r_a
self.r_f = self.r_d / self.r_a
self.h = height
self.a = amplitude
self.dot_map = DotMap if isinstance(dot_map, type(None)) else dot_map
self.ee = edge_easing
print(self.ee)
def apply(self, v):
loc, direction = self.dot_map.position_based(v.vec_version)
if not(isinstance(self.ee, type(None))):
amp = self.a * self.ee.scale_val(v.vec_version.X)
else:
amp = self.a
y_distance = loc.Y
x_distance = abs(loc.X)
if abs(y_distance) + .01 < self.h * .5:
local_radius = self.r_a + self.r_d * (y_distance / self.h + .5)
else:
local_radius = -1
if x_distance < local_radius:
scale_val = (1 - (x_distance / local_radius) ** 2.0) ** .5 * amp * direction
return rg.Point3d(v.o + v.n * scale_val)
else:
return v.v
class LayerMap(Pattern):
def __init__(self, spacing, pattern_set, length, layer_spacing, radius, amplitude, direction = False, periodic = False, b_pt = rg.Point3d.Origin, edge_easing=None):
self.periodic = periodic
self.pattern_generation(pattern_set, spacing, length)
self.l = length
self.l_h = layer_spacing
self.r = radius
self.a = amplitude if direction else -amplitude
self.b_pt = b_pt
self.ee = edge_easing
def pattern_generation(self, pattern_set, spacing, length):
if self.periodic:
print("pattern periodicizing")
print("updating the spacing")
print("old spacing: %s" % spacing)
scaling_int_val = ceil(length / spacing)
spacing = length / scaling_int_val
print("new spacing: %s" % spacing)
else:
spacing = spacing
# pattern_map (start, step, count)
# only have to consider x distance
layer_length_vals = []
for pattern in pattern_set:
start, step, count = pattern[0], pattern[1], pattern[2]
if count < 1:
count = 1
length_vals = []
x_val = start
x_delta = step * spacing
while x_val < length:
length_vals.append(x_val)
x_val += x_delta
for i in range(count):
layer_length_vals.append(length_vals)
self.layer_length_vals = layer_length_vals
def apply(self, v):
tmp_pt = v.vec_version - self.b_pt
d_x = tmp_pt.X % self.l
i = int(tmp_pt.Y / self.l_h)
x_diss = self.layer_length_vals[ i % len(self.layer_length_vals) ]
dis_set = []
for x_d in x_diss:
dis_set.append(abs(d_x - x_d))
distance = min(dis_set)
if not (isinstance(self.ee, type(None))):
amp = self.a * self.ee.scale_val(d_x)
else:
amp = self.a
if distance < self.r:
scale_val = (1 - (distance / self.r) ** 2.0) ** .5 * amp
else:
scale_val = 0.0
return rg.Point3d(v.o + v.n * scale_val)
class AxolotlFlat(Pattern):
def __init__(self, sdf, amplitude, direction = False, b_pt = rg.Point3d.Origin, edge_easing=None):
self.sdf = sdf
self.a = amplitude if direction else -amplitude
self.b_pt = b_pt
self.ee = edge_easing
def apply(self, v):
tmp_pt = v.vec_version - self.b_pt
if not (isinstance(self.ee, type(None))):
amp = self.a * self.ee.scale_val(v.vec_version.X)
else:
amp = self.a
scale_val = self.sdf.GetDistance(tmp_pt.X, tmp_pt.Y, tmp_pt.Z) * amp
return rg.Point3d(v.o + v.n * scale_val)
class AxolotlSpatial(Pattern):
def __init__(self, sdf, amplitude, direction=False, b_pt=rg.Point3d.Origin, edge_easing=None):
self.sdf = sdf
self.a = amplitude if direction else -amplitude
self.b_pt = b_pt
self.ee = edge_easing
def apply(self, v):
tmp_pt = v.o - self.b_pt
if not (isinstance(self.ee, type(None))):
amp = self.a * self.ee.scale_val(v.vec_version.X)
else:
amp = self.a
scale_val = self.sdf.GetDistance(tmp_pt.X, tmp_pt.Y, tmp_pt.Z) * amp
return rg.Point3d(v.o + v.n * scale_val)
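# Hedged usage sketch (Rhino/Grasshopper environment assumed; `v` stands for a
# hypothetical vertex wrapper exposing .o (base point), .n (normal vector),
# .v (untouched fallback point) and .vec_version (sampling point), which is how
# the apply() methods above consume their argument):
# in_out = InOut(percentage=0.5, seed=1)
# dot_map = DotMap(x_spacing=10., y_spacing=10., half_spacing=True, in_out_class=in_out)
# pattern = EllipsoidPattern(radius=4., amplitude=2., dot_map=dot_map)
# moved_pt = pattern.apply(v)  # rg.Point3d offset along the vertex normal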
```
#### File: ghPython/clean_code/settings_logger.py
```python
class Settings():
def __init__(self):
self._print_settings = None
self._pattern_settings = None
self._geometry_settings = None
@property
def print_settings(self):
return self._print_settings
@print_settings.setter
def print_settings(self, other):
self._print_settings = other
@property
def pattern_settings(self):
return self._pattern_settings
@pattern_settings.setter
def pattern_settings(self, other):
self._pattern_settings = other
@property
def geometry_settings(self):
return self._geometry_settings
@geometry_settings.setter
def geometry_settings(self, other):
self._geometry_settings = other
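# Hedged usage sketch (plain property access; the dict contents are made up):
# s = Settings()
# s.print_settings = {"layer_height": 0.2}
# s.pattern_settings = {"amplitude": 2.0}
# s.print_settings["layer_height"]  # -> 0.2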
```
#### File: grasshopper/cups/bottom_line.py
```python
import Rhino.Geometry as rg
# # the grasshopper input variables:
# base_crvs # the curves which make out the bottom of the cup
# body_crvs # the curves that make out the rest of the cup
# layer_height # float
# bottom_spacing # the amount the bottom curves get offset
# changing_height # bool
# changing_height_range # tuple
# changing_height_periods # float
# # detecting the outer points of the curves
# getting the center point
def create_geometry(base_crvs, body_crvs, layer_height, bottom_spacing, changing_height, changing_height_range, changing_height_periods):
pt_0_0, pt_0_1 = base_crvs[0].PointAtEnd, base_crvs[0].PointAtStart
pt_1_0, pt_1_1 = base_crvs[1].PointAtEnd, base_crvs[1].PointAtStart
if pt_0_0.DistanceTo(pt_1_0) < .1 or pt_0_0.DistanceTo(pt_1_1) < .1:
c_pt = pt_0_0
else:
c_pt = pt_0_1
# offsetting curves
b_plane = rg.Plane.WorldXY
c_style = rg.CurveOffsetCornerStyle.Round
for b_crv in base_crvs:
a = b_crv.Offset(b_plane, .5 * bottom_spacing, .01, c_style)
b = b_crv.Offset(b_plane, - .5 * bottom_spacing, .01, c_style)
a_line = rg.Line(
a.PointAtEnd,
b.PointAtEnd
).ToNurbsCurve()
b_line = rg.Line(
a.PointAtStart,
b.PointAtStart
).ToNurbsCurve()
``` |
{
"source": "JonasWechsler/DeepLearningLab5",
"score": 2
} |
#### File: JonasWechsler/DeepLearningLab5/allconv.py
```python
from __future__ import print_function
import tensorflow as tf
from keras.datasets import cifar10
import image
from image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Activation, Conv2D, GlobalAveragePooling2D, merge, BatchNormalization
from keras.utils import np_utils
from keras.optimizers import SGD
from keras import backend as K
from keras.models import Model
from keras.layers.core import Lambda
from keras.callbacks import ModelCheckpoint
from lsuv_init import LSUVinit
from skimage import data, img_as_float
from skimage import exposure
from PIL import Image
import os
import pandas
import numpy as np
K.set_image_dim_ordering('tf')
def load_data():
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
n_classes = len(set(y_train.flatten()))
Y_train = np_utils.to_categorical(y_train, n_classes) # Convert to one-hot vector
Y_test = np_utils.to_categorical(y_test, n_classes)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255 #Normalize
X_test /= 255
return (X_train, Y_train, X_test, Y_test)
def preprocess_dataset(X,Y, settings):
if settings.augment_data:
datagen = ImageDataGenerator(
contrast_stretching=True, adaptive_equalization=False, histogram_equalization=False,
#featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False)
else:
datagen = ImageDataGenerator()
datagen.fit(X)
batches = datagen.flow(X, Y, batch_size=settings.batch_size)
return batches, datagen
def make_model(settings, X_train=None):
model = Sequential()
if settings.input_dropout:
model.add(Dropout(0.2, input_shape=(32, 32, 3)))
model.add(Conv2D(96, (3, 3), padding = 'same'))
else:
model.add(Conv2D(96, (3, 3), padding = 'same', input_shape=(32, 32, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(96, (3, 3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(96, (3, 3), padding='same', strides = (2,2)))
model.add(Dropout(0.5))
model.add(Conv2D(192, (3, 3), padding = 'same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(192, (3, 3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(192, (3, 3),padding='same', strides = (2,2)))
model.add(Dropout(0.5))
model.add(Conv2D(192, (3, 3), padding = 'same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(192, (1, 1),padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(10, (1, 1), padding='valid'))
model.add(GlobalAveragePooling2D())
model.add(Activation('softmax'))
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sgd = SGD(lr=settings.learning_rates[0], decay=settings.decay, momentum=0.9)
if settings.weights_path != None and settings.load_weights and os.path.isfile(settings.weights_path):
print("loading weights from checkpoint")
model.load_weights(settings.weights_path)
if settings.orthonormal_init:
model = LSUVinit(model, X_train[:settings.batch_size,:,:,:])
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
return model
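# Hedged usage note (standard Keras API, nothing specific to this repo): after
# training, class probabilities for a batch can be obtained with
# probs = model.predict(X_test[:8]) and hard labels via np.argmax(probs, axis=1).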
def run(settings, batches, test_batches, X_train):
model = make_model(settings, X_train)
checkpoint = ModelCheckpoint(settings.weights_path, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='max')
history = {'val_loss': [], 'val_acc': [], 'loss': [], 'acc': []}
total_epochs = 0
iter=0
for rate, epochs in zip(settings.learning_rates, settings.epoch_lengths):
K.set_value(model.optimizer.lr, rate)
history_callback = model.fit_generator(batches,
#steps_per_epoch=X_train.shape[0]/batch_size,
epochs=epochs,
validation_data=test_batches,
validation_steps=1,
#validation_steps=X_test.shape[0],
callbacks=[checkpoint],
verbose=2)
next_hist = history_callback.history
history = {key:history[key] + next_hist[key] for key in history}
pandas.DataFrame(history).to_csv("history-{}.csv".format(iter))
iter=iter+1
total_epochs += epochs
for key in history:
assert(len(history[key]) == total_epochs)
return history
class Settings:
def __init__(self,
batch_size=64,
epoch_lengths=[100],
learning_rates=[0.01],
momentum=0.9,
weights_path="weights.hdf5",
decay=3e-5,
input_dropout=False,
orthonormal_init=True,
augment_data=True):
self.batch_size = batch_size
self.epoch_lengths = epoch_lengths
self.learning_rates = learning_rates
self.load_weights=False
self.weights_path=weights_path
self.decay = decay
self.input_dropout = input_dropout
self.momentum = momentum
self.orthonormal_init = orthonormal_init
self.augment_data = augment_data
def param_search(settings_list, batches, test_batches, X_train):
with open('out.txt', 'a') as f:
for settings in settings_list:
history = run(settings, batches, test_batches, X_train)
pandas.DataFrame(history).to_csv("history-{}-{}.csv".format(settings.decay,settings.learning_rate[0]))
loss = np.min(history['loss'])
acc = np.max(history['acc'])
val_loss = np.min(history['val_loss'])
val_acc = np.max(history['val_acc'])
line = ','.join(map(str, [settings.learning_rates[0], settings.decay, loss, acc, val_loss, val_acc]))
print(line)
f.write(line)
f.write('\n')
f.flush()
def learner(settings, batches, test_batches, X_train):
history = run(settings, batches, test_batches, X_train)
pandas.DataFrame(history).to_csv("history-{}-{}-{}.csv".format(settings.epoch_lengths[0], settings.learning_rates[0], settings.decay))
#model.save('final_model.h5')
def run_our_model():
settings = Settings(batch_size = 32,
epoch_lengths = [100],
learning_rates = [0.015],
decay = 3e-5,
input_dropout = False,
orthonormal_init = False)
X_train, Y_train, X_test, Y_test = load_data()
batches, datagen = preprocess_dataset(X_train, Y_train, settings)
test_batches = (X_test, Y_test)
learner(settings, batches, test_batches, X_train)
def run_their_model():
settings = Settings(batch_size = 64,
epoch_lengths = [200, 50, 50, 50],
learning_rates = [0.05, 0.005, 0.0005, 0.00005],
decay = 0.001,
input_dropout = True,
orthonormal_init = False)
X_train, Y_train, X_test, Y_test = load_data()
batches, datagen = preprocess_dataset(X_train, Y_train, settings)
test_batches = (X_test, Y_test)
learner(settings, batches, test_batches, X_train)
if __name__ == "__main__":
run_our_model()
#param_search([3e-5, 3e-5, 3e-5, 3e-5, 3e-5, 3e-5], [0.005, 0.01, 0.015, 0.02, 0.025, 0.03], 20)
``` |
{
"source": "JonasWechsler/Kerberos",
"score": 3
} |
#### File: Kerberos/lib/lib.py
```python
import random
import os.path
from fractions import gcd
from math import ceil, sqrt
from itertools import count
from random import getrandbits
from random import randint
import itertools
import sys
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def modinv(a, m):
g, x, y = egcd(a, m)
if g != 1:
raise Exception('{}, {} modular inverse does not exist'.format(a, m))
else:
return x % m
def crt(X, P):
z = 0
pi = reduce(lambda a, b: a*b, P)
for x_i, p_i in zip(X, P):
p = pi / p_i
z += x_i*modinv(p, p_i)*p
return z%pi
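# Hedged worked example for crt(): x = 2 (mod 3) and x = 3 (mod 5) gives
# crt([2, 3], [3, 5]) == 8, since 8 % 3 == 2 and 8 % 5 == 3.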
#############################################################################
# #
# Primality Testing and Generation #
# #
#############################################################################
def sieve(n):
A=[True]*(n+1)
A[0] = False
A[1] = False
for i in xrange(2,int(sqrt(n)+1)):
if A[i]:
for j in map(lambda x:i*i+i*x,xrange(n)):
if j > n:
break
A[j] = False
P=[]
C=[]
for i in xrange(len(A)):
if A[i]:
P.append(i)
else:
C.append(i)
return [P,C]
sieve_cache = sieve(1000)
def sieve_cache_test(n):
for i in sieve_cache[0]:
if n%i == 0 and n != i:
return False
return True
def fermat_test(n, tests):
if n == 2:
return True
if n == 0 or n == 1 or n % 2 == 0:
return False
for d in xrange(tests):
a = randint(1, n-1)
div = gcd(a,n)
if div > 1:
return False
if pow(a,n-1,n) != 1:
return False
return True
def miller_rabin_test(n,k):
if n == 1:
return False
if n == 2:
return True
if n%2 == 0:
return False
m = n - 1
t = 0
#Binary search would have better worst case, but I think this will
#ultimately be faster bc we check for divisibility via sieve
while True:
try:
q, r = divmod(m, 2)
if r == 1:
break
t+=1
m = q
except:
print "{} {} {} {} {}".format(q,r,t,m,n)
#x = a^d mod n
#n-1 = 2^r * d
def _possible_prime(a):
x = pow(a,m,n)
if x == 1 or x == n - 1:
return True
for i in xrange(t):
x = pow(x,2,n)
if x == 1:
return False
if x == n-1:
return True
return False
for i in xrange(k):
a = randint(2, n-1)
if not _possible_prime(a):
return False
return True
def isPrime(n):
a = 100
return sieve_cache_test(n) and fermat_test(n,a) and miller_rabin_test(n,a)
def makePrime(bits=128):
while True:
r = getrandbits(bits)
if isPrime(r):
return r
def primeFactor(n):
primes = [2,3]
primefacs = []
exp = []
for i in range(5,n):
if isPrime(i):
primes.append(i)
for p in primes:
e=0
while (n%p==0):
n=n//p
e+=1
if e != 0:
primefacs.append(p)
exp.append(e)
return (primefacs, exp)
#############################################################################
# #
# Discrete Log Solvers #
# #
#############################################################################
# Baby step giant step algorithm
def dl3(g, h, p):
m = int(ceil(sqrt(p)))
lis = {}
for j in xrange(m):
idx = pow(g,j,p)
if not idx in lis:
lis[idx] = j
#Really should probably be a hashmap
minv = modinv(g, p)
inv = pow(minv, m, p)
value = h
for i in xrange(0, m):
if value in lis:
return (i * m + lis[value]) % p
value = value * inv % p
return value
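# Hedged worked example for dl3() (baby-step giant-step): 2**6 % 11 == 9,
# so dl3(2, 9, 11) should return 6.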
def dl2(g, h, p, e, q):
ppow = pow(p, e-1, q)
lgpow = pow(g, ppow, q)
hpow = pow(h, ppow, q)
X = dl3(lgpow, hpow, q)
for i in range(1, e):
gpow = pow(modinv(g, q), X, q)
ppow = pow(p, e-i-1, q)
hpow = pow(h*gpow, ppow, q)
X = X + dl3(lgpow, hpow, q)*pow(p, i, q)
return X
def discreteLog(g, h, q):
N = q - 1
F = primeFactor(N)
C = []
P = []
for i in range(0, len(F[0])):
p = F[0][i]
e = F[1][i]
exp = N/pow(p,e,q)
g0 = pow(g, exp, q)
h0 = pow(h, exp, q)
C.append(dl2(g0, h0, p, e, q))
P.append(pow(p, e))
return crt(C, P)
#############################################################################
# #
# RSA Cracking and Factorization #
# #
#############################################################################
#pollard p-1 algorithm
def factor(n):
a = 2
for j in itertools.count(1):
if j > n:
return -1
a = pow(a, j, n)
d = gcd(a-1, n)
if 1 < d and d < n:
return d
#x^e = c mod n
def rsa_crack(e,c,n):
p = factor(n)
q = n//p
d = modinv(e, (p-1)*(q-1))
m = pow(c,d,n)
return m
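# Hedged worked example for rsa_crack(): with p=61, q=53 (n=3233, p-1 smooth
# enough for the Pollard p-1 step) and e=17,
# rsa_crack(17, pow(42, 17, 3233), 3233) should recover 42.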
#############################################################################
# #
# Modular Polynomial Arithmetic in Fp[x]/(m) #
# #
#############################################################################
def div(p1, m, p):
result = [0]*len(p1)
rest = list(p1)
for i in xrange(len(p1)-1, -1, -1):
high = len(m)-1
if i-high < 0:
break
r = rest[i] / m[high]
result[i-high] = r%p
#l = [0]*len(p1)
for j in xrange(len(m)):
#l[j+i-high]=r*m[j]
rest[j+i-high]-=(r*m[j])
rest[j+i-high]%=p
return rest
#removes trailing zeros
def trim(p):
while not p[-1]:
p.pop()
if len(p) == 0:
return p
return p
def reducer(p1, m, p):
result = p1
trim(result)
trim(m)
if len(result) == 0 or len(m) == 0:
return result
while len(result) > len(m)-1:
result = div(result, m, p)
trim(result)
return result
def mul(p1, p2, m, p):
result = [0]*len(p1)*len(p2)
for i in xrange(len(p1)):
for j in xrange(len(p2)):
result[i+j]+=(p1[i]*p2[j])
result[i+j]%=p
return reducer(result, m, p)
def add(p1, p2, m, p):
result = [0]*len(p1)
for i in xrange(len(p1)):
result[i] += (p1[i] + p2[i])
result[i] %= p
print result
return reducer(result, m, p)
def sub(p1, p2, m, p):
result = []
for i in xrange(len(p1)):
result += (p1[i] - p2[i])%p
return reducer(result, m, p)
#############################################################################
# #
# Block Chain Encryption, Decryption #
# #
#############################################################################
# e is a encryption function
def encrypt_blockchain(M, e, iv = 5):
M = map(int, M)
C = [iv]
for idx in xrange(len(M)):
C.append(e(M[idx] ^ C[idx]))
return C
# d is a decryption function
def decrypt_blockchain(C, d, iv = 5):
C = map(int, C)
M = []
for idx in xrange(1,len(C)):
M.append(d(C[idx]) ^ C[idx-1])
return M
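# Hedged round-trip sketch (identity e/d used purely for illustration; real use
# passes proper encryption/decryption callables):
# C = encrypt_blockchain([1, 2, 3], lambda x: x)   # -> [5, 4, 6, 5]
# decrypt_blockchain(C, lambda x: x)               # -> [1, 2, 3]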
#############################################################################
# #
# Symmetric Key Encryption #
# #
#############################################################################
import base64
from Crypto.Cipher import AES
from Crypto import Random
import hashlib
import urllib
import os
BS = 16
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
unpad = lambda s : s[:-ord(s[len(s)-1:])]
def one_way_hash(value):
#TODO Stop using md5. Need to find a cryptographically secure one-way hash
#that outputs 32-byte quantities, or use different symmetric-key encryption
#to take in variable-length keys
md = hashlib.md5()
md.update(str(value))
result = base64.b64encode(md.digest())
return result
def encrypt(txt, key):
txt = str(txt)
key = str(key)
key = one_way_hash(key)
key = str(key)
txt = pad(txt)
key = pad(key)
iv = os.urandom(16)[0:16]
cipher = AES.new(key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(txt))
def decrypt(enc, key):
key = str(key)
key = one_way_hash(key)
key = str(key)
key = pad(key)
enc = base64.b64decode(enc)
iv = enc[:16]
cipher = AES.new(key, AES.MODE_CBC, iv)
return unpad(cipher.decrypt(enc[16:]))
def encrypt_tuple(txt, key):
encr = encrypt(str(txt), key)
return encr
from ast import literal_eval
def decrypt_tuple(txt, key):
decr = decrypt(txt, key)
return literal_eval(decr)
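# Hedged round-trip sketch (assumes the PyCrypto AES backend imported above):
# token = encrypt("hello", "secret-key")
# decrypt(token, "secret-key")                    # -> "hello"
# decrypt_tuple(encrypt_tuple((1, 2), "k"), "k")  # -> (1, 2)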
``` |
{
"source": "JonasWechsler/NeuralNetsLab3",
"score": 3
} |
#### File: JonasWechsler/NeuralNetsLab3/energy.py
```python
import hopfield
import attractors
def energy_at_attractors():
W = attractors.get_weights()
_, A = attractors.get_attractors()
energy = [hopfield.energy(W, x) for x in A]
for a, b in zip(energy, A):
print(a, b)
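# For reference (assuming the usual Hopfield convention implemented by
# hopfield.energy): E(x) = -sum_i sum_j W[i][j] * x[i] * x[j], so stored
# patterns should sit at local minima of this energy.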
if __name__ == "__main__":
energy_at_attractors()
```
#### File: JonasWechsler/NeuralNetsLab3/pict.py
```python
import hopfield
import plot
import numpy as np
def pict_data():
with open("data/pict.dat") as f:
data_list = list(map(int, f.read().split(",")))
return np.array(data_list).reshape((11, 1024))
def reformat_data(data):
return data.reshape((32, 32))
def sequential():
data_array = pict_data()
X0 = data_array[:3]
W = hopfield.weights(X0)
X = [data_array[9], data_array[10]]
# for x in X0:
# plot.plot_heatmap(reformat_data(x))
# plot.plot_heatmap(reformat_data(hopfield.recall_until_stable(W, x)))
for x in X:
plot.plot_heatmap(reformat_data(x))
plot.plot_heatmap(reformat_data(hopfield.recall_until_stable(W, x)))
res_array = hopfield.recall_sequentially(W, x)
for r in res_array:
plot.plot_heatmap(reformat_data(np.array(r)))
def calculate_error(data, expected):
return 100*sum(1 for i, j in zip(data, expected) if i != j)/len(data)
def add_noise(data, choices):
new_data = list(data)
for c in choices:
new_data[c] *= -1
return new_data
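# Hedged worked examples for the two helpers above:
# calculate_error([1, -1, 1], [1, 1, 1]) -> 100 * 1/3 (percent of differing positions)
# add_noise([1, -1, 1], [0, 2])          -> [-1, -1, -1] (indices 0 and 2 flipped)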
def noise():
data_array = pict_data()
X0 = data_array[:3]
W = hopfield.weights(X0)
for x in X0:
error = []
for i in range(0, 101, 10):
choices = np.random.choice(len(x), size=int(i*len(x)/100), replace=False)
x_noise = add_noise(x, choices)
error.append(calculate_error(x, hopfield.recall_until_stable(W, x_noise)))
print(error)
plot.plot_points(error)
def test_capacity(start, end):
data_array = pict_data()
errors = []
for n in range(start, end, 1):
print(n)
X0 = data_array[:n]
W = hopfield.weights(X0)
error = []
for x in X0:
choices = np.random.choice(len(x), size=int(20*len(x)/100), replace=False)
x_noise = add_noise(x, choices)
error.append(calculate_error(x, hopfield.recall_until_stable(W, x_noise)))
errors.append(sum(error)/len(error))
print(errors)
plot.plot(range(start, end), errors)
def test_capacity_random(start, end):
data_array = pict_data()
errors = []
for n in range(start, end, 1):
X0 = list(data_array[:3])
for _ in range(n-3):
X0.append(generate_random())
print(len(X0))
W = hopfield.weights(X0)
error = []
for x in X0:
choices = np.random.choice(len(x), size=int(20*len(x)/100), replace=False)
x_noise = add_noise(x, choices)
error.append(calculate_error(x, hopfield.recall_until_stable(W, x_noise)))
errors.append(sum(error)/len(error))
print(errors)
plot.plot(range(start, end), errors)
def test_random(N=100, should_add_noise=False, should_remove_self_conn=False):
data_array = [generate_random(100) for _ in range(300)]
errors = []
for n in range(1, N):
print(n)
X0 = data_array[:n]
if should_remove_self_conn:
W = hopfield.weights(X0, True)
else:
W = hopfield.weights(X0)
error = []
for x in X0:
x_noise = x
if should_add_noise:
choices = np.random.choice(len(x), size=int(20*len(x)/100), replace=False)
x_noise = add_noise(x, choices)
error.append(1 if (x == hopfield.recall_until_stable(W, x_noise)).all() else 0)
errors.append(sum(error))
print(errors)
plot.plot(range(1, N), errors)
def generate_random(N=1024):
return np.random.choice([-1, 1], size=N)
def generate_random_biased(N=1024):
return np.random.choice([-1, 1], size=N, p=[1./3, 2./3])
if __name__ == "__main__":
#sequential()
#noise()
#test_capacity(4, 8)
#test_capacity_random(4, 20)
test_random(25)
test_random(25, should_remove_self_conn=True)
#test_random(True)
``` |
{
"source": "JonasWEIG/Desicion-Support-System-for-HE",
"score": 3
} |
#### File: src/models/apriori.py
```python
import pandas as pd
#import numpy as np
from mlxtend.frequent_patterns import apriori, association_rules
import os
def add_association_rules(df_layers, df_StudStart, min_support = 0.02, typ = 'bachelor'):
df_apri = pd.merge(df_layers, df_StudStart[['MNR_Zweit', 'ECTS_final']], how= 'left', on = 'MNR_Zweit')
#df_apri = pd.merge(df_apri, df_clusters[['MNR_Zweit', 'y_predicted']], how = 'left', on = 'MNR_Zweit')
df_apri = df_apri[(df_apri.ECTS_final != 0) & (df_apri.modultitel != 'Gesamtkonto') & (df_apri.Modulebene_2 != 55481) &
(df_apri.Bereich_1 != 1700) & ((df_apri.Bereich_1 != 1500) | (df_apri.studiengang == 'Sozialökonomik')) & (df_apri.Bereich_1 != 1997)]
#d = df_apri[df_apri.studiengang == 'Sozialökonomik']
studiengangs = pd.unique(df_apri.studiengang).tolist()
#studiengang = 'Wirtschaftswissenschaften'
df_final = pd.DataFrame()
for studiengang in studiengangs:
# select modules with note_m2 > 3.2 (bachelor) or < 2.1 (master)
if typ == 'bachelor':
df = df_apri.loc[(df_apri.note_m2 > 3.2) & (df_apri.studiengang == studiengang) &
(df_apri.Startsemester > 20131),]
else:
df = df_apri.loc[(df_apri.note_m2 < 2.1) & (df_apri.studiengang == studiengang) &
(df_apri.Startsemester > 20131),]
#min_support = 0.02
# one hot encoding modultitel
df = pd.concat([df['MNR_Zweit'], pd.get_dummies(df['modultitel'])], axis = 1)
# group by mnr_zweit & do apriori and association
df = df.groupby(by = ['MNR_Zweit']).max()
df_apr = apriori(df, min_support = min_support, use_colnames = True, verbose = 1, max_len = 2)
df_ar = association_rules(df_apr, metric = 'confidence', min_threshold = 0.65)
# add lists
df_ar['associate'] = df_ar.loc[:,'antecedents'].apply(list)
df_ar['consequ'] = df_ar['consequents'].apply(list)
df_ar['modultitel'] = df_ar['associate'].apply(lambda x: ','.join(map(str, x)))
df_ar['problem'] = df_ar['consequ'].apply(lambda x: ','.join(map(str, x)))
df = df_layers.loc[(df_layers.note_m2 > 3.2) & (df_layers.studiengang == studiengang) &
(df_layers.bestanden == 'PV'), ['MNR_Zweit', 'modultitel', 'note_m2']]
test = df.merge(df_ar[['modultitel', 'problem']], on = 'modultitel', how = 'left')
test = test.drop_duplicates()
df2 = df_layers.loc[(df_layers.studiengang == studiengang) &
(df_layers.bestanden == 'PV'), ['MNR_Zweit', 'modultitel', 'note_m2']]
test2 = test.merge(df2, on = ['MNR_Zweit', 'modultitel'], how = 'right')
test2 = test2.drop_duplicates()
mnrs = pd.unique(test2.MNR_Zweit)
for mnr in mnrs:
titles = pd.unique(test2.loc[test2.MNR_Zweit == mnr, 'problem'])
liste = pd.unique(test2.loc[test2.MNR_Zweit == mnr, 'modultitel'])
for title in titles:
if title not in liste:
test2.loc[(test2.MNR_Zweit == mnr) & (test2.problem == title), 'real'] = title
df = test2[test2.real.notna()]
df = df[['MNR_Zweit', 'modultitel', 'problem']]
df = df.merge(df_ar[['problem', 'modultitel', 'lift', 'confidence']], on = ['problem', 'modultitel'], how = 'left')
df_final = pd.concat([df, df_final], ignore_index = True, sort = True)
if typ == 'bachelor':
df_ar.to_csv(os.path.abspath('../../data/processed/apriori_data/df_apriori_ba_' + studiengang + '.csv'), sep=';', decimal=',', encoding = 'utf-8')
else:
df_ar.to_csv(os.path.abspath('../../data/processed/apriori_data/df_apriori_ma_' + studiengang + '.csv'), sep=';', decimal=',', encoding = 'utf-8')
return df_final
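# Hedged illustration: each row of df_ar is one mined rule, e.g. (only if such a
# rule clears min_support and the 0.65 confidence threshold) antecedents =
# frozenset({'Statistik'}), consequents = frozenset({'Mathematik'}) plus its
# support, confidence and lift; df_final then pairs a student's weak module
# ('modultitel') with the rule's consequent ('problem').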
if __name__ == '__main__':
df_layers = pd.read_pickle(os.path.abspath('../../data/interim/bachelor/df_layers.pkl'))
df_StudStart = pd.read_pickle(os.path.abspath('../../data/interim/bachelor/df_studstart.pkl'))
df_final = add_association_rules(df_layers, df_StudStart)
df_final.to_pickle(os.path.abspath('../../data/interim/bachelor/df_apriori.pkl'))
df_final.to_csv(os.path.abspath('../../data/processed/bachelor/df_apriori.csv'), sep=';', decimal=',', encoding = 'utf-8')
```
#### File: src/models/compare_models.py
```python
import pandas as pd
import numpy as np
import os
import pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import sys
sys.path.append('../features')
from build_features import prepare_final_files
def naive_bayes(x_train, y_train, x_test, y_test):
from sklearn.naive_bayes import GaussianNB
modelnb = GaussianNB()
modelnb.fit(x_train, y_train)
score = modelnb.score(x_test, y_test)
y_pred = modelnb.predict(x_test)
f1 = f1_score(y_test, y_pred, average = 'binary')
print("NB: acurracy: " + str(score) + "; f1 score: " + str(f1))
with open(os.path.abspath('../../models/modelnb_pickle'), 'wb') as file:
pickle.dump(modelnb, file)
def random_forest(x_train, y_train, x_test, y_test):
from sklearn.ensemble import RandomForestClassifier
modelrf = RandomForestClassifier(n_estimators = 1000)
modelrf.fit(x_train, y_train)
score = modelrf.score(x_test, y_test)
y_pred = modelrf.predict(x_test)
f1 = f1_score(y_test, y_pred, average = 'binary')
print("RF: acurracy: " + str(score) + "; f1 score: " + str(f1))
with open(os.path.abspath('../../models/modelrf_pickle'), 'wb') as file:
pickle.dump(modelrf, file)
def logistic_regression(x_train, y_train, x_test, y_test):
from sklearn.linear_model import LogisticRegression
logReg = LogisticRegression()
logReg.fit(x_train, y_train)
score = logReg.score(x_test, y_test)
y_pred = logReg.predict(x_test)
f1 = f1_score(y_test, y_pred, average = 'binary')
print("LR: acurracy: " + str(score) + "; f1 score: " + str(f1))
with open(os.path.abspath('../../models/modellr_pickle'), 'wb') as file:
pickle.dump(logReg, file)
def support_vector_machine(x_train, y_train, x_test, y_test):
from sklearn.svm import SVC
modelsvm = SVC(kernel = 'rbf')
modelsvm.fit(x_train, y_train)
score = modelsvm.score(x_test, y_test)
y_pred = modelsvm.predict(x_test)
f1 = f1_score(y_test, y_pred, average = 'binary')
print("SVM: acurracy: " + str(score) + "; f1 score: " + str(f1))
with open(os.path.abspath('../../models/modelsvm_pickle'), 'wb') as file:
pickle.dump(modelsvm, file)
def neural_net(x_train, y_train, x_test, y_test):
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
modelann = Sequential([
Dense(units = 32, input_shape=(len(x_test.columns),), activation = 'sigmoid'),
Dense(units = 64, activation = 'sigmoid'),
Dense(units = 128, activation = 'sigmoid'),
Dense(units = 256, activation = 'sigmoid'),
Dense(units = 128, activation = 'sigmoid'),
Dense(units = 64, activation = 'sigmoid'),
Dense(units = 32, activation = 'sigmoid'),
Dense(units = 1, activation = 'sigmoid')])
modelann.compile(optimizer = Adam(learning_rate=0.001), loss= 'mean_squared_error', metrics=['accuracy'])
modelann.fit(x_train, y_train, batch_size=64, epochs=100, validation_split=0.2, verbose = 0, callbacks=None, validation_data=None, shuffle=True)
y_pred = modelann.predict(x_test)
y_pred = np.where(y_pred >= 0.5, 1, 0)
#y_test, y_pred = set(1, 2, 4), set(2, 8, 1)
y = np.asarray(y_test) - y_pred[:,0]
score = np.count_nonzero(y == 0)/(y_pred[:,0]).size
#from sklearn.metrics import f1_score, accuracy_score, confusion_matrix
#cm = confusion_matrix(y_test, y_pred)
f1 = f1_score(y_test, y_pred, average = 'binary')
print("ANN: acurracy: " + str(score) + "; f1 score: " + str(f1))
with open(os.path.abspath('../../models/modelann_pickle'), 'wb') as file:
pickle.dump(modelann, file)
if __name__ == '__main__':
#for master change path from bachelor to master
df_StudStart = pd.read_pickle(os.path.abspath('../../data/interim/bachelor/df_studstart_without_prediction.pkl'))
df_Path = pd.read_pickle(os.path.abspath('../../data/interim/bachelor/df_path.pkl'))
df_demo = pd.read_pickle(os.path.abspath('../../data/interim/bachelor/df_demo.pkl'))
for semester in range(1,2):
#for master add 'master in function
Final = prepare_final_files(semester, df_StudStart, df_Path, df_demo, 'bachelor')[1]
x_train, x_test, y_train, y_test = train_test_split(Final.drop(['MNR_Zweit', 'Startsemester', 'studiengang', 'final'], axis = 1),
Final.final, test_size = 0.25, random_state = 0)
logistic_regression(x_train, y_train, x_test, y_test)
random_forest(x_train, y_train, x_test, y_test)
naive_bayes(x_train, y_train, x_test, y_test)
support_vector_machine(x_train, y_train, x_test, y_test)
neural_net(x_train, y_train, x_test, y_test)
``` |
{
"source": "jonas-weimar/pinkie",
"score": 3
} |
#### File: pinkie/knn/nearestneighbour.py
```python
from scipy.spatial import distance
from termcolor import colored
import numpy as np
# => Nearest Neighbour Classifier
class NNClassifier(object):
# Initializer / Constructor:
def __init__(self):
self.id = hash(id(self))
# Hook methods for build in functions:
def __str__(self):
return "Classifier: " + str(self.id)
# Private methods
def __information(self, message, start=""):
print(colored('{}Information:'.format(start), 'cyan'), message)
def __success(self, message, start=""):
print(colored('{}Success:'.format(start), 'white'), message)
def __closest(self, row):
best_dist = distance.euclidean(row, self.x_train[0])
best_index = 0
for i in range(len(self.x_train)):
dist = distance.euclidean(row, self.x_train[i])
if dist < best_dist:
best_dist = dist
best_index = i
return self.y_train[best_index]
# Public methods
def setID(self, id):
self.id = id
def getID(self):
return self.id
def fit(self, X_train, Y_train):
self.x_train = X_train
self.y_train = Y_train
def predict(self, X_predict):
predictions = []
for row in X_predict:
predictions.append(self.__closest(row))
self.__success("Prediction was made")
return predictions
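# Minimal usage sketch (assumption: the toy feature vectors and labels below are
# invented for illustration and are not part of the original repository).
if __name__ == "__main__":
    clf = NNClassifier()
    clf.fit([[1.0, 1.0], [5.0, 5.0]], ["a", "b"])        # memorise the training data
    print(clf)                                           # e.g. "Classifier: 1234..."
    print(clf.predict([[0.9, 1.2], [4.8, 5.1]]))         # expected: ['a', 'b']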
```
#### File: pinkie/mlp/activation.py
```python
import numpy as np
from random import uniform
def randlin(x, derivative=False):
if derivative:
return uniform(0.,1.)
return uniform(0.,1.) * x
def sigmoid(x, derivative=False):
if derivative:
return x * (1.0 - x)
return 1.0/(1+ np.exp(-x))
def tanh(x, derivative=False):
if derivative:
return (1-np.power(x,2))
return np.tanh(x)
def linear(x, derivative=False, alpha=1):
if derivative:
return alpha
return alpha * x
def relu(x, derivative=False, alpha=1):
if derivative:
x[x<=0] = 0
x[x>0] = alpha
return x
return np.maximum(0, alpha*x)
def leakyrelu(x, derivative=False, alpha=1):
if derivative:
x[x==0] = 0
x[x<0] = 0.2
x[x>0] = alpha
return x
x[x==0] = 0
x[x<0] = 0.2 * x
x[x>0] = alpha * x
return x
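# Quick sanity-check sketch (assumption: the sample values are invented, not from the
# original repository). Note the convention above: sigmoid(x, derivative=True) expects
# the already-activated value s and returns s * (1 - s), while relu/leakyrelu modify
# the passed array in place.
if __name__ == "__main__":
    s = sigmoid(0.0)                             # 0.5
    print(s, sigmoid(s, derivative=True))        # 0.5 0.25
    print(tanh(0.0), linear(2.0, alpha=3))       # 0.0 6.0
    print(relu(np.array([-1.0, 2.0])))           # [0. 2.]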
``` |
{
"source": "Jonas-Wennerstrom/Tagger",
"score": 3
} |
#### File: Tagger/Tagger/taggersql.py
```python
from sqlalchemy import insert, select, asc, func, exists
from taggermodels import *
##Selection
#Selects a subset of File
def select_file(session, taglist):
"""Returns a list of all File entries with entries in Match matching
all tags in taglist.
Parameters:
session: An SQLAlchemy database session.
taglist (string list): A list of tag names.
Returns:
q (SQLAlchemy object): Information on entries in table File with
entries in table Match corresponding to entries in table Tag
corresponding to all 'tags' in taglist.
"""
q = session.query(File).join(
File.contains).filter(
Tag.name.in_(taglist)).group_by(
File.title).having(
func.count()==len(taglist)).all()
return q
def get_tags_from_names(session,taglist):
"""Returns all tag objects with Tag.name in taglist.
"""
return session.query(Tag).filter(Tag.name.in_(taglist)).all()
def get_file(session,title):
"""Returns file object with File.title == title.
"""
return session.query(File).filter(
File.title == title).scalar()
def get_all_file_titles(session):
"""Returns all file.title fields in session.
"""
return session.query(File.title).all()
def get_all_tag_names(session):
"""Returns all tag.name fields in session.
"""
return session.query(Tag.name).all()
##Insertion
def insert_file(session, fileinfo, taglist):
"""Inserts fileinfo into table File in session and entries in table
Match for each tag in taglist coupled with the new File entry.
Parameters:
session: An SQLAlchemy database session.
fileinfo (list): Length, Title, Link to be entered into session.
taglist (string list): A list of tag names.
Returns:
True if insertion was successful, else False.
Side-effects:
Entry in table File created with fileinfo. Entries in table
Match created coupling new File entry with each 'tag' in
taglist.
"""
unique = not file_exists(session,fileinfo[1])
if unique:
q = get_tags_from_names(session,taglist)
new_file = File(length=fileinfo[0],
title=fileinfo[1],
link=fileinfo[2])
for t in q:
new_file.contains.append(t)
session.add(new_file)
session.commit()
return True
else:
return False
def insert_tag (session,taglist):
"""Inserts each 'tag' in taglist into table Tag in session.
Parameters:
session: An SQLAlchemy database session.
taglist (string list): A list of tag names.
Returns:
q (list): A sorted list of all tags in taglist which already
exist in table Tag.
Side-effects:
New entries created in table Tag in session for each
non-duplicate tag in taglist.
"""
insert_list = []
skipped_list = []
for new_tag in taglist:
if not tag_exists(session,new_tag):
insert_list.append(new_tag)
else:
skipped_list.append(new_tag)
session.execute(Tag.__table__.insert(),
[{"name": t} for t in insert_list])
session.commit()
return sorted(skipped_list)
##Deletion
def delete_tag(session,taglist):
"""Deletes all 'tags' in taglist from session.
Parameters:
session: An SQLAlchemy database session.
taglist (string list): A list of tag names.
Side-effects:
All entries in table Tag in session with name in taglist
deleted.
All entries in table Match in session matching tags in
taglist deleted.
"""
for t in taglist:
        session.query(Tag).filter(Tag.name == t).delete()
session.commit()
def delete_file(session,title):
"""Deletes File.title == title from session.
Parameters:
session: An SQLAlchemy database session.
title (string): A title to be deleted.
Side-effects:
Any entry in table File in session with title==title deleted
All entries in table Match in session matching File.title
deleted.
"""
session.query(File).filter(File.title == title).delete()
session.commit()
def cleanup_files(session):
"""Deletes all entries in table File in session without any entry
in table Match.
Parameters:
session: An SQLAlchemy database session.
Side-effects:
All entries in table File whose id do not exist in Match.file_id
deleted.
"""
s = session.query(File.id).filter(~File.contains.any()).all()
if s:
session.execute(File.__table__.delete(),
[{"id": t[0]} for t in s])
session.commit()
##Confirm functions
#These functions check if data exists in db
def file_exists(session,title):
"""Returns true if a file with title == title exists, else false."""
return session.query(exists().where(File.title == title)).scalar()
def tag_exists(session,name):
"""Returns true if a tag with name == name exists, else false"""
return session.query(exists().where(Tag.name == name)).scalar()
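# Usage sketch (hedged: assumes an SQLAlchemy `session` bound to the schema defined in
# taggermodels; the tag names and file details below are invented for illustration):
#
#   skipped = insert_tag(session, ["holiday", "2021"])              # create tags first
#   insert_file(session, [120, "beach.mp4", "/videos/beach.mp4"], ["holiday", "2021"])
#   hits = select_file(session, ["holiday"])                        # files with that tag
#   delete_file(session, "beach.mp4")
#   cleanup_files(session)                                          # drop untagged files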
``` |
{
"source": "jonas-werner/piedpiper-syd",
"score": 2
} |
#### File: jonas-werner/piedpiper-syd/script.py
```python
@app.route('/floorplan/')
def floorplan():
MIDDLE = """
<h1><u>Floorplan</u></h1>
<img src="/static/floorplan.jpg">
<br>
"""
return TOP + MIDDLE + BOTTOM
if __name__ == "__main__":
app.run(debug=False,host='0.0.0.0', port=int(os.getenv('PORT', '5000')))
``` |
{
"source": "JonasXPX/DeepRoof-MTEK",
"score": 3
} |
#### File: JonasXPX/DeepRoof-MTEK/image_test.py
```python
import cv2 as cv
def getContours(img):
    contours, hierarchy = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    return contours, hierarchy
image = cv.imread('./images/temp.jpg')
# im = cv.equalize(image)
resized = cv.resize(image, (900, 600))
shifted = cv.pyrMeanShiftFiltering(resized, 12, 71)
gray = cv.cvtColor(shifted, cv.COLOR_BGR2GRAY)
imgBlur = cv.GaussianBlur(gray, (7,7), 1)
thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)[1]
cv.imshow('test', thresh)
cv.imshow('test2', resized)
cv.imshow('test3', imgBlur)
cv.waitKey(0)
``` |
{
"source": "jonasyip/machineLearningLibrary",
"score": 3
} |
#### File: jonasyip/machineLearningLibrary/k-MeanClustering.py
```python
import random
import math
def calculateAverageVector(clusterList = []):
averageVectorList = []
for i in range(0,len(clusterList[0])): #From first element to last element
valuePoint = 0
for j in range(0,len(clusterList)):
valuePoint = valuePoint + int(clusterList[j][i])
#print(valuePoint)
averagePoint = valuePoint / len(clusterList)
averageVectorList.append(averagePoint) #Stores the ordinate in a list [i,j,k,l]
#print(averageVectorList)
    return averageVectorList #Return vector coordinates
def produceRandomReferencePoint(numberOfDataPoints): #Returns two random data points
    referencePointOne = random.randint(0,numberOfDataPoints - 1)
    referencePointTwo = random.randint(0,numberOfDataPoints - 1)
    while referencePointOne == referencePointTwo: #Prevents the two reference indices from having the same value
        referencePointTwo = random.randint(0,numberOfDataPoints - 1)
return [referencePointOne,referencePointTwo]
def distanceBetweenTwoPoints(coodinate_1 = [], coodinate_2 = []): #Returns the Euclidean distance between two points
holdValue = 0
for i in range(0,len(coodinate_1)):
holdValue = holdValue + ((coodinate_2[i] - coodinate_1[i])**2)
print(holdValue)
return math.sqrt(holdValue)
def compareTwoDistances(distance_1, distance_2,fromReferencePoint): #
if (distance_1 > distance_2 and fromReferencePoint):
a=1
testData =[[1,2,3,4],[2,3,4,5],[3,4,5,6],[4,5,6,7],[5,6,7,8]]
print(calculateAverageVector(testData))
print(produceRandomReferencePoint(len(testData)))
print(distanceBetweenTwoPoints([1,2,3,4],[4,3,2,1]))
```
#### File: jonasyip/machineLearningLibrary/kmean.py
```python
import random
import math
import time
def getRandomValue(MaxValue): #Returns two random data points
value = random.randint(0,MaxValue)
return value
def hasClusterChanged(prev, curr):
if len(prev) != len(curr):
return False
for i in range(len(prev)):
for j in range(len(prev)):
if prev[i][j] != curr[i][j]:
return False
return True
def getEuclideanDistance(point_1,point_2):
holdValue = 0
for i in range(len(point_1)):
holdValue +=((point_1[i] - point_2[i])**2)
return math.sqrt(holdValue)
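# Helper referenced by test_compareTwoDistances below but not defined anywhere in this
# snippet; a minimal assumed implementation so that test can actually run.
def isDistanceGreater(distance_1, distance_2):
    return distance_1 > distance_2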
def getAverageVector(points):
averageVector = []
for coloumn in range(len(points[0])):
averageVectorDem = []
for rows in range(len(points)):
averageVectorDem.append(points[rows][coloumn])
averageVector.append(sum(averageVectorDem)/len(points))
return averageVector
'''
def getCluster(DataPoints,Reference_Priority,Reference_Compare):
Cluster = []
Cluster.append(Reference_Priority)
for i in range(len(DataPoints)):
if (Reference_Priority != Reference_Compare) and (Reference_Priority != DataPoints[i]) and (Reference_Compare!=DataPoints[i]):
if getNuclideanDistance(Reference_Priority,DataPoints[i]) < getNuclideanDistance(Reference_Compare,DataPoints[i]):
Cluster.append(DataPoints[i])
'''
def isInCluster(clusterNumber,referencePoints,dataPoint):
#Get distance of Reference point 1 to datapoint
#get list of distances of all the other points from the data point
#if index is num, and reference point num is the smallest distance, return true
#print("referencePoints", referencePoints)
listOfDistances = [getEuclideanDistance(aPoint,dataPoint) for aPoint in referencePoints]
return listOfDistances.index(min(listOfDistances)) == clusterNumber
#========================================================================================
def test_isInCluster():
    #Checks whether a data point is assigned to the cluster at the given index
#Data = [[1,2,3,4],[2,3,4,5],[3,4,5,6],[4,5,6,7],[5,6,7,8]]
Data = [3,4,5,6]
Ref_Points = [[1,2,3,4],[2,3,4,5]]
    Index = 1
expected_result = True
actual_result = isInCluster(Index, Ref_Points, Data)
assert expected_result == actual_result
expected_result = False
Ref_Points = [[2,3,4,5],[1,2,3,4]]
actual_result = isInCluster(Index, Ref_Points, Data)
assert expected_result == actual_result
Ref_Points = [[1,2],[12,13]]
Data = [2,3]
    Index = 0
expected_result = True
actual_result = isInCluster(Index, Ref_Points, Data)
assert expected_result == actual_result
def test_compareTwoDistances():
#Returns true if first distance is greater than second distance
Distance_1 = 10
Distance_2 = 3
expected_result = True
actual_result = isDistanceGreater(Distance_1,Distance_2)
assert expected_result == actual_result
def test_getAverageVector():
    #Calculates the mean coordinates; it should return the mean coordinates in 2D
Cluster = [[1,2],[2,3],[3,4],[4,5],[5,6]]
expected_result = [3,4]
actual_result = getAverageVector(Cluster)
print(actual_result)
assert expected_result == actual_result
    #Calculates the mean coordinates; it should return the mean coordinates in 3D
Cluster = [[1,2,3],[2,3,4],[3,4,5],[4,5,6],[5,6,7]]
expected_result = [3,4,5]
actual_result = getAverageVector(Cluster)
print(actual_result)
assert expected_result == actual_result
def test_getEuclideanDistance():
    #using Pythagoras, it should return a correct distance in 4D
p1 = [8,8,8,8]
p2 = [6,6,6,6]
expected_result = 4
actual_result = getEuclideanDistance(p1,p2)
assert expected_result == actual_result
#using pythagoras, it should return a correct distance in 2D
p1 = [15,21]
p2 = [11,18]
expected_result = 5
actual_result = getEuclideanDistance(p1,p2)
assert expected_result == actual_result
def test_hasClusterChanged():
#if the two cluster length is different, it should return false
prev_cluster = [[1,2],[2,1]]
curr_cluster = [[3,1]]
expected_result = False
# expect hasClusterChanged(prev, curr) to False
actual_result = hasClusterChanged(prev_cluster, curr_cluster)
assert expected_result == actual_result
#if the clusters are identical, it should return true
prev_cluster = [[1,2],[2,1]]
curr_cluster = [[1,2],[2,1]]
expected_result = True
actual_result = hasClusterChanged(prev_cluster, curr_cluster)
assert expected_result == actual_result
#if the clusters are identical in length,
# if the cluster points are different, it should return false
#this is a worst case scenio
curr_cluster = [[1,2],[2,2]]
expected_result = False
actual_result = hasClusterChanged(prev_cluster, curr_cluster)
assert expected_result == actual_result
#========================================================================================
def kMean(data, k):
current_Cluster = []
previous_Cluster = []
referencePoints = []
clusters = []
numberOfClusters = k
means = []
for k in range(numberOfClusters):
hasTheClusterChanged = True
if k >= len(clusters):
clusters.append([])
means.append([])
while hasTheClusterChanged:
scrambledDataPoints = random.sample(data,len(data))
referencePoints = scrambledDataPoints[0:numberOfClusters]
clusterK = []
for j in range(len(data)):
if isInCluster(k, referencePoints, data[j]):
clusterK.append(data[j])
hasTheClusterChanged = hasClusterChanged(clusterK,clusters[k])
clusters[k] = clusterK
means[k] = getAverageVector(clusterK)
return means
def predict(point , means):
categories = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica', 'Iris-bogus', 'five', 'six', 'seven']
distances = [getEuclideanDistance(aPoint,point) for aPoint in means]
index = distances.index(min(distances))
return categories[index]
if __name__ == '__main__':
data = [
[5.1,3.5,1.4,0.2],
[4.9,3,1.4,0.2],
[4.7,3.2,1.3,0.2],
[4.6,3.1,1.5,0.2],
[5,3.6,1.4,0.2],
[5.4,3.9,1.7,0.4],
[4.6,3.4,1.4,0.3],
[5,3.4,1.5,0.2],
[4.4,2.9,1.4,0.2],
[4.9,3.1,1.5,0.1],
[5.4,3.7,1.5,0.2],
[4.8,3.4,1.6,0.2],
[4.8,3,1.4,0.1],
[4.3,3,1.1,0.1],
[5.8,4,1.2,0.2],
[5.7,4.4,1.5,0.4],
[5.4,3.9,1.3,0.4],
[5.1,3.5,1.4,0.3],
[5.7,3.8,1.7,0.3],
[5.1,3.8,1.5,0.3],
[5.4,3.4,1.7,0.2],
[5.1,3.7,1.5,0.4],
[4.6,3.6,1,0.2],
[5.1,3.3,1.7,0.5],
[4.8,3.4,1.9,0.2],
[5,3,1.6,0.2],
[5,3.4,1.6,0.4],
[5.2,3.5,1.5,0.2],
[5.2,3.4,1.4,0.2],
[4.7,3.2,1.6,0.2],
[4.8,3.1,1.6,0.2],
[5.4,3.4,1.5,0.4],
[5.2,4.1,1.5,0.1],
[5.5,4.2,1.4,0.2],
[4.9,3.1,1.5,0.1],
[5,3.2,1.2,0.2],
[5.5,3.5,1.3,0.2],
[4.9,3.1,1.5,0.1],
[4.4,3,1.3,0.2],
[5.1,3.4,1.5,0.2],
[5,3.5,1.3,0.3],
[4.5,2.3,1.3,0.3],
[4.4,3.2,1.3,0.2],
[5,3.5,1.6,0.6],
[5.1,3.8,1.9,0.4],
[4.8,3,1.4,0.3],
[5.1,3.8,1.6,0.2],
[4.6,3.2,1.4,0.2],
[5.3,3.7,1.5,0.2],
[5,3.3,1.4,0.2],
[7,3.2,4.7,1.4],
[6.4,3.2,4.5,1.5],
[6.9,3.1,4.9,1.5],
[5.5,2.3,4,1.3],
[6.5,2.8,4.6,1.5],
[5.7,2.8,4.5,1.3],
[6.3,3.3,4.7,1.6],
[4.9,2.4,3.3,1],
[6.6,2.9,4.6,1.3],
[5.2,2.7,3.9,1.4],
[5,2,3.5,1],
[5.9,3,4.2,1.5],
[6,2.2,4,1],
[6.1,2.9,4.7,1.4],
[5.6,2.9,3.6,1.3],
[6.7,3.1,4.4,1.4],
[5.6,3,4.5,1.5],
[5.8,2.7,4.1,1],
[6.2,2.2,4.5,1.5],
[5.6,2.5,3.9,1.1],
[5.9,3.2,4.8,1.8],
[6.1,2.8,4,1.3],
[6.3,2.5,4.9,1.5],
[6.1,2.8,4.7,1.2],
[6.4,2.9,4.3,1.3],
[6.6,3,4.4,1.4],
[6.8,2.8,4.8,1.4],
[6.7,3,5,1.7],
[6,2.9,4.5,1.5],
[5.7,2.6,3.5,1],
[5.5,2.4,3.8,1.1],
[5.5,2.4,3.7,1],
[5.8,2.7,3.9,1.2],
[6,2.7,5.1,1.6],
[5.4,3,4.5,1.5],
[6,3.4,4.5,1.6],
[6.7,3.1,4.7,1.5],
[6.3,2.3,4.4,1.3],
[5.6,3,4.1,1.3],
[5.5,2.5,4,1.3],
[5.5,2.6,4.4,1.2],
[6.1,3,4.6,1.4],
[5.8,2.6,4,1.2],
[5,2.3,3.3,1],
[5.6,2.7,4.2,1.3],
[5.7,3,4.2,1.2],
[5.7,2.9,4.2,1.3],
[6.2,2.9,4.3,1.3],
[5.1,2.5,3,1.1],
[5.7,2.8,4.1,1.3],
[6.3,3.3,6,2.5],
[5.8,2.7,5.1,1.9],
[7.1,3,5.9,2.1],
[6.3,2.9,5.6,1.8],
[6.5,3,5.8,2.2],
[7.6,3,6.6,2.1],
[4.9,2.5,4.5,1.7],
[7.3,2.9,6.3,1.8],
[6.7,2.5,5.8,1.8],
[7.2,3.6,6.1,2.5],
[6.5,3.2,5.1,2],
[6.4,2.7,5.3,1.9],
[6.8,3,5.5,2.1],
[5.7,2.5,5,2],
[5.8,2.8,5.1,2.4],
[6.4,3.2,5.3,2.3],
[6.5,3,5.5,1.8],
[7.7,3.8,6.7,2.2],
[7.7,2.6,6.9,2.3],
[6,2.2,5,1.5],
[6.9,3.2,5.7,2.3],
[5.6,2.8,4.9,2],
[7.7,2.8,6.7,2],
[6.3,2.7,4.9,1.8],
[6.7,3.3,5.7,2.1],
[7.2,3.2,6,1.8],
[6.2,2.8,4.8,1.8],
[6.1,3,4.9,1.8],
[6.4,2.8,5.6,2.1],
[7.2,3,5.8,1.6],
[7.4,2.8,6.1,1.9],
[7.9,3.8,6.4,2],
[6.4,2.8,5.6,2.2],
[6.3,2.8,5.1,1.5],
[6.1,2.6,5.6,1.4],
[7.7,3,6.1,2.3],
[6.3,3.4,5.6,2.4],
[6.4,3.1,5.5,1.8],
[6,3,4.8,1.8],
[6.9,3.1,5.4,2.1],
[6.7,3.1,5.6,2.4],
[6.9,3.1,5.1,2.3],
[5.8,2.7,5.1,1.9],
[6.8,3.2,5.9,2.3],
[6.7,3.3,5.7,2.5],
[6.7,3,5.2,2.3],
[6.3,2.5,5,1.9],
[6.5,3,5.2,2],
[6.2,3.4,5.4,2.3],
[5.9,3,5.1,1.8]]
timeStart = time.time()
means = kMean(data, 7)
print("time taken",time.time()-timeStart)
print (predict([5.1,3.4,1.5,0.2], means))
print (predict([5.9, 3.0, 5.1, 1.8], means))
print (predict([6.7,3,5.2,2.3], means))
print (predict([3.2, 7.2, 5, 10.1], means))
``` |
{
"source": "jonaszchuang/lava-platformer",
"score": 3
} |
#### File: lava-platformer/game/heart.py
```python
import os
import pygame
class Heart(pygame.sprite.Sprite):
def __init__(self, x, y):
super().__init__()
image_location = os.path.join("assets", "heart.png")
self.image = pygame.image.load(image_location).convert_alpha()
        self.image = pygame.transform.scale(self.image, (100, 100))
        self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
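# Usage sketch (assumptions: a pygame display has already been initialised, which
# convert_alpha() requires, and assets/heart.png exists; coordinates are illustrative):
#
#   pygame.init()
#   screen = pygame.display.set_mode((800, 600))
#   hearts = pygame.sprite.Group(Heart(20, 20), Heart(130, 20))
#   hearts.draw(screen)
#   pygame.display.flip()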
``` |
{
"source": "jonaszchuang/tic-tac-toe",
"score": 4
} |
#### File: jonaszchuang/tic-tac-toe/tic-tac-toe.py
```python
grid = [
["_", "_", "_"],
["_", "_", "_"],
["_", "_", "_"]
]
print("")
p1 = str(input("Player one, what is your name? "))
print("")
p2 = str(input("Player two, what is your name? "))
xTurn = True
turns = 0
def board(grid):
for row in grid:
for slot in row:
print(f"{slot} ", end = "")
print()
def quit(spot):
if spot == "q":
return True
else:
return False
def check(spot):
if not isnumb(spot):
return False
spot = int(spot)
if not goodnumb(spot):
return False
return True
def isnumb(spot):
if not spot.isnumeric():
print("This is not a valid character, ", end = "")
return False
else:
return True
def goodnumb(spot):
if spot > 9 or spot < 1:
print("This is not a valid number, ", end = "")
return False
else:
return True
def taken(coords, grid):
row = coords[0]
column = coords[1]
if grid[row][column] != "_":
print("This spot is already taken, ", end = "")
return True
return False
def coordinates(spot):
row = int(spot / 3)
column = spot
if column > 2:
column = int(column % 3)
return [row, column]
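# Worked example (illustrative): if the player types "5", the main loop converts it to
# spot = 4, and coordinates(4) returns [1, 1], i.e. the centre cell of the 3x3 grid.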
def add(coords, grid, player):
row = coords[0]
column = coords[1]
grid[row][column] = player
def user(xTurn):
if xTurn:
return "X"
else:
return "O"
def win(player, grid):
if rowWin(player, grid):
return True
if columnWin(player, grid):
return True
if diagonalWin(player, grid):
return True
def rowWin(player, grid):
for row in grid:
completeRow = True
for slot in row:
if slot != player:
completeRow = False
break
if completeRow:
return True
return False
def columnWin(player, grid):
for column in range(3):
completeColumn = True
for row in range(3):
if grid[row][column] != player:
completeColumn = False
break
if completeColumn:
return True
return False
def diagonalWin(player, grid):
if (grid[0][0] == player) and (grid[1][1] == player) and (grid[2][2] == player):
return True
elif (grid[0][2] == player) and (grid[1][1] == player) and (grid[2][0] == player):
return True
else:
return False
while turns < 9:
player = user(xTurn)
board(grid)
spot = input("Chose a position (1-9) or \"q\" to quit: ")
if quit(spot):
break
if not check(spot):
print("try again.")
print("")
continue
spot = int(spot) - 1
coords = coordinates(spot)
if taken(coords, grid):
print("try again.")
continue
add(coords, grid, player)
if win(player, grid):
print("")
if player == "X":
print(f"{p1} won! Thanks for playing!")
else:
print(f"{p2} won! Thanks for playing!")
break
turns += 1
if turns == 9:
print("This is a tie. :| Play again for a rematch!")
xTurn = not xTurn
``` |
{
"source": "JonasZehn/ntopo",
"score": 2
} |
#### File: ntopo/ntopo/constraints.py
```python
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from ntopo.utils import get_grid_centers, compute_domain_volume
def zero_densities_function(positions):
return tf.zeros(tf.stack((tf.shape(positions)[0], 1)), dtype=positions.dtype)
def one_densities_function(positions):
return tf.ones(tf.stack((tf.shape(positions)[0], 1)), dtype=positions.dtype)
class DensityConstraintBase:
def apply(self, positions, densities):
distances, function_values = self.compute(positions)
return tf.where(distances <= 0.0, function_values, densities)
def estimate_volume(self, domain):
assert len(domain) == 4 # only implemented in 2d for now
positions = get_grid_centers(domain, [512, 512], dtype=np.float32)
zeros = tf.zeros((positions.shape[0], 1))
ones = tf.ones((positions.shape[0], 1))
distances, function_values = self.compute(positions)
bounding_volume = compute_domain_volume(domain)
free_volume = bounding_volume * \
tf.math.reduce_mean(tf.where(distances >= 0.0, ones, zeros))
constrained_densities = tf.where(distances < 0.0, function_values, zeros)
constraint_volume = bounding_volume * tf.math.reduce_mean(constrained_densities)
return free_volume.numpy(), constraint_volume.numpy()
def plot(self, domain, n_samples, folder):
positions = get_grid_centers(domain, n_samples)
distances, function_values = self.compute(positions)
distances = np.reshape(distances, (n_samples[1], n_samples[0]))
plt.figure()
plt.imshow(np.flipud(distances), extent=domain)
plt.colorbar()
plt.savefig(os.path.join(folder, 'density_constraint_phi.png'))
plt.close()
function_values = np.reshape(function_values, (n_samples[1], n_samples[0]))
plt.figure()
plt.imshow(np.flipud(function_values), extent=domain)
plt.colorbar()
plt.savefig(os.path.join(folder, 'density_constraint_fs.png'))
plt.close()
def compute(self, positions):
raise Exception("compute not overriden")
class DensityConstraint(DensityConstraintBase):
def __init__(self, sdf, fun):
super().__init__()
self.sdf = sdf
self.fun = fun
def compute(self, positions):
distances = self.sdf.eval_distance(positions)
function_values = self.fun(positions)
return distances, function_values
def has_constraint(self):
return True
def plot_boundary(self, *args, **kwargs):
self.sdf.plot_boundary(*args, **kwargs)
class DensityConstraintNone(DensityConstraintBase):
def apply(self, positions, densities):
return densities
def plot(self, domain, n_samples, folder):
pass
def has_constraint(self):
return False
def plot_boundary(self, *args, **kwargs):
pass
class DensityConstraintAdd(DensityConstraintBase):
def __init__(self, constraint_list):
super().__init__()
self.constraint_list = constraint_list
def compute(self, positions):
distances, function_values = self.constraint_list[0].compute(positions)
for i in range(1, len(self.constraint_list)):
phin, fxsn = self.constraint_list[i].compute(positions)
cond = phin < 0.0
distances = tf.where(phin < distances, phin, distances)
function_values = tf.where(cond, fxsn, function_values)
return distances, function_values
def has_constraint(self):
for constraint_object in self.constraint_list:
if constraint_object.has_constraint():
return True
return False
def plot_boundary(self, *args, **kwargs):
for constraint_object in self.constraint_list:
constraint_object.plot_boundary(*args, **kwargs)
class DisplacementLine:
def __init__(self, point_a, point_b):
super().__init__()
self.point_a = np.reshape(np.array(point_a, dtype=np.float32), (1, 2))
self.point_b = np.reshape(np.array(point_b, dtype=np.float32), (1, 2))
self.ba_sq = np.sum((self.point_b - self.point_a) * (self.point_b - self.point_a))
def eval_distance_square(self, positions):
a_to_p = positions - self.point_a
ba = self.point_b - self.point_a
line_time = tf.clip_by_value(tf.math.reduce_sum(
a_to_p*ba, axis=1, keepdims=True)/self.ba_sq, 0.0, 1.0)
d = a_to_p - line_time * ba
dist_sq = tf.math.reduce_sum(d*d, axis=1, keepdims=True)
return dist_sq
class DisplacementDisk:
def __init__(self, center, radius):
super().__init__()
self.center = tf.convert_to_tensor(np.reshape(
np.array(center, dtype=np.float32), (1, 2)), dtype=tf.float32)
self.radius = radius
def eval_distance_square(self, positions):
center_to_p = positions - self.center
dist_sq_from_center = tf.math.reduce_sum(center_to_p*center_to_p, axis=1, keepdims=True)
d = tf.math.sqrt(dist_sq_from_center)
dist_sq = tf.where(d - self.radius <= 1e-5,
tf.zeros_like(d), tf.math.square(d - self.radius))
return dist_sq
class DisplacementPoint:
def __init__(self, position):
super().__init__()
self.position = tf.convert_to_tensor(np.reshape(
np.array(position, dtype=np.float32), (1, 2)), dtype=tf.float32)
def eval_distance_square(self, positions):
p_to_positions = positions - self.position
dist_sq_from_center = tf.math.reduce_sum(p_to_positions*p_to_positions, axis=1, keepdims=True)
return dist_sq_from_center
class DisplacementHalfspace:
def __init__(self, normal, offset):
super().__init__()
self.normal = tf.convert_to_tensor(np.reshape(
np.array(normal, dtype=np.float32), (2, 1)), dtype=tf.float32)
self.offset = offset
def eval_distance_square(self, positions):
signed_distances = tf.linalg.matmul(positions, self.normal) + self.offset
return signed_distances*signed_distances
def power_smooth_min_already_sq(d_a, d_b):
eps = 1e-4
return (d_a*d_b)/(d_a+d_b + eps)
def tree_reduce(d_sq):
while len(d_sq) > 1:
nd_sq = []
for i in range(0, len(d_sq), 2):
if i + 1 < len(d_sq):
nd_sq.append(power_smooth_min_already_sq(d_sq[i], d_sq[i+1]))
else:
nd_sq.append(d_sq[i])
d_sq = nd_sq
return d_sq[0]
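# Numeric sketch (values invented for illustration): for squared distances 4.0 and 9.0,
# power_smooth_min_already_sq gives (4*9)/(4+9+1e-4) ~= 2.77, a smooth lower bound on
# min(4, 9); tree_reduce folds a whole list of such tensors pairwise with that operator.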
def entity_list_compute_s(positions, domain, entity_list):
assert len(positions.shape) == 2
d_sq = []
for entity_i in entity_list:
d_sqi = entity_i.eval_distance_square(positions)
d_sq.append(d_sqi)
d_sq = tree_reduce(d_sq)
result = 2.0 / max(domain[1] - domain[0], domain[3] -
domain[2]) * tf.math.sqrt(d_sq + 1e-35)
return result
def get_gradient_norm_function(functor):
@tf.function
def gradient_norm_function(inputs):
with tf.GradientTape(persistent=True) as tape:
tape.watch(inputs)
T = functor(inputs)
dTdxy = tape.gradient(T, inputs)
return tf.math.sqrt(tf.math.reduce_sum(tf.square(dTdxy), axis=1, keepdims=True))
return gradient_norm_function
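# Sketch (the quadratic functor below is an invented example, not part of the original
# code): for f(p) = sum(p**2) the gradient is 2*p, so the returned helper yields 2*||p||.
#
#   g = get_gradient_norm_function(lambda p: tf.reduce_sum(p * p, axis=1, keepdims=True))
#   g(tf.constant([[3.0, 4.0]]))   # -> [[10.0]], since 2 * ||(3, 4)|| = 10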
def get_second_order_norm_function(functor):
@tf.function
def norm_function(inputs):
with tf.GradientTape(persistent=True) as tape1:
tape1.watch(inputs)
with tf.GradientTape(persistent=True) as tape:
tape.watch(inputs)
T = functor(inputs)
dTdxy = tape.gradient(T, inputs, unconnected_gradients='zero')
dTdx = tf.gather(dTdxy, [0], axis=1)
dTdy = tf.gather(dTdxy, [1], axis=1)
dTdxdxy = tape1.gradient(dTdx, inputs, unconnected_gradients='zero')
dTdydxy = tape1.gradient(dTdy, inputs, unconnected_gradients='zero')
return tf.math.sqrt(tf.math.reduce_sum(tf.square(dTdxdxy) + tf.square(dTdydxy), axis=1, keepdims=True))
return norm_function
class DisplacementConstraint:
def __init__(self, domain, entity_list):
self.domain = domain
self.entity_list = entity_list
def compute_length_factor(self, positions):
return entity_list_compute_s(positions, self.domain, self.entity_list)
def plot(self, domain, n_samples, folder):
positions = get_grid_centers(domain, n_samples, dtype=np.float32)
length_factors = self.compute_length_factor(positions)
length_factors = np.reshape(length_factors, (n_samples[1], n_samples[0]))
plt.figure()
plt.imshow(np.flipud(length_factors), extent=domain)
plt.colorbar(orientation="horizontal")
plt.savefig(os.path.join(folder, 'DisplacementConstraint_s.png'))
plt.close()
s_function = self.compute_length_factor
gradient_norm_function = get_gradient_norm_function(s_function)
samples_tf = tf.convert_to_tensor(positions)
gradient_norms = gradient_norm_function(samples_tf)
image = np.reshape(gradient_norms, (n_samples[1], n_samples[0]))
plt.figure()
plt.imshow(np.flipud(image), extent=self.domain)
plt.colorbar(orientation="horizontal")
plt.savefig(os.path.join(folder, 'DisplacementConstraint_s-gradient-norm.png'))
plt.close()
so_function = get_second_order_norm_function(s_function)
samples_tf = tf.convert_to_tensor(positions)
so_norms = so_function(samples_tf)
image = np.reshape(so_norms, (n_samples[1], n_samples[0]))
plt.figure()
plt.imshow(np.flipud(image), extent=self.domain)
plt.colorbar(orientation="horizontal")
plt.savefig(os.path.join(
folder, 'DisplacementConstraint_s-so-norm.png'))
plt.close()
```
#### File: ntopo/ntopo/filter.py
```python
import tensorflow as tf
def pad_border(tensor, filter_size):
rep = filter_size // 2
tensor = tf.concat([tf.tile(tensor[:, :, 0:1, :], (1, 1, rep, 1)), tensor, tf.tile(
tensor[:, :, -1:, :], (1, 1, rep, 1))], axis=2)
tensor = tf.concat([tf.tile(tensor[:, 0:1, :, :], (1, rep, 1, 1)), tensor, tf.tile(
tensor[:, -1:, :, :], (1, rep, 1, 1))], axis=1)
return tensor
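# Shape sketch (illustrative): with filter_size=3, rep is 1, so a (1, 4, 4, 1) tensor
# gains one replicated row/column on each border and comes back as (1, 6, 6, 1); the
# 3-D variant below does the same along depth, rows and columns.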
def pad_border_3d(tensor, filter_size):
rep = filter_size // 2
tensor = tf.concat([tf.tile(tensor[:, :, :, 0:1, :], (1, 1, 1, rep, 1)), tensor, tf.tile(
tensor[:, :, :, -1:, :], (1, 1, 1, rep, 1))], axis=3)
tensor = tf.concat([tf.tile(tensor[:, :, 0:1, :, :], (1, 1, rep, 1, 1)), tensor, tf.tile(
tensor[:, :, -1:, :, :], (1, 1, rep, 1, 1))], axis=2)
tensor = tf.concat([tf.tile(tensor[:, 0:1, :, :, :], (1, rep, 1, 1, 1)), tensor, tf.tile(
tensor[:, -1:, :, :, :], (1, rep, 1, 1, 1))], axis=1)
return tensor
def pad_positions_constant(sample_positions, filter_size):
rep = filter_size // 2
#sample_positions = tf.concat([ tf.tile(sample_positions[:,:, 0:1,:], (1, 1, rep, 1) ), sample_positions, tf.tile(sample_positions[:,:, -1:,:], (1, 1, rep, 1) ) ], axis = 2)
c1 = tf.fill(tf.stack([tf.shape(sample_positions)[0], tf.shape(
sample_positions)[1], rep, tf.shape(sample_positions)[3]]), -1000.0)
sample_positions = tf.concat([c1, sample_positions, c1], axis=2)
#sample_positions = tf.concat([ tf.tile(sample_positions[:, 0:1,:,:], (1, rep, 1, 1) ), sample_positions, tf.tile(sample_positions[:, -1:, :, :], (1, rep, 1, 1) ) ], axis = 1)
c2 = tf.fill(tf.stack([tf.shape(sample_positions)[0], rep, tf.shape(
sample_positions)[2], tf.shape(sample_positions)[3]]), -1000.0)
sample_positions = tf.concat([c2, sample_positions, c2], axis=1)
return sample_positions
def pad_positions_constant_3d(sample_positions, filter_size):
rep = filter_size // 2
c1 = tf.fill(tf.stack([tf.shape(sample_positions)[0], tf.shape(sample_positions)[
1], tf.shape(sample_positions)[2], rep, tf.shape(sample_positions)[4]]), -1000.0)
sample_positions = tf.concat([c1, sample_positions, c1], axis=3)
c2 = tf.fill(tf.stack([tf.shape(sample_positions)[0], tf.shape(sample_positions)[
1], rep, tf.shape(sample_positions)[3], tf.shape(sample_positions)[4]]), -1000.0)
sample_positions = tf.concat([c2, sample_positions, c2], axis=2)
c3 = tf.fill(tf.stack([tf.shape(sample_positions)[0], rep, tf.shape(sample_positions)[
2], tf.shape(sample_positions)[3], tf.shape(sample_positions)[4]]), -1000.0)
sample_positions = tf.concat([c3, sample_positions, c3], axis=1)
return sample_positions
@tf.function
def apply_sensitivity_filter_2d(sample_positions, old_densities, sensitivities, n_samples, domain, radius):
dim = 2
gamma = 1e-3
cell_width = (domain[1] - domain[0]) / n_samples[0]
grads = sensitivities
radius_space = radius * cell_width
filter_size = 2*round(radius) + 1
density_patches = tf.reshape(
old_densities, [1, n_samples[1], n_samples[0], 1])
density_patches = pad_border(density_patches, filter_size)
density_patches = tf.image.extract_patches(
density_patches, sizes=[1, filter_size, filter_size, 1], strides=[1, 1, 1, 1], rates=[1, 1, 1, 1], padding='VALID')
sensitivity_patches = tf.reshape(
sensitivities, [1, n_samples[1], n_samples[0], 1])
sensitivity_patches = pad_border(sensitivity_patches, filter_size)
sensitivity_patches = tf.image.extract_patches(
sensitivity_patches, sizes=[1, filter_size, filter_size, 1], strides=[1, 1, 1, 1], rates=[1, 1, 1, 1], padding='VALID')
sample_positions = tf.reshape(
sample_positions, [1, n_samples[1], n_samples[0], dim])
# we pad such that influence is basically 0
sample_patches = pad_positions_constant(
sample_positions, filter_size)
sample_patches = tf.image.extract_patches(
sample_patches, sizes=[1, filter_size, filter_size, 1], strides=[1, 1, 1, 1], rates=[1, 1, 1, 1], padding='VALID')
    # sample_patches.shape is now [1, rows, cols, filter_size ** 2 * dim]
diff = tf.reshape(sample_patches, [1, n_samples[1], n_samples[0], filter_size * filter_size,
dim]) - tf.reshape(sample_positions, [1, n_samples[1], n_samples[0], 1, dim])
# [1, n_samples[1], n_samples[0], filter_size ** 2]
dists = tf.math.sqrt(tf.math.reduce_sum(diff*diff, axis=4))
# [1, n_samples[1], n_samples[0], filter_size ** 2]
Hei = tf.math.maximum(0.0, radius_space - dists)
# [1, n_samples[1], n_samples[0], filter_size ** 2]
Heixic = Hei * density_patches * sensitivity_patches
sum_Heixic = tf.math.reduce_sum(Heixic, axis=3)
sum_Hei = tf.math.reduce_sum(Hei, axis=3)
old_densities_r = tf.reshape(
old_densities, [1, n_samples[1], n_samples[0]])
assert len(sum_Hei.shape) == len(old_densities_r.shape)
div = tf.math.maximum(gamma, old_densities_r) * sum_Hei
grads = sum_Heixic / div
grads = tf.reshape(grads, (-1, 1))
return grads
@tf.function
def apply_sensitivity_filter_3d(sample_positions, old_densities, sensitivities, n_samples, domain, radius):
dim = 3
gamma = 1e-3
cell_width = (domain[1] - domain[0]) / n_samples[0]
radius_space = radius * cell_width
filter_size = 2*round(radius) + 1
sample_positions = tf.reshape(
sample_positions, [1, n_samples[0], n_samples[1], n_samples[2], dim])
density_patches = tf.reshape(
old_densities, [1, n_samples[0], n_samples[1], n_samples[2], 1])
density_patches = pad_border_3d(density_patches, filter_size)
density_patches = tf.extract_volume_patches(
density_patches, ksizes=[1, filter_size, filter_size, filter_size, 1], strides=[1, 1, 1, 1, 1], padding='VALID')
sensitivity_patches = tf.reshape(
sensitivities, [1, n_samples[0], n_samples[1], n_samples[2], 1])
sensitivity_patches = pad_border_3d(sensitivity_patches, filter_size)
sensitivity_patches = tf.extract_volume_patches(
sensitivity_patches, ksizes=[1, filter_size, filter_size, filter_size, 1], strides=[1, 1, 1, 1, 1], padding='VALID')
# we pad such that influence is basically 0
sample_patches = pad_positions_constant_3d(sample_positions, filter_size)
sample_patches = tf.extract_volume_patches(
sample_patches, ksizes=[1, filter_size, filter_size, filter_size, 1], strides=[1, 1, 1, 1, 1], padding='VALID')
    # sample_patches.shape is now [1, depth, rows, cols, filter_size ** 3 * dim]
diff = tf.reshape(sample_patches, [1, n_samples[0], n_samples[1], n_samples[2], filter_size * filter_size *
filter_size, dim]) - tf.reshape(sample_positions, [1, n_samples[0], n_samples[1], n_samples[2], 1, dim])
# [1, n_samples[0], n_samples[1], n_samples[2], filter_size ** 3]
dists = tf.math.sqrt(tf.math.reduce_sum(diff*diff, axis=5))
# [1, n_samples[0], n_samples[1], n_samples[2], filter_size ** 3]
Hei = tf.math.maximum(0.0, radius_space - dists)
# [1, n_samples[0], n_samples[1], n_samples[2], filter_size ** 3]
Heixic = Hei * density_patches * sensitivity_patches
sum_Heixic = tf.math.reduce_sum(Heixic, axis=4)
sum_Hei = tf.math.reduce_sum(Hei, axis=4)
old_densities_r = tf.reshape(
old_densities, [1, n_samples[0], n_samples[1], n_samples[2]])
assert len(sum_Hei.shape) == len(old_densities_r.shape)
div = tf.math.maximum(gamma, old_densities_r) * sum_Hei
grads = sum_Heixic / div
grads = tf.reshape(grads, (-1, 1))
return grads
def apply_sensitivity_filter(sample_positions, old_densities, sensitivities, n_samples, domain, dim, radius):
if dim == 2:
return apply_sensitivity_filter_2d(sample_positions, old_densities, sensitivities, n_samples, domain, radius)
if dim == 3:
return apply_sensitivity_filter_3d(sample_positions, old_densities, sensitivities, n_samples, domain, radius)
raise Exception('unsupported dim')
```
#### File: ntopo/ntopo/train.py
```python
import os
import time
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from ntopo.monitors import SimulationMonitor
from ntopo.physics import compute_elasticity_energies, compute_opt_energy, compute_volume_penalty
from ntopo.filter import apply_sensitivity_filter
from ntopo.utils import write_to_file, get_sample_generator, get_single_random_q_sample_generator, get_q_sample_generator, stratified_sampling
from ntopo.oc import compute_oc_multi_batch
from ntopo.render import save_densities_to_file
def get_train_disp_step(opt, problem, disp_model, density_model, disp_variables):
@tf.function
def _train_disp_step(samples):
with tf.GradientTape() as tape:
tape.watch(disp_variables)
internal_energy, force_loss = compute_elasticity_energies(
problem, disp_model, density_model, samples, training=True)
reg_loss = tf.keras.backend.sum(disp_model.losses)
loss = internal_energy + force_loss + reg_loss
dLdwx = tape.gradient(loss, disp_variables)
opt.apply_gradients(zip(dLdwx, disp_variables))
return loss, internal_energy, force_loss, reg_loss
return _train_disp_step
def run_simulation(problem, disp_model, train_disp_step, n_sim_iterations, sim_sample_generator, saving=False, save_path='.', save_postfix=''):
simulation_monitor = SimulationMonitor(n_sim_iterations)
progress_bar = tqdm(simulation_monitor, total=n_sim_iterations)
for disp_iter in progress_bar:
start_time = time.time()
input_samples = next(sim_sample_generator)
loss, internal_energy, force_loss, reg_loss = train_disp_step(input_samples)
simulation_monitor.monitor(loss)
end_time = time.time()
loss = loss.numpy().item()
internal_energy = internal_energy.numpy().item()
duration = end_time - start_time
reg_loss = reg_loss.numpy().item()
progress_bar.set_description(f'loss {loss:.3e} int. energy {internal_energy:.3e}, dur.: {duration:.3e}, reg loss {reg_loss:.3e}')
progress_bar.refresh()
if saving:
simulation_monitor.save_plot(save_path, '', save_postfix)
def get_train_density_step(opt, problem, disp_model, density_model, density_variables, vol_penalty_strength, target_volume_ratio):
sample_volume = problem.domain_volume
target_volume = problem.free_volume * target_volume_ratio
@tf.function
def _train_densities_step(sample_positions):
with tf.GradientTape() as tape:
tape.watch(density_variables)
energy, densities = compute_opt_energy(
problem, disp_model, density_model, sample_positions)
penalty = compute_volume_penalty(densities, sample_volume=sample_volume,
vol_penalty_strength=vol_penalty_strength, target_volume=target_volume)
reg_loss = tf.keras.backend.sum(density_model.losses)
loss = energy + penalty + reg_loss
dLdwx = tape.gradient(loss, density_variables)
opt.apply_gradients(zip(dLdwx, density_variables))
return loss, penalty, reg_loss
return _train_densities_step
@tf.function
def compute_sensitivities(problem, disp_model, density_model, sample_positions, use_oc, vol_penalty_strength, target_volume_ratio=None):
sample_volume = problem.domain_volume
target_volume = problem.free_volume * target_volume_ratio
with tf.GradientTape() as tape:
energy, densities = compute_opt_energy(
problem, disp_model, density_model, sample_positions)
if use_oc:
loss = energy
else:
penalty = compute_volume_penalty(densities, sample_volume=sample_volume,
vol_penalty_strength=vol_penalty_strength, target_volume=target_volume)
loss = energy + penalty
old_densities = densities
grads = tape.gradient(loss, densities)
return old_densities, grads
@tf.function
def compute_target_densities_gradient_descent(old_densities, sensitivities):
projected_sensitivities = [tf.math.maximum(0.0, tf.math.minimum(
1.0, old_densities[i] - sensitivities[i])) - old_densities[i] for i in range(len(old_densities))]
step_size = 0.05 / tf.math.reduce_mean([tf.math.reduce_mean(tf.math.abs(si))
for si in projected_sensitivities])
return [old_densities[i] - step_size * sensitivities[i] for i in range(len(old_densities))]
@tf.function
def optimize_densities_mse(opt, density_model, sample_positions, targets, density_variables):
with tf.GradientTape() as tape:
tape.watch(density_variables)
err = density_model(sample_positions, training=True) - targets
reg_loss = tf.keras.backend.sum(density_model.losses)
reconstruction_loss = tf.reduce_mean(err*err, keepdims=True)
loss = reconstruction_loss + reg_loss
dLdwrho = tape.gradient(loss, density_variables)
opt.apply_gradients(zip(dLdwrho, density_variables))
return loss, reconstruction_loss, reg_loss
def save_model_configs(disp_model, density_model, save_path):
write_to_file(disp_model.to_json(), os.path.join(
save_path, 'disp_model_config.json'))
write_to_file(density_model.to_json(), os.path.join(
save_path, 'density_model_config.json'))
def save_model_weights(disp_model, density_model, save_path, save_postfix):
disp_model.save_weights(os.path.join(
save_path, 'disp_model' + save_postfix))
density_model.save_weights(os.path.join(
save_path, 'density_model' + save_postfix))
def train_non_mmse(problem, disp_model, density_model, opt_disp, opt_density,
opt_sample_generator, sim_sample_generator,
vol_penalty_strength,
target_volume_ratio,
save_path,
save_interval,
n_opt_iterations,
n_sim_iterations
):
train_disp_step = get_train_disp_step(
opt_disp, problem, disp_model, density_model=density_model,
disp_variables=disp_model.trainable_variables)
train_density_step = get_train_density_step(
opt_density, problem, disp_model, density_model=density_model,
density_variables=density_model.trainable_variables,
vol_penalty_strength=vol_penalty_strength,
target_volume_ratio=target_volume_ratio)
save_model_configs(disp_model, density_model, save_path)
def save_state(save_postfix):
save_model_weights(disp_model, density_model, save_path, save_postfix)
problem.plot_densities(density_model, save_path, '', save_postfix)
iteration = 0
saving = True
save_postfix = f'-{iteration:06d}'
run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations,
sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix)
if saving:
problem.plot_displacement(disp_model, save_path, '', save_postfix)
save_state(save_postfix)
for iteration in range(1, n_opt_iterations + 1):
print('Optimization iteration ', iteration)
saving = (iteration % save_interval == 0)
save_postfix = f'-{iteration:06d}'
run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations,
sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix)
if saving:
problem.plot_displacement(disp_model, save_path, '', save_postfix)
sample_positions = next(opt_sample_generator)
train_density_step(sample_positions)
if saving:
save_state(save_postfix)
def train_mmse(problem, disp_model, density_model, opt_disp, opt_density,
opt_sample_generator, sim_sample_generator,
n_opt_samples,
vol_penalty_strength,
target_volume_ratio,
save_path,
filter,
filter_radius,
use_oc,
save_interval,
n_opt_iterations,
n_sim_iterations,
n_opt_batches,
oc_config):
density_variables = density_model.trainable_variables
train_disp_step = get_train_disp_step(
opt_disp, problem, disp_model, density_model=density_model, disp_variables=disp_model.trainable_variables)
save_model_configs(disp_model, density_model, save_path)
def save_state(save_postfix, target_densities=None):
save_model_weights(disp_model, density_model, save_path, save_postfix)
problem.plot_densities(density_model, save_path, '', save_postfix)
if target_densities is not None and problem.dim == 2:
save_densities_to_file(np.reshape(target_densities[0], (n_opt_samples[1], n_opt_samples[0])), filename=os.path.join(
save_path, 'density' + save_postfix + '-target0.png'))
iteration = 0
saving = True
save_postfix = f'-{iteration:06d}'
run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations,
sim_sample_generator=sim_sample_generator, saving=True, save_path=save_path, save_postfix=save_postfix)
if saving:
problem.plot_displacement(disp_model, save_path, '', save_postfix)
save_state(save_postfix)
for iteration in range(1, n_opt_iterations + 1):
print('Optimization iteration ', iteration)
saving = (iteration % save_interval == 0)
save_postfix = f'-{iteration:06d}'
run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations,
sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix)
if saving:
problem.plot_displacement(disp_model, save_path, '', save_postfix)
old_densities = []
sensitivities = []
sample_positions = []
for _ in range(n_opt_batches):
input_samples = next(opt_sample_generator)
old_di, sensitivities_i = compute_sensitivities(
problem, disp_model, density_model, input_samples, use_oc, vol_penalty_strength=vol_penalty_strength, target_volume_ratio=target_volume_ratio)
if filter == 'sensitivity':
sensitivities_i = apply_sensitivity_filter(
input_samples, old_di, sensitivities_i, n_samples=n_opt_samples, domain=problem.domain, dim=problem.dim, radius=filter_radius)
else:
assert filter in ('none', ), 'not supported filter'
old_densities.append(old_di)
sensitivities.append(sensitivities_i)
sample_positions.append(input_samples)
if use_oc:
target_densities = compute_oc_multi_batch(
old_densities=old_densities, sensitivities=sensitivities, sample_volume=problem.domain_volume, target_volume=problem.free_volume * target_volume_ratio,
max_move=oc_config['max_move'], damping_parameter=oc_config['damping_parameter'])
else:
target_densities = compute_target_densities_gradient_descent(
old_densities=old_densities, sensitivities=sensitivities)
progress_bar = tqdm(range(n_opt_batches))
for i in progress_bar:
loss, reconstruction_loss, reg_loss = optimize_densities_mse(
opt_density, density_model, sample_positions[i], target_densities[i], density_variables)
loss = loss.numpy().item()
reconstruction_loss = reconstruction_loss.numpy().item()
reg_loss = reg_loss.numpy().item()
progress_bar.set_description(f'loss {loss} rec. loss {reconstruction_loss} reg loss {reg_loss}')
progress_bar.refresh()
if saving:
save_state(save_postfix, target_densities)
def train_mmse_space(problem, disp_model, density_model, opt_disp, opt_density,
n_sim_samples, n_opt_samples,
opt_sample_generator,
vol_penalty_strength,
target_volume_ratio,
save_path,
filter,
filter_radius,
use_oc,
save_interval,
n_opt_iterations,
n_sim_iterations,
n_opt_batches,
n_q_samples,
oc_config):
density_variables = density_model.trainable_variables
train_disp_step = get_train_disp_step(
opt_disp, problem, disp_model, density_model=density_model, disp_variables=disp_model.trainable_variables)
save_model_configs(disp_model, density_model, save_path)
def save_state(save_postfix, target_densities=None):
disp_model.save_weights(os.path.join(
save_path, 'disp_model' + save_postfix))
density_model.save_weights(os.path.join(
save_path, 'density_model' + save_postfix))
problem.plot_densities(density_model, save_path, '', save_postfix)
iteration = 0
saving = True
save_postfix = f'-{iteration:06d}'
sim_sample_generator = get_single_random_q_sample_generator(problem.q_domain, problem.domain, n_sim_samples)
run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=2*n_sim_iterations,
sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix)
if saving:
qs = stratified_sampling(problem.q_domain, n_cells=[
n_q_samples], n_points_per_cell=1, dtype=np.float32).flatten()
for q in qs:
save_postfix_q = f'-{iteration:06d}-q={q:.6f}'
print('q', q)
problem.plot_displacement(
disp_model, save_path, '', save_postfix_q, q=np.array([[q]]))
save_state(save_postfix)
for iteration in range(1, n_opt_iterations + 1):
print('Optimization iteration ', iteration)
saving = (iteration % save_interval == 0)
print('saving', saving)
target_samples_all_q = []
target_densities_all_q = []
qs = stratified_sampling(problem.q_domain, n_cells=[
n_q_samples], n_points_per_cell=1, dtype=np.float32).flatten()
for q in qs:
save_postfix_q = f'-{iteration:06d}-q={q:.6f}'
if problem.volume_ratio_q_idx != -1:
assert problem.volume_ratio_q_idx == 0
target_volume_ratio = q
old_densities = []
sensitivities = []
sample_positions_with_q = []
sim_sample_generator = get_q_sample_generator(
q, problem.domain, n_samples=n_sim_samples)
run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations,
sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix_q)
if saving:
problem.plot_displacement(
disp_model, save_path, '', save_postfix_q, q=np.array([[q]]))
for _ in range(n_opt_batches):
input_samples = next(opt_sample_generator)
q_vec = np.ones((np.prod(n_opt_samples), 1), dtype=np.float32) * q
input_samples_with_q = np.concatenate(
(input_samples, q_vec), axis=1)
old_di, sensitivities_i = compute_sensitivities(
problem, disp_model, density_model, input_samples_with_q, use_oc, vol_penalty_strength=vol_penalty_strength, target_volume_ratio=target_volume_ratio)
if filter == 'sensitivity':
sensitivities_i = apply_sensitivity_filter(
input_samples, old_di, sensitivities_i, n_samples=n_opt_samples, domain=problem.domain, dim=problem.dim, radius=filter_radius)
else:
assert filter in ('none', ), 'not supported filter'
old_densities.append(old_di)
sensitivities.append(sensitivities_i)
sample_positions_with_q.append(input_samples_with_q)
if use_oc:
target_densities = compute_oc_multi_batch(
old_densities, sensitivities, sample_volume=problem.domain_volume, target_volume=problem.free_volume * target_volume_ratio,
max_move=oc_config['max_move'], damping_parameter=oc_config['damping_parameter'])
else:
target_densities = compute_target_densities_gradient_descent(
old_densities=old_densities, sensitivities=sensitivities)
target_samples_all_q.append(sample_positions_with_q)
target_densities_all_q.append(target_densities)
n_batch = len(target_samples_all_q) * len(target_samples_all_q[0])
n_samples_total = n_batch * np.prod(n_opt_samples)
target_samples_all_q = tf.reshape(
target_samples_all_q, [n_samples_total, problem.dim + problem.q_dim])
target_densities_all_q = tf.reshape(
target_densities_all_q, [n_samples_total, 1])
indices = np.arange(n_samples_total)
np.random.shuffle(indices)
n_per_batch = n_samples_total // n_batch
progress_bar = tqdm(range(n_batch))
for i in progress_bar:
batch_samples = tf.gather(target_samples_all_q, tf.constant(
indices[i*n_per_batch:(i+1)*n_per_batch]), axis=0)
batch_densities = tf.gather(target_densities_all_q, tf.constant(
indices[i*n_per_batch:(i+1)*n_per_batch]), axis=0)
loss, reconstruction_loss, reg_loss = optimize_densities_mse(
opt_density, density_model, batch_samples, batch_densities, density_variables)
loss = loss.numpy().item()
reconstruction_loss = reconstruction_loss.numpy().item()
reg_loss = reg_loss.numpy().item()
progress_bar.set_description(f'loss {loss} rec. loss {reconstruction_loss} reg loss {reg_loss}')
progress_bar.refresh()
if saving:
save_postfix = f'-{iteration:06d}'
save_state(save_postfix, target_densities)
``` |
{
"source": "jonaszierer/ARDISS",
"score": 3
} |
#### File: ARDISS/ardiss/ard_model.py
```python
import numpy as np
import tensorflow as tf
import gpflow
from sklearn import preprocessing
import gc
class GPflowARD(object):
# The class regroups the ARD optimization steps
def __init__(self,
X,
Y,
window_size,
optimizer=gpflow.train.RMSPropOptimizer(0.1, momentum=0.01),
maxiter=100,
threads=1,
scale_X=False,
verbose=False):
# Initialize the class and raise warnings depending on options chosen
self.X = np.copy(X) # The haplotype values, this must be normalized ahead for optimal results
if scale_X: # If X was not scaled before, we scale it here
if self.X.dtype not in [np.float16, np.float32, np.float64]:
self.X = self.X.astype(dtype=np.float16, copy=False) # Need to transform it to float to ensure scaling
gc.collect()
self.X = preprocessing.scale(self.X, axis=1, copy=False)
gc.collect()
self.Y = np.copy(Y) # The typed scores
self.window_size = window_size # The window size used during optimization, this affects performance
self.optimizer = optimizer # The chosen optimizer, RMSProp is set as default
self.maxiter = maxiter # The maximum number of iteration of the optimizer at each window
self.verbose = verbose
self.ards = None
self.config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=threads, inter_op_parallelism_threads=threads, allow_soft_placement=True)
def optimize_weights(self):
ws = self.window_size
        n_windows = int(np.floor(self.X.shape[0] / ws))
r = np.arange(n_windows)
i = 0
ards = []
for k in r:
if self.verbose:
print("Optimization {}/{}".format(i + 1, n_windows))
i += 1
# In order to avoid excessive growth of graph, we save the ARD vectors at every iterations and replace them
with tf.Session(graph=tf.Graph(), config=self.config):
X_batch = np.array(self.X[ws * k:ws * (k + 1)], dtype=float) # Optimize on non-overlapping windows
Y_batch = np.array(self.Y[ws * k:ws * (k + 1)], dtype=float)
                kern = gpflow.kernels.Linear(X_batch.shape[1], ARD=True)
                m_ard = gpflow.models.GPR(X_batch, Y_batch, kern)
m_ard.likelihood.variance = 0.1
m_ard.likelihood.trainable = False
self.optimizer.minimize(m_ard, maxiter=self.maxiter)
ards.append(m_ard.kern.variance.read_value())
self.ards = np.asarray(ards)
return self.ards
def compute_avg_ard_weights(self):
# if weights were not optimized yet:
if self.ards is None:
self.optimize_weights()
return np.mean(self.ards,axis=0)
    def save_weights_to_file(self, output="ARD_weights.txt", pop_file=None):
# Writes out the weights to a txt file. If a population_file is provided, saves popnames in columns
if pop_file is not None:
# CAREFUL: this pop_file must not be the same as the one provided to load the data
with open(pop_file, "r") as pop:
written_lines = []
firstline=pop.readline()[:-1].split()
w_l = firstline[0]
pops_reported = len(firstline)>1
if pops_reported:
w_l += " " + firstline[2] + " " + firstline[1]
# Need to account for haplotypes, hence duplicate everytime
written_lines.append(w_l)
written_lines.append(w_l)
for line in pop:
cols = line[:-1].split()
w_l = cols[0]
if pops_reported:
w_l += " " + cols[2] + " " + cols[1]
written_lines.append(w_l)
written_lines.append(w_l)
with open(output, "w") as w:
for idx,ard in enumerate(self.compute_avg_ard_weights()):
w.write(written_lines[idx] + " " + str(ard) + "\n")
else:
with open(output, "w") as w:
for ard in self.compute_avg_ard_weights():
w.write(str(ard) + "\n")
def load_ard_weights(weight_file, pop_file=None):
# Read the weights from the weight file and check against pop_file
with open(weight_file, "r") as f:
weights = []
ids = []
for line in f:
cols = line[:-1].split()
weights.append(float(cols[-1]))
if len(cols) > 1:
ids.append(cols[0])
if pop_file is not None:
with open(pop_file, "r") as f:
ref_ids = []
for line in f:
ref_ids.append(line[:-1].split()[0])
if 2*len(ref_ids) != len(weights):
print("Warning: the number of weights is different than twice the number of reference samples in the population file")
if len(ids) != 0:
for id in ids:
if id not in ref_ids:
print("Warning: sample {}is not referenced in the population file".format(id))
return np.asarray(weights)
def scale_with_weights(all_haps, typed_index, ard_weights):
# Scales the haps so as to directly incorporate the ARD weights and speed up later computations
all_haps = np.sqrt(ard_weights)*all_haps
gc.collect()
all_haps = preprocessing.scale(all_haps, axis=1, copy=False)
gc.collect()
typed_haps = np.take(all_haps, typed_index, axis=0)
return all_haps, typed_haps
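# Usage sketch (hedged: variable names, file names and data loading are assumed; a
# GPflow 1.x environment and a haplotype matrix X with typed scores Y are required):
#
#   ard = GPflowARD(X, Y, window_size=100, scale_X=True, verbose=True)
#   ard.optimize_weights()
#   ard.save_weights_to_file("ARD_weights.txt")
#   weights = load_ard_weights("ARD_weights.txt")
#   all_haps, typed_haps = scale_with_weights(all_haps, typed_index, weights)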
``` |
{
"source": "jonaszierer/genetics-colocalisation",
"score": 2
} |
#### File: jonaszierer/genetics-colocalisation/2_generate_manifest.py
```python
import os
import sys
# import pandas as pd
from pprint import pprint
from glob import glob
import json
from collections import OrderedDict
import gzip
def main():
# Parse args
in_overlap_table = glob('/home/ubuntu/results/coloc/overlap_table/*.json.gz')[0]
out_manifest = 'configs/manifest.json.gz'
overlap_prop_threshold = 0.01
max_credset_threshold = None
# # In path patterns (local)
# sumstats = '../genetics-finemapping/example_data/sumstats/{type}_2/{study_id}.parquet'
# ld_path = '/Users/em21/Projects/reference_data/uk10k_2019Feb/3_liftover_to_GRCh38/output/{chrom}.ALSPAC_TWINSUK.maf01.beagle.csq.shapeit.20131101'
# In path patterns (server)
sumstats = '/home/ubuntu/data/sumstats/filtered/significant_window_2mb/{type}/{study_id}.parquet'
ld_path = '/home/ubuntu/data/genotypes/ukb_v3_downsampled10k_plink/ukb_v3_chr{chrom}.downsampled10k'
# Out path patterns
out = "/home/ubuntu/results/coloc/output/left_study={left_study}/left_phenotype={left_phenotype}/left_bio_feature={left_bio_feature}/left_variant={left_variant}/right_study={right_study}/right_phenotype={right_phenotype}/right_bio_feature={right_bio_feature}/right_variant={right_variant}/coloc_res.json.gz"
log = "/home/ubuntu/results/coloc/logs/left_study={left_study}/left_phenotype={left_phenotype}/left_bio_feature={left_bio_feature}/left_variant={left_variant}/right_study={right_study}/right_phenotype={right_phenotype}/right_bio_feature={right_bio_feature}/right_variant={right_variant}/log_file.txt"
tmpdir = "/home/ubuntu/results/coloc/tmp/left_study={left_study}/left_phenotype={left_phenotype}/left_bio_feature={left_bio_feature}/left_variant={left_variant}/right_study={right_study}/right_phenotype={right_phenotype}/right_bio_feature={right_bio_feature}/right_variant={right_variant}/"
plot = "/home/ubuntu/results/coloc/plot/{left_study}_{left_phenotype}_{left_bio_feature}_{left_variant}_{right_study}_{right_phenotype}_{right_bio_feature}_{right_variant}.png"
manifest = []
with gzip.open(in_overlap_table, 'r') as in_h:
for in_record in in_h:
in_record = json.loads(in_record.decode())
out_record = OrderedDict()
# Skip if proportion_overlap < prop_threshold
if overlap_prop_threshold:
max_overlap_prop = max(in_record['left_overlap_prop'],
in_record['right_overlap_prop'])
if max_overlap_prop < overlap_prop_threshold:
continue
            # Skip if the biggest credible set has > max_credset_threshold variants
if max_credset_threshold:
max_credset_size = max(in_record['left_num_tags'],
in_record['right_num_tags'])
if max_credset_size > max_credset_threshold:
continue
# Add information for left/right
for side in ['left', 'right']:
# Add file information
study_type = 'gwas' if in_record['{}_type'.format(side)] == 'gwas' else 'molecular_trait'
out_record['{}_sumstats'.format(side)] = sumstats.format(
type=study_type,
study_id=in_record['{}_study_id'.format(side)])
out_record['{}_ld'.format(side)] = ld_path.format(
chrom=in_record['{}_lead_chrom'.format(side)])
# Add study identifiers
identifiers = ['study_id', 'type', 'phenotype_id', 'bio_feature', 'lead_chrom',
'lead_pos', 'lead_ref', 'lead_alt']
for i in identifiers:
out_record['{}_{}'.format(side, i)] = in_record.get('{}_{}'.format(side, i), None)
# Add method (always conditional for now)
out_record['method'] = 'conditional'
# Add output files
left_variant = '_'.join(
[str(in_record['left_lead_{}'.format(part)])
for part in ['chrom', 'pos', 'ref', 'alt']]
)
right_variant = '_'.join(
[str(in_record['right_lead_{}'.format(part)])
for part in ['chrom', 'pos', 'ref', 'alt']]
)
out_record['out'] = out.format(
left_study=in_record['left_study_id'],
left_phenotype=in_record.get('left_phenotype_id', None),
left_bio_feature=in_record.get('left_bio_feature', None),
left_variant=left_variant,
right_study=in_record['right_study_id'],
right_phenotype=in_record.get('right_phenotype_id', None),
right_bio_feature=in_record.get('right_bio_feature', None),
right_variant=right_variant
)
out_record['log'] = log.format(
left_study=in_record['left_study_id'],
left_phenotype=in_record.get('left_phenotype_id', None),
left_bio_feature=in_record.get('left_bio_feature', None),
left_variant=left_variant,
right_study=in_record['right_study_id'],
right_phenotype=in_record.get('right_phenotype_id', None),
right_bio_feature=in_record.get('right_bio_feature', None),
right_variant=right_variant
)
out_record['tmpdir'] = tmpdir.format(
left_study=in_record['left_study_id'],
left_phenotype=in_record.get('left_phenotype_id', None),
left_bio_feature=in_record.get('left_bio_feature', None),
left_variant=left_variant,
right_study=in_record['right_study_id'],
right_phenotype=in_record.get('right_phenotype_id', None),
right_bio_feature=in_record.get('right_bio_feature', None),
right_variant=right_variant
)
out_record['plot'] = plot.format(
left_study=in_record['left_study_id'],
left_phenotype=in_record.get('left_phenotype_id', None),
left_bio_feature=in_record.get('left_bio_feature', None),
left_variant=left_variant,
right_study=in_record['right_study_id'],
right_phenotype=in_record.get('right_phenotype_id', None),
right_bio_feature=in_record.get('right_bio_feature', None),
right_variant=right_variant
)
# Make all paths absolute
for colname in ['left_sumstats', 'left_ld', 'right_sumstats', 'right_ld',
'out', 'log', 'tmpdir', 'plot']:
out_record[colname] = os.path.abspath(out_record[colname])
# Check that all input paths exist
for colname in ['left_sumstats', 'left_ld', 'right_sumstats', 'right_ld']:
# Get path
in_path = out_record[colname]
# If plink prefix, add .bed suffix
if colname == 'left_ld' or colname == 'right_ld':
in_path = in_path + '.bed'
# Assert exists
assert os.path.exists(in_path), \
"Input file not found ({}): {}".format(colname, in_path)
manifest.append(out_record)
# Write manifest file
os.makedirs(os.path.dirname(out_manifest), exist_ok=True)
with gzip.open(out_manifest, 'w') as out_h:
for record in manifest:
out_h.write((json.dumps(record) + '\n').encode())
return 0
if __name__ == '__main__':
main()
``` |
{
"source": "jonaszierer/genetics-finemapping",
"score": 3
} |
#### File: genetics-finemapping/utils/concat_parquet.py
```python
import argparse
import pandas as pd
def main():
# Args
args = parse_args()
# Load
dfs = (pd.read_parquet(inf, engine='fastparquet')
for inf in args.in_parquets)
# Concatenate
full_df = pd.concat(dfs, ignore_index=True)
# Write
full_df.to_parquet(
args.out,
engine='fastparquet',
compression='snappy',
row_group_offsets=500000
)
return 0
def parse_args():
''' Load command line args
'''
p = argparse.ArgumentParser()
# Add input files
p.add_argument('--in_parquets',
metavar="<file>",
help=("List of parquet files to concatenate"),
type=str,
nargs='+',
required=True)
p.add_argument('--out',
metavar="<file>",
help=("Concatenated parquet file"),
type=str,
required=True)
args = p.parse_args()
return args
if __name__ == '__main__':
main()
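# Example invocation (hypothetical file names):
#   python concat_parquet.py --in_parquets part1.parquet part2.parquet --out combined.parquet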
``` |
{
"source": "jonatak/simple-http-monitor",
"score": 3
} |
#### File: simple-http-monitor/tests/test_parser.py
```python
import rfc3339
import pytest
from aio.monitoring.parser import (
parse_common,
LogLine,
ParserCommonException
)
def test_simple_log_line():
line = (
'127.0.0.1 - james [15/Dec/2019:00:2:50 +0100]'
' "GET /report HTTP/1.0" 200 12'
)
log_line = parse_common(line)
assert log_line == LogLine(
host='127.0.0.1',
rfc931='-',
user='james',
datetime=rfc3339.parse_datetime('2019-12-15T00:02:50+01:00'),
uri='/report',
method='GET',
protocol='HTTP/1.0',
status=200,
bytes_received=12
)
def test_incorrect_log_line():
line = (
'127.0.0.1 - james [15/Dec/2019:00:2:50 +0100]'
' "GET /report HTTP/1.0" 200'
)
with pytest.raises(ParserCommonException):
parse_common(line)
``` |
{
"source": "jonatan098/cursopython",
"score": 4
} |
#### File: mundo 3/aula 20/exer100.py
```python
from time import sleep
from random import randint
def sortear():
print('sorteando 5 valores para lista: ', end='')
for v in range(0,5):
rand = randint(0,100)
num.append(rand)
print(rand, end=' ', flush=True)
sleep(0.5)
print('PRONTO!')
def somepar():
soma = 0
for v in num:
if v % 2 == 0:
soma += v
print(f'somando os valores pares de {num} temos {soma}')
# main code
num = []
sortear()
somepar()
``` |
{
"source": "jonatan1609/AdventOfCode",
"score": 4
} |
#### File: 2020/day_1/code.py
```python
def read_file(path: str = "input") -> str:
with open(path) as file:
return file.read()
def part_1():
numbers = [int(x) for x in read_file().splitlines()]
for n1 in numbers:
for n2 in numbers:
if n1 + n2 == 2020:
return print("Solution for part 1 of day 1:", n1 * n2)
def part_2():
numbers = [int(x) for x in read_file().splitlines()]
for n1 in numbers:
for n2 in numbers:
for n3 in numbers:
if n1 + n2 + n3 == 2020:
return print("Solution for part 2 of day 1:", n1 * n2 * n3)
part_1()
part_2()
```
#### File: 2020/day_3/code.py
```python
def read_file(path: str = "input") -> str:
with open(path) as file:
return file.read()
def n_trees(area: list, slope: tuple, n_lines: int) -> int:
trees = 0
line = column = 0
for i in range(n_lines):
        column += slope[0]  # move right by the slope's horizontal step
        line += slope[1]  # move down by the slope's vertical step
if len(area[line]) <= column:
area[line] *= column
if area[line][column] == "#":
trees += 1
if line == n_lines - 1:
break
return trees
def part_1():
area = read_file().splitlines()
n_lines = len(area)
trees = n_trees(area, (3, 1), n_lines)
print("Solution for part 1 of day 3:", trees)
def part_2():
area = read_file().splitlines()
n_lines = len(area)
trees = 1
for slope in (
(1, 1),
(3, 1),
(5, 1),
(7, 1),
(1, 2)
):
trees *= n_trees(area, slope, n_lines)
print("Solution for part 2 of day 3:", trees)
part_1()
part_2()
```
#### File: 2020/day_4/code.py
```python
import re
def read_file(path: str = "input") -> str:
with open(path) as file:
return file.read()
def part_1():
fields = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
content = read_file().split("\n\n")
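    # A passport is valid when its keys, ignoring "cid", are exactly the required fields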
print("Solution for part 1 of day 4:", sum((set(y.split(":")[0] for y in x.split()) - {"cid"}) == fields for x in content))
def part_2():
rules = {
"byr": re.compile(r"(19[2-9][0-9])|(200[0-2])"),
"iyr": re.compile(r"20(1[0-9]|20)"),
"eyr": re.compile(r"20(2[0-9]|30)"),
"hgt": re.compile(r"((1[5-8][0-9])|(19[0-3]))cm|(([56][0-9])|(7[0-6]))in"),
"hcl": re.compile(r"#[0-9a-f]{6}"),
"ecl": re.compile(r"amb|blu|brn|gry|grn|hzl|oth"),
"pid": re.compile(r"[0-9]{9}"),
}
content = read_file().split("\n\n")
valid = 0
for passport in content:
passport = dict(x.split(":") for x in passport.split())
if set(passport.keys()) - {"cid"} == set(rules.keys()):
for field, value in passport.items():
if field == "cid":
continue
if not rules[field].fullmatch(value):
break
else:
valid += 1
print("Solution for part 2 of day 4:", valid)
part_1()
part_2()
``` |
{
"source": "jonatan1609/JoLang",
"score": 3
} |
#### File: JoLang/helpers/shell.py
```python
import platform
import re
from jolang.tokenizer import Tokenizer
from jolang.preprocessor import preprocess
from jolang.parser import Parser
from jolang.parser import ast
print("JoLang Shell on {}".format(platform.platform()))
print("Docs: https://jolang.org")
print("Type exit or quit to close the shell")
class Evaluate:
PATTERN = re.compile(r'(?=[A-Z])')
def __init__(self, node: ast.Body = None):
self.node = node
self.variables = {}
self.macros = {}
@staticmethod
def pascal_case_to_snake_case(string: str):
return "_".join(x.lower() for x in Evaluate.PATTERN.split(string) if x)
@staticmethod
def visit_number(node: ast.Integer):
return node.argument
def visit_unary_add(self, node: ast.UnaryAdd):
return +self._visit(node.argument)
def visit_unary_subtract(self, node: ast.UnarySubtract):
return -self._visit(node.argument)
def visit_unary_logical_not(self, node: ast.UnaryLogicalNot):
return not self._visit(node.argument)
def visit_unary_tilde(self, node: ast.UnaryTilde):
return ~self._visit(node.argument)
def visit_name(self, v: ast.Name):
        if v.argument not in self.variables:
            raise NameError(f"Variable {v.argument!r} does not exist!")
        return self.variables[v.argument]
def visit_binary_node(self, node: ast.BinaryNode):
left, right = self._visit(node.left), self._visit(node.right)
if isinstance(node.op, ast.Multiply):
return left * right
if isinstance(node.op, ast.Divide):
return left / right
if isinstance(node.op, ast.Add):
return left + right
if isinstance(node.op, ast.Subtract):
return left - right
if isinstance(node.op, ast.Modulo):
return left % right
def visit_assignment(self, node: ast.Assignment):
name = node.const.argument
content = self._visit(node.content)
self.variables[name] = content
def _visit(self, node):
method = 'visit_' + self.pascal_case_to_snake_case(node.__class__.__name__)
method = getattr(self, method, method)
if not callable(method):
raise NotImplementedError(f"method {method!r} isn't implemented yet!")
return method(node)
@staticmethod
def visit_string(node):
return node.argument
def visit_call(self, node: ast.Call):
func = self._visit(node.const)
args = self._visit(node.args)
return func(*args)
def visit_arguments(self, node: ast.Arguments):
return [self._visit(arg) for arg in node.items]
def visit(self):
if self.node.statements:
return self._visit(self.node.statements[0])
return ''
evaluator = Evaluate()
def main(code: str):
stream = Tokenizer(code).tokenize()
preprocessor = preprocess(stream, evaluator.macros)
parser = Parser(preprocessor)
evaluator.node = parser.parse()
evaluator.macros.update(parser.macros)
return evaluator.visit() or None
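# Illustrative session (example input/output, not part of this file):
#   JoLang >>> x = 2 + 3 * 4
#   JoLang >>> x
#   14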
try:
while (command := input('JoLang >>> ')).lower() not in ('quit', 'exit'):
res = main(command)
if res is not None:
print(res)
except KeyboardInterrupt:
pass
```
#### File: stdlib/builtin_types/Function.py
```python
from dataclasses import dataclass, field
from .object import Object
from .operator import Operator, Attribute
@dataclass
class Function(Object):
def __post_init__(self):
Object.__init__(self)
self._obj = self.__repr__()
name: str
parameters: list = field(default_factory=list)
body: list = field(default_factory=list)
py_bind: ... = None
restype: ... = None
scope: ... = None
method_of: str = ""
@Operator("Call", compatible=["Function"])
def call(self, *args):
pass
def __repr__(self):
if self.method_of:
return f"<Method {self.name!r} of object {self.method_of!r}>"
return f"<Function {self.name!r}>"
Attribute.Function = Function
```
#### File: stdlib/builtin_types/Null.py
```python
from .object import Object
class Null(Object):
def __init__(self, *_):
super().__init__()
self._obj = "null"
```
#### File: stdlib/builtin_types/object.py
```python
from .builtin import BuiltinType
from .operator import Operator, Attribute
from . import empty
class Object(BuiltinType):
def __init__(self):
self.operators = self.__find_operators()
self.attributes = self.__find_attributes()
self._obj = None
def __find_operators(self):
return {
name: op_.call
for op in dir(self) if isinstance(op_ := getattr(self, op), Operator) and not op.startswith("_") for name in op_.f.names
}
def __find_attributes(self):
return {getattr(self, name).op_name: getattr(self, name) for name in dir(self) if isinstance(getattr(self, name, ""), Attribute)}
def operate(self, op_name, *args):
if not (op := self.available_operator(op_name)):
return empty
else:
return self.__do_operate(op, op_name, *args)
def available_operator(self, op_name):
if not self.operators:
self.operators = self.__find_operators()
if op := self.operators.get(op_name):
return op
def __do_operate(self, op, op_name, *args):
return op(op_name, self, *args)
def inheritance(self):
return [cls.__name__ for cls in self.__class__.mro()]
@Operator("GetAttr", compatible=["Object"])
def getattr(self, attr):
obj = self.attributes.get(attr._obj, empty)
if obj is not empty:
obj.init(self)
return obj.function
return obj
def __repr__(self):
return str(self._obj)
@Operator("Equals", compatible=["Object"])
def equals(self, other):
return self._obj == other._obj
@Operator("NotEqual", compatible=["Object"])
def not_equal(self, other):
return self._obj != other._obj
@Operator("UnaryLogicalNot", compatible=["Object"])
def unary_logical_not(self):
return not self._obj
@Operator("LogicAnd", compatible=["Object"])
def logic_and(self, other):
pass
@Operator("LogicOr", compatible=["Object"])
def logic_or(self, other):
pass
```
#### File: jolang/parser/errors.py
```python
import typing
class Error:
def __init__(self, p, current_tok, next_token):
self.current_tok = current_tok
self.next_token = next_token
if not next_token:
self.next_token = p.next_token = current_tok
def __call__(self, message: typing.Optional[str]):
if message:
message += " at "
else:
message = ""
raise SyntaxError(message + f"line {self.current_tok.line} column {self.next_token.col}")
```
#### File: jolang/parser/parser.py
```python
import typing
from .keywords import keywords
from ..tokenizer.tokens import Identifier, Token
from ..tokenizer import tokens
from . import ast, errors
Keyword = Token("KEYWORD")
class Parser:
comp_op_table = {
tokens.IsEqual: ast.Equals,
tokens.NotEqual: ast.NotEqual,
tokens.LessEqual: ast.LessEqual,
tokens.GreatEqual: ast.GreatEqual,
tokens.LesserThan: ast.LesserThan,
tokens.GreaterThan: ast.GreaterThan
}
inplace_op_table = {
tokens.Equals: ast.Assign,
tokens.InplaceAdd: ast.InplaceAdd,
tokens.InplaceSubtract: ast.InplaceSubtract,
tokens.InplaceModulo: ast.InplaceModulo,
tokens.InplaceMultiply: ast.InplaceMultiply,
tokens.InplaceDivide: ast.InplaceDivide,
tokens.InplaceRightShift: ast.InplaceRightShift,
tokens.InplaceLeftShift: ast.InplaceLeftShift,
tokens.InplaceBinOr: ast.InplaceBinOr,
tokens.InplaceBinAnd: ast.InplaceBinAnd,
tokens.InplaceXor: ast.InplaceXor
}
def __init__(self, stream: typing.Iterable[Token]):
self.macros: typing.Dict[typing.Tuple[str, str], typing.List[tokens.Token]] = {}
self.tokens_stream = self.cast_identifier_to_keyword(stream)
self._current_token: typing.Optional[Token] = None
self.next_token: typing.Optional[Token] = None
self.advance()
@property
def current_token(self):
return self._current_token or self.next_token
@current_token.setter
def current_token(self, item):
self._current_token = item
def advance(self) -> None:
self.current_token, self.next_token = self.next_token, next(self.tokens_stream, None)
def push_token_back(self):
self.tokens_stream = iter([self.current_token, self.next_token] + list(self.tokens_stream))
self.advance()
def is_eof(self) -> bool:
return not self.next_token
def cast_identifier_to_keyword(self, stream: typing.Iterable[Token]) -> typing.Generator[Token, None, None]:
for token in stream:
if isinstance(token, dict):
self.macros = token
continue
if token.name == Identifier.name and token.content in keywords:
yield Keyword.set_content(token.line, token.col, token.content)
else:
yield token
def accept(self, token: Token):
if self.next_token and isinstance(self.next_token, token):
self.advance()
return True
return False
@property
def throw(self):
return errors.Error(self, self.current_token, self.next_token)
def parse_literal(self):
# Literal: Digit | String | Identifier
if self.accept(tokens.Integer): # Digit: '0'|'1'|'2'|'3'|'4'|'5'|'6'|'7'|'8'|'9'
return ast.Integer(self.current_token.line, self.current_token.col, int(self.current_token.content))
elif self.accept(tokens.String): # String: ('"' {char} '"') | ("'" {char} "'")
return ast.String(self.current_token.line, self.current_token.col, self.current_token.content)
elif self.accept(tokens.Identifier): # Identifier: (LowerCase | UpperCase | '_') {Digit} {Identifier}
return ast.Name(self.current_token.line, self.current_token.col, self.current_token.content)
elif self.accept(tokens.Float): # Float: {Digit} '.' {Digit}
return ast.Float(self.current_token.line, self.current_token.col, float(self.current_token.content))
def parse_atom(self):
node = None
unary_op = True
# Atom: ({'~'|'-'|'+'|'!'} Atom) | '(' [Assignment] ')' | Literal | (Literal '(' [Args] ')')
if self.accept(tokens.UnaryTilde):
node = ast.UnaryTilde(self.current_token.line, self.current_token.col, self.parse_atom())
elif self.accept(tokens.LogicNot):
node = ast.UnaryLogicalNot(self.current_token.line, self.current_token.col, self.parse_atom())
elif self.accept(tokens.Add):
node = ast.UnaryAdd(self.current_token.line, self.current_token.col, self.parse_atom())
elif self.accept(tokens.Subtract):
node = ast.UnarySubtract(self.current_token.line, self.current_token.col, self.parse_atom())
else:
unary_op = False
if not unary_op:
if literal := self.parse_literal():
node = literal
elif self.accept(tokens.LeftParen):
if self.accept(tokens.RightParen):
node = ast.Node(self.current_token.line, self.current_token.col-2)
else:
node = self.parse_assignment()
if not self.accept(tokens.RightParen):
self.throw(f"Parenthesis were not closed")
elif self.accept(tokens.LeftBracket):
self.push_token_back()
node = self.parse_array()
while not self.is_eof() and isinstance(self.next_token, (tokens.LeftParen, tokens.LeftBracket, tokens.Dot)):
while self.accept(tokens.LeftParen):
if self.accept(tokens.RightParen):
node = ast.Call(self.current_token.line, self.current_token.col - 1, node, ast.Arguments(self.current_token.line, self.current_token.col, []))
else:
line, col = self.current_token.line, self.current_token.col
args = self.parse_args()
if not self.accept(tokens.RightParen):
self.throw(f"Parenthesis were not closed")
node = ast.Call(line, col, node, args)
while self.accept(tokens.LeftBracket):
line, col = self.current_token.line, self.current_token.col
self.push_token_back()
start, stop, step = self.parse_index()
node = ast.Index(line, col, start, stop, step, node)
while self.accept(tokens.Dot):
line, col = self.current_token.line, self.current_token.col
if not self.accept(tokens.Identifier):
self.throw("SyntaxError")
node = ast.Attribute(line, col, node, ast.Name(self.current_token.line, self.current_token.col, self.current_token.content))
if unary_op and not node.argument:
self.current_token.col += 1
self.throw(None)
return node
def parse_comp_op(self):
# CompOp: '==' | '!=' | '<=' | '>=' | '<' | '>'
for i, l in (
(tokens.IsEqual, 2), (tokens.NotEqual, 2),
(tokens.LessEqual, 2), (tokens.GreatEqual, 2),
(tokens.LesserThan, 1), (tokens.GreaterThan, 1),
):
if self.accept(i):
return self.comp_op_table[i], l
def parse_comp(self):
# CompExpr: BinaryOrExpr {CompOp BinaryOrExpr}
node = self.parse_binary_or()
while op := self.parse_comp_op():
node = ast.Compare(self.current_token.line, self.current_token.col, node, op[0](self.current_token.line, self.current_token.col), self.parse_binary_or())
if not node.right or not node.left:
self.current_token.col += op[1]
self.throw(None)
return node
def parse_binary_or(self):
        # BinaryOrExpr: BinaryXorExpr {'|' BinaryXorExpr}
node = self.parse_binary_xor()
while self.accept(tokens.BinOr):
node = ast.BinaryNode(self.current_token.line, self.current_token.col, node, ast.Or(self.current_token.line, self.current_token.col), self.parse_binary_xor())
if not node.right or not node.left:
self.current_token.col += 1
self.throw(None)
return node
def parse_binary_xor(self):
# BinaryXorExpr: BinaryAndExpr {'^' BinaryAndExpr}
node = self.parse_binary_and()
while self.accept(tokens.Xor):
node = ast.BinaryNode(self.current_token.line, self.current_token.col, node, ast.Xor(self.current_token.line, self.current_token.col), self.parse_binary_and())
if not node.right or not node.left:
self.current_token.col += 1
self.throw(None)
return node
def parse_binary_and(self):
        # BinaryAndExpr: ShiftExpr {'&' ShiftExpr}
node = self.parse_shift_expr()
while self.accept(tokens.BinAnd):
node = ast.BinaryNode(self.current_token.line, self.current_token.col, node, ast.And(self.current_token.line, self.current_token.col), self.parse_shift_expr())
if not node.right or not node.left:
self.current_token.col += 1
self.throw(None)
return node
def parse_shift_expr(self):
# ShiftExpr: Expr {('<<' | '>>') Expr}
node = self.parse_expr()
while not self.is_eof():
if self.accept(tokens.RightShift):
node = ast.BinaryNode(self.current_token.line, self.current_token.col, node, ast.RightShift(self.current_token.line, self.current_token.col), self.parse_expr())
elif self.accept(tokens.LeftShift):
                node = ast.BinaryNode(self.current_token.line, self.current_token.col, node, ast.LeftShift(self.current_token.line, self.current_token.col), self.parse_expr())
else:
break
if not node.right or not node.left:
self.current_token.col += 2
self.throw(None)
return node
def parse_return(self, *_):
# 'return' Assignment
return ast.Return(line=self.current_token.line, column=self.current_token.col, argument=self.parse_assignment())
def parse_logical_and(self):
        # LogicalAndExpr: CompExpr {'&&' CompExpr}
node = self.parse_comp()
while self.accept(tokens.LogicAnd):
node = ast.BinaryNode(self.current_token.line, self.current_token.col, node, ast.LogicAnd(self.current_token.line, self.current_token.col), self.parse_comp())
if not node.right or not node.left:
self.current_token.col += 1
self.throw(None)
return node
def parse_logical_or(self):
# LogicalOrExpr: LogicalAndExpr {'||' LogicalAndExpr}
node = self.parse_logical_and()
while self.accept(tokens.LogicOr):
node = ast.BinaryNode(self.current_token.line, self.current_token.col, node, ast.LogicOr(self.current_token.line, self.current_token.col), self.parse_logical_and())
if not node.right or not node.left:
self.current_token.col += 1
self.throw(None)
return node
def parse_args(self):
# Args: Atom {',' Atom}
args = [self.parse_assignment()]
while self.accept(tokens.Comma):
args.append(self.parse_assignment())
return ast.Arguments(self.current_token.line, self.current_token.col, args)
def parse_term(self):
# Term: Atom {'*'|'/'|'%' Atom}
node = self.parse_atom()
while not self.is_eof():
if self.accept(tokens.Multiply):
node = ast.BinaryNode(line=self.current_token.line, column=self.current_token.col,left=node, op=ast.Multiply(self.current_token.line, column=self.current_token.col), right=self.parse_atom())
elif self.accept(tokens.Divide):
node = ast.BinaryNode(line=self.current_token.line, column=self.current_token.col, left=node, op=ast.Divide(self.current_token.line, column=self.current_token.col), right=self.parse_atom())
elif self.accept(tokens.Modulo):
node = ast.BinaryNode(line=self.current_token.line, column=self.current_token.col,left=node, op=ast.Modulo(self.current_token.line, column=self.current_token.col), right=self.parse_atom())
else:
break
if not node.right or not node.left:
self.current_token.col += 1
self.throw(None)
return node
def parse_expr(self):
# Expr: Term {'+'|'-' Term}
node = self.parse_term()
while not self.is_eof():
if self.accept(tokens.Add):
node = ast.BinaryNode(self.current_token.line, self.current_token.col, left=node, op=ast.Add(self.current_token.line, self.current_token.col, ), right=self.parse_term())
elif self.accept(tokens.Subtract):
node = ast.BinaryNode(self.current_token.line, self.current_token.col, left=node, op=ast.Subtract(self.current_token.line, self.current_token.col, ), right=self.parse_term())
else:
break
if not node.right or not node.left:
self.current_token.col += 1
self.throw(None)
return node
def parse_assignment(self):
# Assignment: {Identifier AssignOp} LogicalOrExpr
asses = []
while self.accept(tokens.Identifier):
name = ast.Name(self.current_token.line, self.current_token.col, self.current_token.content)
for i, l in (
(tokens.Equals, 1), (tokens.InplaceAdd, 2),
(tokens.InplaceSubtract, 2), (tokens.InplaceModulo, 2),
(tokens.InplaceMultiply, 2), (tokens.InplaceDivide, 2),
(tokens.InplaceRightShift, 2), (tokens.InplaceLeftShift, 2),
(tokens.InplaceBinOr, 2), (tokens.InplaceBinAnd, 2), (tokens.InplaceXor, 2)
):
if self.accept(i):
asses.append(self.inplace_op_table[i](self.current_token.line, self.current_token.col))
asses.append(name)
break
else:
if isinstance(self.current_token, tokens.Identifier):
self.push_token_back()
break
if not asses:
self.push_token_back()
break
line = self.current_token.line
node = self.parse_logical_or()
while asses:
if not node:
self.current_token.col += l
self.throw(None)
node = ast.Assignment(line, self.current_token.col, asses.pop(), asses.pop(), node)
return node
def parse_params(self):
# Params: Identifier {',' Identifier}
params = ast.Arguments(self.current_token.line, self.current_token.col, [])
while self.accept(tokens.Identifier):
params.items.append(ast.Name(self.current_token.line, self.current_token.col, argument=self.current_token.content))
if not self.accept(tokens.Comma):
break
return params
def parse_func(self, keywords):
for i in ('continue', 'break'):
if i in keywords:
del keywords[i]
# Func: 'func' Identifier '(' [Params] ')' '{' FuncBlock '}'
if self.accept(tokens.Identifier):
name = ast.Name(self.current_token.line, self.current_token.col, argument=self.current_token.content)
params = ast.Arguments(self.current_token.line, self.current_token.col, [])
if self.accept(tokens.LeftParen):
params = self.parse_params()
if not self.accept(tokens.RightParen):
self.throw(f"Expected ')', got {self.next_token.name}")
if self.accept(tokens.LeftBrace):
statements = self.parse_block(keywords={
**keywords,
"return": self.parse_return
})
if not self.accept(tokens.RightBrace):
self.throw(f"Expected '}}', got {self.next_token.name}")
else:
self.throw(f"Expected '{{', got {self.next_token.name}")
else:
self.throw(f"Expected '(', got {self.next_token.name}")
return ast.Function(self.current_token.line, self.current_token.col, name=name, params=params, body=statements)
else:
self.throw(f"Expected an identifier, got {self.next_token.name}")
def parse_block(self, keywords=None):
# Block: {Assignment | Func | IfStmt | WhileLoop | ForLoop | NEWLINE}
statements = []
if not keywords:
keywords = {}
keywords = {
"if": self.parse_if_stmt,
"func": self.parse_func,
"for": self.parse_for_loop,
"while": self.parse_while_loop,
**keywords
}
while not self.is_eof():
while self.accept(tokens.Newline):
pass
if self.accept(Keyword):
if f := keywords.get(self.current_token.content):
statements.append(f(keywords))
else:
self.throw("Did not expect a keyword.")
elif assignment := self.parse_assignment():
statements.append(assignment)
elif isinstance((self.next_token or self.current_token), tokens.RightBrace):
break
else:
break
if (not isinstance(self.current_token, tokens.RightBrace)) and self.next_token and (not isinstance(self.next_token, tokens.RightBrace)) and (not self.accept(tokens.Newline)):
if not isinstance(self.current_token, tokens.Newline):
self.throw(f"Expected a newline, got {self.next_token}")
return statements
def parse_if_stmt(self, keywords):
# IfStmt: 'if' '(' Assignment ')' '{' Block '}'
else_block = None
if self.accept(tokens.LeftParen):
stmt = self.parse_assignment()
if not stmt:
self.throw(f"Expected expression")
if not self.accept(tokens.RightParen):
self.throw(f"Expected ')', got {self.next_token.name}")
while self.accept(tokens.Newline):
pass
if not self.accept(tokens.LeftBrace):
self.throw(f"Expected '{{', got {self.next_token.name}")
block = self.parse_block(keywords)
if not self.accept(tokens.RightBrace):
self.throw(f"Expected '}}', got {self.next_token.name}")
else:
self.throw(f"Expected '(', got {self.next_token.name}")
elifs = []
while self.accept(tokens.Newline):
pass
while not self.is_eof() and self.next_token.content == "elif":
# ElifStmt: 'elif' '(' Assignment ')' '{' Block '}'
self.advance()
elifs.append(self.parse_if_stmt(keywords))
while self.accept(tokens.Newline):
pass
if not self.is_eof() and self.next_token.content == "else":
# ElseStmt: 'else' '{' Block '}'
self.advance()
if not self.accept(tokens.LeftBrace):
self.throw(f"Expected '{{', got {self.next_token.name}")
else_block = self.parse_block()
if not self.accept(tokens.RightBrace):
self.throw(f"Expected '}}', got {self.next_token.name}")
return ast.If(self.current_token.line, self.current_token.col, condition=stmt, body=block, elifs=elifs, else_block=else_block)
def parse_while_loop(self, keywords):
# WhileLoop: 'while' '(' Assignment ')' '{' Block '}'
if self.accept(tokens.LeftParen):
stmt = self.parse_assignment()
if not stmt:
self.throw(f"Expected expression")
if not self.accept(tokens.RightParen):
self.throw(f"Expected ')', got {self.next_token.name}")
if not self.accept(tokens.LeftBrace):
self.throw(f"Expected '{{', got {self.next_token.name}")
block = self.parse_block(keywords={
**keywords,
"continue": lambda *_: ast.Continue(line=self.current_token.line, column=self.current_token.col),
"break": lambda *_: ast.Break(line=self.current_token.line, column=self.current_token.col)
})
if not self.accept(tokens.RightBrace):
self.throw(f"Expected '}}', got {self.next_token.name}")
else:
self.throw(f"Expected '(', got {self.next_token.name}")
return ast.While(self.current_token.line, self.current_token.col, stmt, block)
def parse_for_loop(self, keywords):
# ForLoop: 'for' '(' [Assignment] ';' [Assignment] ';' [Assignment] ')' '{' Block '}'
parts = []
if self.accept(tokens.LeftParen):
            for i in range(2):  # first two of the three parts in a for loop; the third is parsed below
if self.accept(tokens.Semicolon):
parts.append(ast.Node(self.current_token.line, self.current_token.col, ))
else:
parts.append(self.parse_assignment())
if not self.accept(tokens.Semicolon):
self.throw(f"Expected ';', got {self.next_token.name}")
parts.append(self.parse_assignment() or ast.Node(self.current_token.line, self.current_token.col))
if not self.accept(tokens.RightParen):
self.throw(f"Expected ')', got {self.next_token.name}")
if not self.accept(tokens.LeftBrace):
self.throw(f"Expected '{{', got {self.next_token.name}")
block = self.parse_block(keywords={
**keywords,
"continue": lambda *_: ast.Continue(line=self.current_token.line, column=self.current_token.col),
"break": lambda *_: ast.Break(line=self.current_token.line, column=self.current_token.col)
})
if not self.accept(tokens.RightBrace):
self.throw(f"Expected '}}', got {self.next_token.name}")
else:
self.throw(f"Expected '(', got {self.next_token.name}")
return ast.For(self.current_token.line, self.current_token.col, parts, block)
def parse_array(self):
# Array: '[' Assignment {',' Assignment} ']'
if self.accept(tokens.LeftBracket):
line = self.current_token.line
col = self.current_token.col
if self.accept(tokens.RightBracket):
return ast.Array(line=line, column=col, items=[])
if isinstance(self.next_token, tokens.Comma):
self.throw("Syntax Error")
items = [self.parse_assignment()]
while self.accept(tokens.Comma):
items.append(self.parse_assignment())
if isinstance(self.current_token, tokens.Comma):
items.pop()
if not self.accept(tokens.RightBracket):
self.throw("Syntax Error")
return ast.Array(line=line, column=col, items=items)
def parse_index(self):
# Index = '[' Assignment [':' Assignment [':' Assignment]] ']'
start = stop = step = None
if self.accept(tokens.LeftBracket):
if self.accept(tokens.RightBracket):
self.throw("Syntax Error")
start = self.parse_assignment()
if not start:
start = ast.Node(self.current_token.line, self.current_token.col)
if not self.accept(tokens.RightBracket):
if not self.accept(tokens.Colon):
self.throw("Syntax Error")
stop = self.parse_assignment()
if not stop:
stop = ast.Node(self.current_token.line, self.current_token.col)
if not self.accept(tokens.RightBracket):
if not self.accept(tokens.Colon):
self.throw("Syntax Error")
step = self.parse_assignment()
if not step:
step = ast.Node(self.current_token.line, self.current_token.col)
if not self.accept(tokens.RightBracket):
self.throw("Syntax Error")
return start, stop, step
def parse(self):
body = ast.Body(self.next_token.line, self.next_token.col, [])
while not self.is_eof():
node = self.parse_block()
if self.next_token and not self.accept(tokens.Newline):
self.throw(f"got {self.next_token.name}")
body.statements = node
return body
```
#### File: JoLang/tests/test_preprocessor.py
```python
from unittest import TestCase
from jolang.preprocessor import preprocess
from jolang.tokenizer import Tokenizer, tokens
class TestPreprocessor(TestCase):
tests_pass = {
"%macro a": [{('IDENTIFIER', 'a'): []}],
"%macro a 1": [{('IDENTIFIER', 'a'): [tokens.Integer]}],
"%macro a 2 + a": [{('IDENTIFIER', 'a'): [tokens.Integer, tokens.Add, tokens.Identifier]}]
}
tests_fail = ["%", "%macro 0", "%macro", "%macro", "%macro ~ []"]
def test_pass(self):
for test, expect in self.tests_pass.items():
p = list(preprocess(Tokenizer(test).tokenize()))
self.assertEqual(p[0].keys(), expect[0].keys())
pv, = list(p[0].values())
ev, = list(expect[0].values())
for i in range(len(ev)):
self.assertIsInstance(pv[i], ev[i])
def test_fail(self):
for test in self.tests_fail:
self.assertRaises((AssertionError, SyntaxError), lambda: list(preprocess(Tokenizer(test).tokenize())))
```
#### File: JoLang/tests/test_tokenizer.py
```python
from unittest import TestCase
from jolang.tokenizer import Tokenizer, tokens
class TestTokenizer(TestCase):
tests_pass = {
"+": [tokens.Add],
"-": [tokens.Subtract],
">>": [tokens.RightShift],
">>=": [tokens.InplaceRightShift],
"|": [tokens.BinOr],
"||": [tokens.LogicOr],
"abc a0 01": [tokens.Identifier, tokens.Identifier, tokens.Integer],
"0x222 0o222 2.2": [tokens.Integer, tokens.Integer, tokens.Float],
"func a(){return a % 2 - 1 == 2}": [tokens.Identifier, tokens.Identifier, tokens.LeftParen, tokens.RightParen, tokens.LeftBrace, tokens.Identifier, tokens.Identifier, tokens.Modulo, tokens.Integer, tokens.Subtract, tokens.Integer, tokens.IsEqual, tokens.Integer, tokens.RightBrace],
"$ abc": [],
"a $abc \n a": [tokens.Identifier, tokens.Identifier]
}
tests_fail = ["0a", "0.a", "0o8", "@"]
def test_tokenizer_pass(self):
for test, expect in self.tests_pass.items():
t = list(Tokenizer(test).tokenize())
self.assertTrue(len(t) == len(expect), f"Length of tokens isn't {len(expect)}")
for i in range(len(expect)):
self.assertIsInstance(t[i], expect[i])
def test_tokenizer_fail(self):
for test in self.tests_fail:
self.assertRaises(SyntaxError, lambda: list(Tokenizer(test).tokenize()))
``` |
{
"source": "jonatan1609/TrashGuy",
"score": 2
} |
#### File: TrashGuy/trashguy/__main__.py
```python
import sys
from .trashguy import TrashGuy
def main(trash_items):
print(TrashGuy(trash_items))
DEFAULT_INPUT = '\U0001F353\U0001F34A\U0001F345'
# 'Temporary' feature to force single character trash items
CMD_LINE = tuple([x for x in sys.argv[1:] if x != ' '])
if CMD_LINE:
main(CMD_LINE)
else:
main(DEFAULT_INPUT)
```
#### File: TrashGuy/trashguy/_tgpy_engine.py
```python
from typing import Tuple
from .lut import _LUT as LUT
import math
# Defined for 'performance' reasons
sqrt = math.sqrt
class FrameEngine:
def __init__(self, trash_items: Tuple[str, ...],
glyph_can: str,
glyph_left: str,
glyph_right: str,
spacer: str):
self.trash_items = trash_items
self.glyph_can = glyph_can
self.glyph_left = glyph_left
self.glyph_right = glyph_right
self.spacer = spacer
# Length of the input items
self.t_len = len(trash_items)
# Calculating frame group sizes and total animation frame count
# Minimum frame group size, i.e. 6 frames long
self.min_fg_size = 6
# Maximum frame group size, i.e. the most of amount of frames needed
# for throwing away just the last item from beginning to end
self.max_fg_size_index = self.min_fg_size + (self.t_len * 2)
# Calculate sum of sizes to get the total animation frame count
self.total_frame_count = (self.min_fg_size +
self.max_fg_size_index) // 2 * self.t_len
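        # Worked example (illustrative): with 3 trash items, min_fg_size = 6 and
        # max_fg_size_index = 6 + 3 * 2 = 12, so total_frame_count = (6 + 12) // 2 * 3 = 27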
def convert_linear_index(self, index: int) -> Tuple[int, bool, int]:
# Map the given index to variables needed for the specific frame
try:
# Attempt to look up the index first in the table
item_index, group_length = LUT[index]
except IndexError:
# Alternate method optimized for higher indices
# Based on submissions by <NAME> (<EMAIL>)
item_index = int(sqrt(10 + index)) - 3
group_length = item_index * 2 + 7
# Median of the frame group size to calculate the apogee of guy
# where guy hits the item and turns around
# (similar to Math.ceil of the median float result)
fg_size_median = group_length // 2
relative_index = index # Integers are immutable
# Faster special case for the first frame group
if group_length == 7:
# Calculate the forward direction with index-to-apogee relation
forward = index < fg_size_median
# Subsequent frame groups require compensation of previous frame group sums
else:
# These equations were generated using AI and algorithms
all_sum = self.min_fg_size + group_length
# The sum of frames from all previous frame groups
previous_frames_sum = all_sum // 2 * item_index
# The current frame index minus the sum of frames from all previous frame groups
# to calculate for our relative position within the current frame group
relative_index = index - previous_frames_sum
# Calculate the forward direction with relative-index-to-apogee relation
forward = relative_index < fg_size_median
# Define where the position should be based on forwards or backwards direction
if forward:
position = relative_index + 2
else:
position = group_length - relative_index - 1
# Return the final variables
return position, forward, item_index
def get_frame(self, index: int) -> str:
# input should be sanitized already but for good measure we
# make sure index is a positive integer over 0 and under max frame len
san_index = int(index)
if san_index < 0 or san_index >= self.total_frame_count:
raise ValueError('index out of range')
position, forward, item_index = self.convert_linear_index(san_index)
# Frame Generator
# Incrementally removes thrown items from the trash pile
trunc_items = self.trash_items[item_index:]
# The items which have been removed
remainder_items = self.trash_items[:item_index]
# Length of the missing items
missing_items_len = len(''.join(remainder_items))
# Calculate the padding based on missing items to keep truncated trash
# in the same position as things get thrown away
# (Only applies to points before reaching the trash can with the item)
padding = [self.spacer] * (missing_items_len + 3)
# Create a static canvas while each item disappears
canvas = [self.glyph_can, *padding, *trunc_items]
# The last item's index that was picked to be trashed
last_index = len(canvas) - len(trunc_items)
# Start sequence, forward motion, going right (> ^_^)>
if forward:
if position < last_index:
# Start from second space after the trash can
canvas[position] = self.glyph_right
# Snapshot the frames of the animation going right
return ''.join(canvas)
# End of forward motion, look left with item
# Set item position in front of trash guy
canvas[position - 1] = canvas[last_index]
# Set position of trash guy where item was
canvas[position] = self.glyph_left
# Snapshot frame looking across at trash can
return ''.join(canvas)
# Reverse motion, going left <(^_^ <)
else:
# Going left with item towards trash can
if position > 0:
canvas[position] = self.glyph_left
# Place item in front while not yet at the trash can
if canvas[position - 1] != self.glyph_can:
canvas[position - 1] = canvas[last_index]
# Temporarily remove item from pile while holding it
canvas[last_index] = self.spacer
else:
# If trash can reached, replace spacing of missing item
if len(self.spacer) == 1:
last_item_len = len(canvas[last_index])
canvas = (canvas[:last_index] +
[self.spacer] * last_item_len +
canvas[last_index + 1:])
else:
# Unknown spacer size, use as directed
canvas[last_index] = self.spacer
# Snapshot the frames of the animation going left
return ''.join(canvas)
else: # End of reverse motion, look right for one frame
canvas[position + 1] = self.glyph_right
# Temporarily remove item from canvas for last frame also
if len(self.spacer) == 1:
last_item_len = len(canvas[last_index])
canvas = (canvas[:last_index] +
[self.spacer] * last_item_len +
canvas[last_index + 1:])
else:
# Unknown spacer size, use as directed
canvas[last_index] = self.spacer
# Snapshot the frame looking right
return ''.join(canvas)
``` |
{
"source": "jonatan5524/doodle-classifier",
"score": 2
} |
#### File: jonatan5524/doodle-classifier/app.py
```python
from flask import Flask
from flask import render_template
from flask_cors import CORS, cross_origin
from flask import request
from model import Model
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
model = Model()
@app.route('/')
def index():
""" Index route of the server
Returns:
Text: The index.html file
"""
return render_template('index.html')
@app.route('/load')
@cross_origin()
def load():
""" Loads the model from the last checkpoint
Returns:
Str: Loaded approval
"""
model.load()
return "loaded"
@app.route('/capture', methods=['POST'])
@cross_origin()
def capture():
""" Predict the current drawing of the user
Returns:
Str: The model prediction
"""
data = request.stream.read()
data = data.decode("utf-8").split(',')
return model.predict(data)
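# Illustrative request (assumed client behaviour, not defined in this file):
#   POST /capture with a comma-separated string of drawing data as the request body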
if __name__ == "__main__":
app.run()
``` |
{
"source": "jonatan5524/ftp-client",
"score": 3
} |
#### File: jonatan5524/ftp-client/ftpclient.py
```python
import ftplib
import argparse
from ftpshell import ftpShell
parser = argparse.ArgumentParser()
parser.add_argument('host',help='host name or ip address of the ftp server')
parser.add_argument('-p','--port',help='port number to connect to, default port number: 21',default=21,type=int)
parser.add_argument('-u','--username',help='username to the ftp server, default username: anonymous',default='anonymous')
parser.add_argument('--password','--pass',help='password to the ftp server, default password: anonymous',default='anonymous')
args = parser.parse_args()
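# Example invocation (hypothetical host and credentials):
#   python ftpclient.py ftp.example.com -p 21 -u alice --pass secret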
class ftpClient:
def __init__(self, host, port, username, password):
self.host = host
self.password = password
self.username = username
self.port = port
self.ftp = ftplib.FTP()
self.shell = ftpShell(self.ftp)
def connect(self):
try:
print(f'connecting to {self.host} : {self.port}\nusername and password {self.username} : {self.password}')
self.ftp.connect(host= self.host, port = self.port)
self.ftp.login(user = self.username, passwd = self.password)
print('login successful')
print(self.ftp.getwelcome())
except ConnectionRefusedError:
print('connection to host refused, check if the server is up')
except TimeoutError:
            print('timeout error, check if the ftp server is up')
except ftplib.error_perm as err:
print(err)
def start_shell(self):
self.shell.prompt = 'FTP>>'
self.shell.cmdloop('Starting prompt...')
def close(self):
self.ftp.quit()
if __name__ == "__main__":
client = ftpClient(args.host, args.port, args.username, args.password)
client.connect()
client.start_shell()
``` |
{
"source": "jonatan5524/own-git",
"score": 2
} |
#### File: own-git/ugit/base.py
```python
from genericpath import exists
import itertools
import operator
import os
import string
from typing import Deque, Dict, Iterator, List, Tuple
from collections import deque, namedtuple
from . import data
from . import diff
def init():
data.init()
data.update_ref("HEAD", data.RefValue(
symbolic=True, value=os.path.join("refs", "heads", "master")))
def create_branch(name: str, object_id: str):
data.update_ref(os.path.join("refs", "heads", name),
data.RefValue(symbolic=False, value=object_id))
def reset(object_id: str):
data.update_ref("HEAD", data.RefValue(symbolic=False, value=object_id))
def create_tag(name: str, object_id: str):
data.update_ref(os.path.join("refs", "tags", name),
data.RefValue(symbolic=False, value=object_id))
def checkout(name: str):
object_id = get_object_id(name)
commit = get_commit(object_id)
read_tree(commit.tree, update_working=True)
if is_branch(name):
head = data.RefValue(
symbolic=True, value=os.path.join("refs", "heads", name))
else:
head = data.RefValue(symbolic=False, value=object_id)
data.update_ref("HEAD", head, deref=False)
def get_branch_name():
head = data.get_ref("HEAD", deref=False)
if not head.symbolic:
return None
head = head.value
assert head.startswith(os.path.join("refs", "heads"))
return os.path.relpath(head, os.path.join("refs", "heads"))
Commit = namedtuple("Commit", ["tree", "parents", "message"])
def iter_branch_names():
for refname, _ in data.iter_refs(os.path.join("refs", "heads")):
yield os.path.relpath(refname, os.path.join("refs", "heads"))
def is_branch(branch: str) -> bool:
return data.get_ref(os.path.join("refs", "heads", branch)).value is not None
def get_commit(object_id: str) -> Commit:
parents = []
commit = data.get_object(object_id, 'commit').decode()
lines = iter(commit.splitlines())
for line in itertools.takewhile(operator.truth, lines):
key, value = line.split(' ', 1)
if key == 'tree':
tree = value
elif key == 'parent':
parents.append(value)
else:
assert False, f'Unknown field {key}'
message = '\n'.join(lines)
return Commit(tree=tree, parents=parents, message=message)
def commit(message: str) -> str:
commit = f"tree {write_tree()}\n"
head = data.get_ref("HEAD").value
if head:
commit += f"parent {head}\n"
merge_head = data.get_ref("MERGE_HEAD").value
if merge_head:
commit += f"parent {merge_head}\n"
        data.delete_ref("MERGE_HEAD", deref=False)
commit += "\n"
    commit += f"{message}\n"
object_id = data.hash_object(commit.encode(), "commit")
data.update_ref("HEAD", data.RefValue(symbolic=False, value=object_id))
return object_id
def write_tree() -> str:
index_as_tree = {}
with data.get_index() as index:
for path, object_id in index.items():
dirpath, filename = os.path.split(path)
current = index_as_tree
            for dirname in (dirpath.split(os.sep) if dirpath else []):
                current = current.setdefault(dirname, {})
current[filename] = object_id
def write_tree_recursive(tree_dict: Dict[str, str]) -> str:
entries = []
for name, value in tree_dict.items():
if type(value) is dict:
type_ = 'tree'
object_id = write_tree_recursive(value)
else:
type_ = 'blob'
object_id = value
entries.append((name, object_id, type_))
tree = ''.join(f'{type_} {object_id} {name}\n'
for name, object_id, type_ in sorted(entries))
return data.hash_object(tree.encode(), "tree")
return write_tree_recursive(index_as_tree)
def is_ignored(path: str) -> bool:
return ".ugit" in path.split("/")
def read_tree(tree_object_id: str, update_working: bool = False):
with data.get_index() as index:
index.clear()
index.update(get_tree(tree_object_id))
if update_working:
_checkout_index(index)
def read_tree_merged(tree_base: str, tree_head: str, tree_other: str, update_working: bool = False):
with data.get_index() as index:
index.clear()
index.update(diff.merge_trees(
get_tree(tree_base),
get_tree(tree_head),
get_tree(tree_other)
))
if update_working:
_checkout_index(index)
def _checkout_index(index):
_empty_current_directory()
for path, object_id in index.items():
os.makedirs(os.path.dirname(os.path.join("./", path)), exist_ok=True)
with open(path, "wb") as f:
f.write(data.get_object(object_id, "blob"))
def get_index_tree():
with data.get_index() as index:
return index
def _empty_current_directory():
for root, dirnames, filenames in os.walk(".", topdown=False):
for filename in filenames:
path = os.path.relpath(os.path.join(root, filename))
if is_ignored(path) or not os.path.isfile(path):
continue
os.remove(path)
for dirname in dirnames:
path = os.path.relpath(os.path.join(root, dirname))
if is_ignored(path):
continue
try:
os.rmdir(path)
except (FileNotFoundError, OSError):
pass
def get_tree(object_id: str, base_path: str = "") -> Dict[str, str]:
result = {}
for fmt, object_id, name in _iter_tree_entries(object_id):
assert "/" not in name
assert name not in ("..", ".")
path = base_path + name
if fmt == "blob":
result[path] = object_id
elif fmt == "tree":
result.update(get_tree(object_id, f"{path}/"))
else:
assert False, f"Uknown tree entry {fmt}"
return result
def get_object_id(name: str) -> str:
if name == "@":
name = "HEAD"
ref_to_try = [
name,
os.path.join("refs", name),
os.path.join("refs", "tags", name),
os.path.join("refs", "heads", name),
]
for ref in ref_to_try:
if data.get_ref(ref, deref=False).value:
return data.get_ref(ref).value
is_hex = all(c in string.hexdigits for c in name)
if len(name) == 40 and is_hex:
return name
assert False, f"Unkown name {name}"
def _iter_tree_entries(object_id: str) -> Iterator[Tuple[str, str, str]]:
if not object_id:
return
tree = data.get_object(object_id, "tree")
for entry in tree.decode().splitlines():
fmt, object_id, name = entry.split(" ", 2)
yield fmt, object_id, name
def iter_commits_and_parents(object_ids: Deque[str]) -> Iterator[str]:
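    # Breadth-first walk over the commit graph; first parents are pushed to the
    # front of the deque (extendleft) so the main line of history is yielded first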
object_ids = deque(object_ids)
visited = set()
while object_ids:
object_id = object_ids.popleft()
if not object_id or object_id in visited:
continue
visited.add(object_id)
yield object_id
commit = get_commit(object_id)
object_ids.extendleft(commit.parents[:1])
object_ids.extend(commit.parents[1:])
def iter_objects_in_commits(object_ids: List[str]) -> Iterator[str]:
visited = set()
def iter_object_in_tree(object_id: str):
visited.add(object_id)
yield object_id
for type_, object_id, _ in _iter_tree_entries(object_id):
if object_id not in visited:
if type_ == "tree":
yield from iter_object_in_tree(object_id)
else:
visited.add(object_id)
yield object_id
for object_id in iter_commits_and_parents(object_ids):
yield object_id
commit = get_commit(object_id)
if commit.tree not in visited:
yield from iter_object_in_tree(commit.tree)
def get_working_tree() -> Dict[str, str]:
result = {}
for root, _, filenames in os.walk("."):
for filename in filenames:
path = os.path.relpath(os.path.join(root, filename))
if is_ignored(path) or not os.path.isfile(path):
continue
with open(path, "rb") as f:
result[path] = data.hash_object(f.read())
return result
def merge(other: str):
head = data.get_ref("HEAD").value
assert head
merge_base = get_merge_base(other, head)
commit_other = get_commit(other)
if merge_base == head:
read_tree(commit_other.tree, update_working=True)
data.update_ref('HEAD', data.RefValue(symbolic=False, value=other))
print('Fast-forward merge, no need to commit')
return
data.update_ref("MERGE_HEAD", data.RefValue(symbolic=False, value=other))
commit_base = get_commit(merge_base)
commit_head = get_commit(head)
read_tree_merged(commit_base.tree, commit_head.tree,
commit_other.tree, update_working=True)
print("Merged in working tree\nPlease commit")
def get_merge_base(object_id: str, second_object_id: str) -> str:
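    # Collects every ancestor of the first commit into a set, then returns the
    # first ancestor of the second commit that also appears in that set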
parents = set(iter_commits_and_parents({object_id}))
for oid in iter_commits_and_parents({second_object_id}):
if oid in parents:
return oid
def is_ancestor_of(commit: str, maybe_ancestor: str) -> bool:
return maybe_ancestor in iter_commits_and_parents({commit})
def add(filenames: List[str]):
def add_file(filename: str):
filename = os.path.relpath(filename)
with open(filename, 'rb') as f:
object_id = data.hash_object(f.read())
index[filename] = object_id
def add_directory(dirname: str):
        for root, _, filenames in os.walk(dirname):
for filename in filenames:
path = os.path.relpath(os.path.join(root, filename))
if is_ignored(path) or not os.path.isfile(path):
continue
add_file(path)
with data.get_index() as index:
for name in filenames:
if os.path.isfile(name):
add_file(name)
elif os.path.isdir(name):
add_directory(name)
```
#### File: own-git/ugit/data.py
```python
from contextlib import contextmanager
import hashlib
import json
import os
import shutil
from typing import Iterator, Tuple
import zlib
from collections import namedtuple
GIT_DIR = None
@contextmanager
def change_git_dir(new_dir):
global GIT_DIR
old_dir = GIT_DIR
GIT_DIR = f'{new_dir}/.ugit'
yield
GIT_DIR = old_dir
def init():
os.makedirs(GIT_DIR)
os.makedirs(os.path.join(GIT_DIR, "objects"))
def hash_object(raw_file: bytes, fmt: str = "blob") -> str:
obj = fmt.encode() + b' ' + str(len(raw_file)).encode() + b'\x00' + raw_file
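    # e.g. a blob containing b"hello" is stored (before compression) as
    # b"blob 5\x00hello", mirroring Git's loose-object layout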
object_id = hashlib.sha1(obj).hexdigest()
path = os.path.join(GIT_DIR, "objects", object_id[0:2], object_id[2:])
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
f.write(zlib.compress(obj))
return object_id
def get_object(object_id: str, expected: str = "blob") -> bytes:
with open(os.path.join(GIT_DIR, "objects", object_id[0:2], object_id[2:]), "rb") as f:
obj = zlib.decompress(f.read())
space_index = obj.find(b' ')
fmt = obj[0:space_index].decode("ascii")
null_index = obj.find(b'\x00', space_index)
size = int(obj[space_index:null_index].decode("ascii"))
content = obj[null_index + 1:]
assert size == len(content), f"bad length for object: {object_id}"
if expected is not None:
assert fmt == expected, f"Expected {expected}, got {fmt}"
return content
RefValue = namedtuple("RefValue", ["symbolic", "value"])
def update_ref(ref: str, refValue: RefValue, deref: bool = True):
ref = _get_ref_internal(ref, deref)[0]
assert refValue.value
if refValue.symbolic:
value = f"ref: {refValue.value}"
else:
value = refValue.value
ref_path = os.path.join(GIT_DIR, ref)
os.makedirs(os.path.dirname(ref_path), exist_ok=True)
with open(ref_path, "w") as f:
f.write(value)
def get_ref(ref: str, deref=True) -> RefValue:
return _get_ref_internal(ref, deref)[1]
def _get_ref_internal(ref: str, deref) -> Tuple[str, RefValue]:
ref_path = os.path.join(GIT_DIR, ref)
value = None
if os.path.isfile(ref_path):
with open(ref_path, "r") as f:
value = f.read().strip()
symbolic = bool(value) and value.startswith("ref:")
if symbolic:
value = value.split(":", 1)[1].strip()
if deref:
return _get_ref_internal(value, deref)
return ref, RefValue(symbolic=symbolic, value=value)
def iter_refs(prefix: str = "", deref: bool = True) -> Iterator[Tuple[str, RefValue]]:
refs = ["HEAD", "MERGE_HEAD"]
for root, _, filenames in os.walk(os.path.join(GIT_DIR, "refs")):
root = os.path.relpath(root, GIT_DIR)
refs.extend(os.path.join(root, name) for name in filenames)
for refname in refs:
if not refname.startswith(prefix):
continue
ref = get_ref(refname, deref=deref)
if ref.value:
yield refname, ref
def delete_ref(ref: str, deref=True):
    ref = _get_ref_internal(ref, deref)[0]
os.remove(os.path.join(GIT_DIR, ref))
def object_exists(object_id: str) -> bool:
return os.path.isfile(os.path.join(GIT_DIR, "objects", object_id))
def fetch_object_if_missing(object_id: str, remote_git_dir: str):
if object_exists(object_id):
return
remote_git_dir += "/.ugit"
os.makedirs(os.path.join(GIT_DIR, "objects",
object_id[:2]), exist_ok=True)
shutil.copy(os.path.join(remote_git_dir, "objects", object_id[:2], object_id[2:]),
os.path.join(GIT_DIR, "objects", object_id[:2], object_id[2:]))
def push_object(object_id: str, remote_git_dir: str):
remote_git_dir += "/.ugit"
os.makedirs(os.path.join(remote_git_dir, "objects",
object_id[:2]), exist_ok=True)
shutil.copy(os.path.join(GIT_DIR, "objects", object_id[:2], object_id[2:]),
os.path.join(remote_git_dir, "objects", object_id[:2], object_id[2:]))
@contextmanager
def get_index():
index = {}
if os.path.isfile(os.path.join(GIT_DIR, "index")):
with open(os.path.join(GIT_DIR, "index")) as f:
index = json.load(f)
yield index
with open(os.path.join(GIT_DIR, "index"), "w") as f:
json.dump(index, f)
``` |
{
"source": "Jonatan966/SODA",
"score": 3
} |
#### File: Modulos/Criptografia/Scrt_Fence.py
```python
import sys
sys.path.append('Nucleo/')
from Nucleo.smain_Variaveis import *
class sMain_Fence:
def __railfence_id(self, tam, key):
        ''' (Railfence, int, int) -> list of int
        Returns a list of integers with the row index
        that each character of the text will occupy,
        ranging from 0 to key - 1.
        '''
j = 0
inc = 0
idx = []
for i in range(tam):
if j == key - 1:
inc = -1
elif j == 0:
inc = 1
idx.append(j)
j += inc
return idx
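    # Worked example (illustrative, not in the original source): for tam=7 and
    # key=3 this method yields [0, 1, 2, 1, 0, 1, 2], i.e. the zigzag row index
    # that each character occupies on the rail fence.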
def encrypt(self, texto, key):
''' (Railfence, str, int) -> str
            Returns the plain text enciphered with the rail
            fence cipher using the key `key`.
'''
texto = texto.replace(' ', '')
tam = len(texto)
idx = self.__railfence_id(tam, key)
cifrado = ''
for i in range(key):
for z in range(tam):
if idx[z] == i:
cifrado += texto[z]
return cifrado.upper()
def decrypt(self, texto, key):
''' (Railfence, str, int) -> str
            Returns the plain text for a text enciphered
            with the rail fence cipher using the key `key`.
'''
texto = texto.replace(' ', '')
tam = len(texto)
idx = self.__railfence_id(tam, key)
idx_sorted = sorted(idx)
texto_plano = ''
for i in range(tam):
for j in range(tam):
if idx[i] == idx_sorted[j] and idx[i] > -1:
texto_plano += texto[j]
idx[i] = -1
idx_sorted[j] = -1
return texto_plano.lower()
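# Illustrative round trip (not in the original source; the text and key are
# arbitrary example values):
#   sMain_Fence().encrypt('hello world', 3)  -> 'HORELWLDLO'
#   sMain_Fence().decrypt('HORELWLDLO', 3)   -> 'helloworld'
# Spaces are stripped before enciphering, so they are not recovered on decryption.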
def crt_FENCE():
sMain_Avisos().Welcome('FENCE')
while True:
option = sMain_Saidas().Prompt('CRIPTOGRAFIA', 'FENCE')
if option == 'CIFRAR' or option == '0':
txt = sMain_Avisos().Resposta('Digite o texto a ser cifrado', 'STRING')
senha = sMain_Avisos().Resposta(f'Digite a senha (SOMENTE NÚMEROS) (Tamanho da senha: {len(txt)})', 'INT')
if senha >= len(txt) or senha <= 1:
sMain_Avisos().Resposta("ERRO: A senha não pode ser, maior ou igual ao tamanho do texto, igual ou menor que 1")
else:
sMain_Avisos().Resposta(sMain_Fence().encrypt(txt, senha))
elif option == 'DECIFRAR' or option == '1':
txt = sMain_Avisos().Resposta('Digite o texto a ser decifrado', 'STRING')
            senha = sMain_Avisos().Resposta('Digite a senha (SOMENTE NÚMEROS)', 'INT')
if senha >= len(txt) or senha <= 1:
sMain_Avisos().Resposta("ERRO: A senha não pode ser, maior ou igual ao tamanho do texto, igual ou menor que 1")
else:
sMain_Avisos().Resposta(sMain_Fence().decrypt(txt, senha))
elif option == 'SAIR' or option == 'QUIT' or option == 'VOLTAR':
break
```
#### File: Modulos/Matematica/Smat_Trigonometria.py
```python
import sys
from Nucleo import *
import math
def mat_TRIGONOMETRIA():
sMain_Avisos().Welcome('TRIGONOMETRIA')
while True:
try:
option = sMain_Saidas().Prompt('MATEMATICA', 'TRIGONOMETRIA')
if option == 'PROGRESSAO ARITMETICA' or option == 'P.A' or option == '0':
n1 = sMain_Avisos().Resposta('Digite o primeiro termo de sua P.A', 'FLOAT')
n2 = sMain_Avisos().Resposta('Digite o segundo termo de sua P.A','FLOAT')
n3 = sMain_Avisos().Resposta('Digite o termo a ser descoberto', 'FLOAT')
sMain_Avisos().Resposta(f'O {n3}º termo de sua PA é:\n\nAn = {n1}+({n3}-1)*{n2-n1} = {n1 + (n3 - 1) * (n2 - n1)}')
elif option == 'PROGRESSAO GEOMETRICA' or option == 'P.G' or option == '1':
n1 = sMain_Avisos().Resposta('Digite o termo a ser descoberto', 'FLOAT')
sMain_Avisos().Resposta(f'O {n1}º termo da PG é:\n({n1}²+{n1})/2 = {(n1**2 + n1)/2}')
elif option == 'RAZAO' or option == '2':
n1 = sMain_Avisos().Resposta('Digite o primeiro termo de sua sequencia', 'FLOAT')
n2 = sMain_Avisos().Resposta('Digite o segundo termo de sua sequencia', 'FLOAT')
sMain_Avisos().Resposta(f'A razao de sua sequencia é:\n{n2}-{n1} = {n2-n1}')
elif option == 'SOMA DOS TERMOS DE UMA PA' or option == 'S.N' or option == '3':
n1 = sMain_Avisos().Resposta('Digite o numero de termos de sua P.A', 'FLOAT')
n2 = sMain_Avisos().Resposta('Digite a razao de sua P.A', 'FLOAT')
n3 = sMain_Avisos().Resposta('Digite o primeiro termo de sua P.A', 'FLOAT')
an = n3 + (n1 - 1)*n2
sMain_Avisos().Resposta(f'O ultimo termo desta P.A equivale a:\n\nAn = {n3} + ({n1} - 1)*{n2}\nAn = {n3 + (n1 - 1)*n2}')
sMain_Avisos().Resposta(f'A soma dos termos desta P.A equivale a:\n\nSn = ({n1}({n2} + {an}))/2 = {(n1*(n2 + an))/2}')
elif option == 'NUMEROS TRIANGULARES' or option == 'N.T' or option == '4':
n1 = sMain_Avisos().Resposta('Digite o termo desejado (n)', 'INT')
sMain_Avisos().Resposta(f'O {n1}º termo é: {(n1 * (n1 + 1)) / 2}')
elif option == 'DELTA' or option == '5':
sMain_Avisos().Resposta('OBS: Caso a formula não tenha um dos termos , digite 0')
n1 = sMain_Avisos().Resposta('Digite o termo (a)', 'FLOAT')
n2 = sMain_Avisos().Resposta('Digite o termo (b)', 'FLOAT')
n3 = sMain_Avisos().Resposta('Digite o termo (c)', 'FLOAT')
r = n2 ** 2 - 4 * n1 * n3
if r <= 0:
sMain_Avisos().Resposta('Delta igual ou menor que 0')
else:
sMain_Avisos().Resposta(f'Delta é igual a: {n2}²-4*{n1}*{n3} = {r}')
r = math.sqrt(r)
                    r2 = (-n2 + r) / (2 * n1)
                    r3 = (-n2 - r) / (2 * n1)
                    sMain_Avisos().Resposta(f'+X é igual a: (-{n2}+{r})/(2*{n1}) = {r2}')
                    sMain_Avisos().Resposta(f'-X é igual a: (-{n2}-{r})/(2*{n1}) = {r3}')
elif option == 'HIPOTENUSA' or option == '6':
n1 = sMain_Avisos().Resposta('Digite o valor de um cateto', 'FLOAT')
n2 = sMain_Avisos().Resposta('Digite o valor de outro cateto', 'FLOAT')
sMain_Avisos().Resposta(f'A hipotenusa é igual a:\nH² = {n1}² + {n2}² = {math.sqrt(n1**2 + n2**2)}²\nH = {n1**2} + {n2**2} = {n1**2 + n2**2}')
elif option == 'CATETO' or option == '7':
n1 = sMain_Avisos().Resposta('Digite o valor do cateto', 'FLOAT')
n2 = sMain_Avisos().Resposta('Digite o valor da HIPOTENUSA', 'FLOAT')
sMain_Avisos().Resposta(f'C² + {n1}² = {n2}²\nC² = {n2**2} - {n1**2}\nC² = {n2**2 - n1**2}\nC = {math.sqrt(n2**2 - n1**2)}')
elif option == 'VOLTAR' or option == 'SAIR' or option == 'QUIT':
break
elif option == '':
pass
else:
sMain_Avisos().nFound(option)
except ValueError:
sMain_Avisos().Erro(0)
```
#### File: SODA/Nucleo/smain_Quimica.py
```python
qui_version = 'MODULO QUIMICA v0.0.1'
import sys
sys.path.append('Nucleo/')
sys.path.append('Modulos/Quimica/')
from Nucleo.smain_Handler import *
def soda_QUIMICA():
sMain_Saidas().Welcome("QUIMICA")
while True:
q = sMain_Saidas().Prompt("QUIMICA")
handler = sMain_Handlers().quiHandler(q)
if handler != None:
handler()
elif q == 'SAIR' or q == 'QUIT' or q == 'VOLTAR':
break
elif q == '':
pass
else:
sMain_Avisos().nFound(q, qui_version)
```
#### File: Jonatan966/SODA/Smain.py
```python
from Nucleo import *
print(f'{soda_version}\nDigite INFO para mais informações.\n')
def getHandler(name):
name = name
return globals().get(f"soda_{name}")
def Main():
while True:
resp = sMain_Saidas().Prompt()
handler = getHandler(resp)
if handler != None :
handler()
elif resp == 'SAIR' or resp == 'QUIT':
q = input('Deseja mesmo sair? <S/N> ').upper().strip()
if q == 'Y' or q == 'YES' or q == 'SIM' or q == 'S':
sys.exit()
elif q == 'N' or q == 'NAO' or q == 'NO':
continue
elif resp == '':
pass
else:
sMain_Avisos().nFound(resp)
Main()
``` |
{
"source": "JonatanAntoni/Open-CMSIS-Pack-Spec",
"score": 2
} |
#### File: Open-CMSIS-Pack-Spec/test/pack_schema_version_history.py
```python
import re
import subprocess
import sys
from pathlib import Path
from dateutil.parser import parse
DIRNAME = Path(__file__).parent.absolute()
PATTERN = "::error file={},line={}::{}"
rc = 0
def log_error(file, line, message):
global rc
print(PATTERN.format(file.relative_to(DIRNAME.joinpath("..").resolve()), line, message))
rc = 1
def get_from_cmd(cmd, cwd=Path.cwd()):
print("./"+cwd.relative_to(Path.cwd()).as_posix(), "$", " ".join(cmd))
result = subprocess.run(cmd, capture_output=True, cwd=cwd)
stdout = result.stdout.decode("utf-8").strip()
stderr = result.stderr.decode("utf-8").strip()
if stdout:
print(">", stdout)
if stderr:
print(">", stderr)
print()
return stdout
def main():
print("Checking PACK.xsd version information...\n")
schema_file = DIRNAME.joinpath('../schema/PACK.xsd').resolve()
dxy_file = DIRNAME.joinpath('../doxygen/pack.dxy').resolve()
doc_file = DIRNAME.joinpath('../doxygen/src/General.txt').resolve()
date_pattern = re.compile('\\$Date:\\s+(\\d+\\. \\w+ \\d{4})')
rev_pattern = re.compile('\\$Revision:\\s+(\\d+\\.\\d+\\.\\d+)')
version_pattern = re.compile('SchemaVersion=(\\d+\\.\\d+\\.\\d+)')
history_pattern = re.compile('(\\d+\\. \\w+ \\d{4}): v(\\d+\\.\\d+\\.\\d+)')
property_pattern = re.compile('^\\s*<xs:schema.*version="([^"]+)".*>\\s*$')
date = (None, 0)
revision = (None, 0)
version = (None, 0)
history = (None, 0)
xsproperty = (None, 0)
with(open(schema_file)) as schema:
for i, line in enumerate(schema):
date = (date_pattern.search(line), i) if not date[0] else date
revision = (rev_pattern.search(line), i) if not revision[0] else revision
version = (version_pattern.search(line), i) if not version[0] else version
history = (history_pattern.search(line), i) if not history[0] else history
xsproperty = (property_pattern.search(line), i) if not xsproperty[0] else xsproperty
date = (parse(date[0][1]), date[1] + 1) if date[0] else None
revision = (revision[0][1], revision[1] + 1) if revision[0] else None
version = (version[0][1], version[1] + 1) if version[0] else None
history = (parse(history[0][1]), history[0][2], history[1] + 1) if history[0] else None
xsproperty = (xsproperty[0][1], xsproperty[1] + 1) if xsproperty[0] else None
author_date = parse(get_from_cmd(['git', 'log', '-1', '--pretty=%ad', '--date=format:%Y-%m-%d', schema_file.name],
cwd=schema_file.parent))
base_rev = get_from_cmd(['git', 'log', '-1', '--pretty=%P', schema_file.name], cwd=schema_file.parent)
head_rev = get_from_cmd(["git", "log", "-1", '--pretty=%H', schema_file.name], cwd=schema_file.parent)
blame = get_from_cmd(["git", "blame", f"{base_rev}..{head_rev}", "-l", "-L", f"{revision[1]},{revision[1]}",
schema_file.name], cwd=schema_file.parent)
blamed_rev = blame.split(' ')[0]
    dxy_version_pattern = re.compile('PROJECT_NUMBER\\s*=\\s*"Version (\\d+\\.\\d+\\.\\d+)"')
dxy_version = (None, 0)
with(open(dxy_file)) as dxy:
for i, line in enumerate(dxy):
dxy_version = (dxy_version_pattern.search(line), i) if not dxy_version[0] else dxy_version
dxy_version = (dxy_version[0][1], dxy_version[1] + 1) if dxy_version[0] else None
history_version_pattern = re.compile('<td>(\\d+\\.\\d+\\.\\d+)</td>')
history_version = dict()
with(open(doc_file)) as doc:
accept = False
for i, line in enumerate(doc):
if line == '<table class="cmtable" summary="Revision History">\n':
accept = True
if accept:
                if line == '</table>\n':
break
v = history_version_pattern.search(line)
if v:
history_version[i + 1] = v[1]
if not date:
log_error(schema_file, 0, "Modification date tag '$Date:' missing!")
elif date[0] != author_date:
log_error(schema_file, date[1], "Modification date tag not updated, "
f"should be {author_date.strftime('%d. %b %Y')}")
if not revision:
log_error(schema_file, 0, "Latest version tag '$Revision:' missing!")
elif head_rev != blamed_rev:
log_error(schema_file, revision[1], f"Revision tag not updated, should be incremented!")
else:
if not version:
log_error(schema_file, 0, "Schema version tag 'SchemaVersion' missing!")
elif version[0] != revision[0]:
log_error(schema_file, version[1], f"Schema version tag not updated, should be {revision[0]}")
if not history:
log_error(schema_file, 0, "Change history missing!")
elif history[1] != revision[0] or history[0] != author_date:
log_error(schema_file, history[2], "Change history not updated, should contain "
f"{author_date.strftime('%d. %B %Y')}: v{revision[0]}")
if not xsproperty:
log_error(schema_file, 0, "xs:schema property 'version' missing!")
elif xsproperty[0] != revision[0]:
log_error(schema_file, xsproperty[1], f"xs:schema property 'version' not updated, should be {revision[0]}")
if not dxy_version:
        log_error(dxy_file, 0, "PROJECT_VERSION missing!")
elif dxy_version[0] != revision[0]:
log_error(dxy_file, dxy_version[1], f"PROJECT_VERSION not updated, should be {revision[0]}")
if revision[0] not in history_version.values():
line = sorted(history_version.keys())[0]
log_error(doc_file, line, "Revision History not updated, should contain "
f"table row for version {revision[0]}")
if __name__ == "__main__":
if 'schema/PACK.xsd' in sys.argv \
or 'test/pack_schema_version_history.py' in sys.argv:
main()
sys.exit(rc)
``` |
{
"source": "jonatanbarkan/CausalInference",
"score": 2
} |
#### File: jonatanbarkan/CausalInference/us_data_ncc_run_script.py
```python
import argparse
import json
import os
from datetime import datetime
from itertools import product
import jsbeautifier
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from CausalDiscuveryToolboxClone.Models.NCC import NCC
from utils.data_loader import load_split_data
from utils.visualization import make_separate_plots
def save_json(output_path, model_name, **kwargs):
output_path = os.path.join(output_path, model_name + '.json')
with open(output_path, "w", encoding="utf8") as f:
opts = jsbeautifier.default_options()
opts.indent_size = 4
f.write(jsbeautifier.beautify(json.dumps(kwargs), opts))
def get_network(filename='', freeze_encoder=False, num_effect=1, **kwargs):
assert num_effect in [1, 2]
obj = NCC()
if filename: # transfer learning
obj.load_model(os.path.join(os.getcwd(), 'Models'), file_path=filename + '.pth', **kwargs)
else:
obj.get_model(**kwargs)
if freeze_encoder:
obj.freeze_weights('encoder')
if num_effect == 2:
obj.anti = False
return obj
def train(obj, train_data, train_labels, validation_data, validation_labels, epochs=10, learning_rate=1e-4,
optimizer='rms', **kwargs):
obj.create_loss(learning_rate, optimizer=optimizer)
logs = obj.train(train_data, train_labels, validation_data, validation_labels, epochs=epochs, batch_size=16, )
return logs
def split_data(dat, lab, train_size=0.8):
return train_test_split(dat, lab, train_size=train_size)
def create_df(pair_1, pair_2, labels=()):
pair_1_x, pair_1_y = np.hstack(pair_1)
pair_2_x, pair_2_y = np.hstack(pair_2)
aggregated_pair_1 = np.hstack([pair_1_x.reshape(-1, 1), pair_1_y.reshape(-1, 1)])
aggregated_pair_2 = np.hstack([pair_2_x.reshape(-1, 1), pair_2_y.reshape(-1, 1)])
aggregated = np.vstack([aggregated_pair_1, aggregated_pair_2])
df = pd.DataFrame(aggregated, columns=['x', 'y'])
df['kind'] = [labels[0]] * aggregated_pair_1.shape[0] + [labels[1]] * aggregated_pair_2.shape[0]
return df
def save_model(obj, folder_path: str, name: str):
obj.save_model(folder_path, file_path=f'{name}.pth')
def run_model(FLAGS, **kwargs):
# create save path
plots_path = os.path.join(os.getcwd(), "Results", FLAGS.save_model_name)
model_path = os.path.join(os.getcwd(), "Models")
jsons_path = os.path.join(os.getcwd(), "Jsons")
split_data_path = os.path.join(os.getcwd(), "SplitData")
os.makedirs(plots_path, exist_ok=True)
os.makedirs(model_path, exist_ok=True)
os.makedirs(jsons_path, exist_ok=True)
os.makedirs(split_data_path, exist_ok=True)
name = '_'.join([FLAGS.data_file_1, FLAGS.data_file_2])
X_tr, y_tr = load_split_data(split_data_path, name + '_train')
X_val, y_val = load_split_data(split_data_path, name + '_val')
# load/initialize network
network = get_network(FLAGS.loaded_model_name, FLAGS.freeze_encoder, FLAGS.num_effects,
n_hiddens=FLAGS.n_hiddens,
kernel_size=3,
dropout_rate=FLAGS.dropout_rate,
additional_num_hidden_layers=FLAGS.additional_num_hidden_layers)
# train network
network.create_loss(learning_rate=FLAGS.learning_rate, optimizer=FLAGS.optimizer)
try:
network.train(X_tr, y_tr, X_val, y_val, epochs=FLAGS.epochs, batch_size=FLAGS.batch_size, )
except KeyboardInterrupt:
pass
logged_values = network.get_log_dict()
make_separate_plots(logged_values, plot_path=plots_path, model_name=FLAGS.save_model_name)
save_model(network, folder_path=model_path, name=FLAGS.save_model_name)
save_json(jsons_path, FLAGS.save_model_name, **vars(FLAGS))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-num_effects', default=1, choices={1, 2})
parser.add_argument('-data_file_1', default='medium_2_causal')
parser.add_argument('-data_file_2', default='')
# parser.add_argument('-data_file_1', default='medium_3_causal')
# parser.add_argument('-data_file_2', default='medium_3_confounded')
parser.add_argument('-save_model_name', default='large_experiment_1_model')
parser.add_argument('-epochs', default=3, type=int)
parser.add_argument('-loaded_model_name', default='')
parser.add_argument('-freeze_encoder', default=0, choices={0, 1})
parser.add_argument('-learning_rate', default=1e-4)
parser.add_argument('-optimizer', default='rms', choices={'rms', 'adam', 'momentum'})
parser.add_argument('-batch_size', default=16, choices={8, 16, 32, 64})
parser.add_argument('-n_hiddens', default=100, choices={50, 100, 500})
parser.add_argument('-dropout_rate', default=0., choices={0., 0.1, 0.25, 0.3})
parser.add_argument('-additional_num_hidden_layers', default=0, choices={0, 1})
arguments = parser.parse_args()
if arguments.num_effects == 1:
arguments.data_file_1 = 'large_3_causal'
arguments.data_file_2 = ''
elif arguments.num_effects == 2:
arguments.data_file_1 = 'large_4_causal'
arguments.data_file_2 = 'large_4_confounded'
run_grid = False
save_model_name = arguments.save_model_name
if run_grid:
learning_rates = [0.01]
optimizers = ['rms', 'momentum']
additional_num_hidden_layers = [0, 1]
dropout_rates = [0.0, 0.1, 0.25, 0.3]
num_hiddens = [50, 100, 500]
else:
learning_rates = [1e-2]
optimizers = ['rms', 'momentum']
additional_num_hidden_layers = [0]
dropout_rates = [0.0]
num_hiddens = [50, 100, 500]
grid = product(num_hiddens, learning_rates, optimizers, additional_num_hidden_layers, dropout_rates)
print('lr, opt, add_layers, p, n_hidd:')
for n_hidd, lr, opt, add_layers, p in grid:
print(lr, opt, add_layers, p, n_hidd)
arguments.learning_rate = lr
arguments.optimizer = opt
arguments.additional_num_hidden_layers = add_layers
arguments.dropout_rate = p
arguments.n_hiddens = n_hidd
arguments.save_model_name = save_model_name + datetime.now().strftime('_%y%m%d_%H%M%S')
run_model(arguments)
```
#### File: CausalInference/utils/confounded_pair_generation_ncc.py
```python
from CausalDiscuveryToolboxClone.Models.NCC import NCC
import networkx as nx
import matplotlib.pyplot as plt
from cdt.data import load_dataset
from sklearn.model_selection import train_test_split
from CausalDiscuveryToolboxClone.DataGeneration import functions
import scipy
from scipy.interpolate import PchipInterpolator, CubicHermiteSpline, UnivariateSpline
import numpy as np
from scipy.special import expit
import os
# data, labels = load_dataset('tuebingen')
# data, labels = functions.swap_cause_effect(data, labels)
def draw_mixture_weights(k_i):
mw = np.abs(np.random.standard_normal(k_i))
return mw / np.sum(mw)
def draw_mechanism():
pass
def draw_cause(k_i, r_i, s_i, m):
w = draw_mixture_weights(k_i)
mu = np.random.normal(0., r_i)
sd = np.abs(np.random.normal(0., s_i))
x = np.dot(w, np.random.normal(loc=mu, scale=sd, size=(k_i, m)))
return (x - x.mean()) / x.std()
def reduce_support(f, support):
def supported(*args):
x = args[0]
y = f(x)
cond = (x > support[1]) | (x < support[0])
y[cond] = 0
return y
return supported
def create_mechanism(a_knots, b_knots, support):
f = PchipInterpolator(a_knots, b_knots)
return reduce_support(f, support)
def create_noise_mechanism(a_knots, b_knots, support):
f = UnivariateSpline(a_knots, b_knots)
return reduce_support(f, support)
def generate_noiseless_effect(f, cause):
effect = f(cause)
effect = (effect - effect.mean()) / effect.std()
return effect
def generate_effect(cause, support, n_knots):
    # Note: reconstructed from the loop in __main__; draws a random PCHIP mechanism
    # on `support` and applies it to `cause`.
    x_knots = np.linspace(*support, n_knots)
    y_knots = np.random.normal(0., 1., n_knots)
    f = create_mechanism(x_knots, y_knots, support)
    return generate_noiseless_effect(f, cause)
if __name__ == '__main__':
save = True
folder_path = os.path.dirname(os.getcwd())
name = 'non_causal_xy_temp'
n = 20
# m = 30
m = np.random.randint(100, 1500, n)
r = 5 * np.random.random(n)
s = 5 * np.random.random(n)
k = np.random.randint(1, 6, n)
d = np.random.randint(4, 6, n)
v = 5 * np.random.random(n)
S = []
L = []
for i in range(n):
# m_i = m[i]
m_i = m[0]
x_i = draw_cause(k[i], r[i], s[i], m_i)
sd_i = x_i.std()
support_i = [x_i.min() - sd_i, x_i.max() + sd_i]
x_i_knots = np.linspace(*support_i, d[i])
y_i_knots = np.random.normal(0., 1., d[i])
f_i = create_mechanism(x_i_knots, y_i_knots, support_i)
y_i = generate_noiseless_effect(f_i, x_i)
e_i = np.random.normal(0., v[i], m_i)
v_x_knots = np.linspace(*support_i, d[i])
v_y_knots = np.random.uniform(0, 5, d[i])
v_spline = create_mechanism(x_i_knots, v_y_knots, support_i)
v_i = v_spline(x_i)
noise_i = e_i * v_i
y_noisy = y_i + noise_i
y_noisy = (y_noisy - y_noisy.mean()) / y_noisy.std()
# print(np.abs(y_noisy - y_i))
S.append([x_i, y_noisy])
L.append(0)
S = np.array(S)
L = np.array(L)
if save:
data_folder_path = os.path.join(folder_path, 'Data')
os.makedirs(data_folder_path, exist_ok=True)
np.savez_compressed(os.path.join(data_folder_path, name), data=S, labels=L)
```
#### File: CausalInference/utils/visualization.py
```python
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import os
sns.set_style("whitegrid")
def make_separate_plots(logged_values, plot_path: str, model_name: str):
file_path = os.path.join(plot_path, f'{model_name}_accuracy')
fig, ax = plt.subplots(dpi=300)
plot_titles = list(logged_values.keys())
dict_hierarchy = {(plot_title, plot_type): values for plot_title, train_val_dict in logged_values.items() for
plot_type, values in train_val_dict.items()}
df_plot = pd.DataFrame(dict_hierarchy)
# df_plot.set_index(pd.Index(range(1, df_plot.shape[0] + 1)), inplace=True)
for i, plot_title in enumerate(plot_titles):
sns.lineplot(data=df_plot[plot_title], ax=ax)
ax.set_title(plot_title)
ax.set_ylabel('' if plot_title.lower() in ['symmetry'] else r'1-acc')
ax.set_ylim(-0.01, 1.01)
ax.set_xlabel('Epochs')
# plt.tight_layout()
plt.savefig(file_path + f'_{plot_title}_accuracy.png')
ax.cla()
df_plot.to_csv(os.path.join(plot_path, f'{model_name}_accuracy.csv'))
def make_plots(logged_values, plot_path: str, model_name: str):
file_path = os.path.join(plot_path, f'{model_name}_accuracy.png')
fig, ax = plt.subplots(4, sharex=True, dpi=300)
ax = ax.flat
plot_titles = list(logged_values.keys())
dict_hierarchy = {(plot_title, plot_type): values for plot_title, train_val_dict in logged_values.items() for
plot_type, values in train_val_dict.items()}
df_plot = pd.DataFrame(dict_hierarchy)
# df_plot.set_index(pd.Index(range(1, df_plot.shape[0] + 1)), inplace=True)
for i, plot_title in enumerate(plot_titles):
sns.lineplot(data=df_plot[plot_title])
# ax[i].set_title(plot_title)
ax[i].set_title(plot_title)
ax[i].set_ylabel('' if plot_title.lower() in ['symmetry'] else r'1-acc')
ax[i].set_ylim(-0.05, 1.05)
ax[-1].set_xlabel('Epochs')
plt.tight_layout()
plt.savefig(file_path)
plt.close(fig)
df_plot.to_csv(os.path.join(plot_path, f'{model_name}_accuracy.csv'))
``` |
{
"source": "jonatanbarkan/vision_loop",
"score": 3
} |
#### File: jonatanbarkan/vision_loop/functions.py
```python
from __future__ import division
import cv2
import numpy as np
import tensorflow as tf
def downgrade(image, target_res_width):
target_res_width = int(target_res_width)
r = target_res_width / image.shape[1]
dim = (target_res_width, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
return resized
def rotate_image_180(image):
# grab the dimensions of the image and calculate the center
# of the image
(h, w) = image.shape[:2]
center = (w / 2, h / 2)
# rotate the image by 180 degrees
M = cv2.getRotationMatrix2D(center, 180, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
return rotated
def vis_conv(v,ix,iy,ch,cy,cx, p = 0) :
v = np.reshape(v,(iy,ix,ch))
ix += 2
iy += 2
npad = ((1,1), (1,1), (0,0))
v = np.pad(v, pad_width=npad, mode='constant', constant_values=p)
v = np.reshape(v,(iy,ix,cy,cx))
v = np.transpose(v,(2,0,3,1)) #cy,iy,cx,ix
v = np.reshape(v,(cy*iy,cx*ix))
return v
def regularize(frame):
frame = np.divide(frame, [255])
return frame
def visualize(W):
# scale weights to [0 255] and convert to uint8 (maybe change scaling?)
x_min = tf.reduce_min(W)
x_max = tf.reduce_max(W)
weights_0_to_1 = tf.div(tf.sub(W, x_min), tf.sub(x_max, x_min))
weights_0_to_255_uint8 = tf.image.convert_image_dtype(weights_0_to_1, dtype=tf.uint8)
# to tf.image_summary format [batch_size, height, width, channels]
weights_transposed = tf.transpose(weights_0_to_255_uint8, [3, 0, 1, 2])
return weights_transposed
def weight_variable(shape):
# initial = tf.zeros(shape, dtype=tf.float32)
# initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
# initial = tf.zeros(shape, dtype=tf.float32)
initial = tf.div(tf.ones(shape, dtype=tf.float32), shape[0]*shape[1])
return tf.Variable(initial, trainable=True)
def bias_variable(shape):
initial = tf.constant(0.01, shape=shape, dtype=tf.float32)
return tf.Variable(initial)
def forward_conv2d(x, W, b, stride_size=1, act = tf.nn.relu, name=''):
with tf.name_scope('convolution'):
conv = tf.nn.conv2d(x, W, strides=[1, stride_size, stride_size, 1], padding='SAME', name=name+'convolution')
variable_summaries(conv, 'convolution')
with tf.name_scope('preactivate'):
preactivate = conv + b
variable_summaries(preactivate, 'preactivate')
with tf.name_scope('activate'):
activate = tf.minimum(tf.maximum(conv, 0),1, name=name+'activation')
return activate
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor."""
with tf.name_scope(name + '_' + 'summaries'):
mean = tf.reduce_mean(var)
tf.scalar_summary(name + '_' + 'mean/' + name, mean)
with tf.name_scope(name + '_' + 'stddev'):
stddev = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(var, mean))))
tf.scalar_summary(name + '_' + 'sttdev/' + name, stddev)
tf.scalar_summary(name + '_' + 'max/' + name, tf.reduce_max(var))
tf.scalar_summary(name + '_' + 'min/' + name, tf.reduce_min(var))
tf.histogram_summary(name, var)
``` |
{
"source": "jonatanblue/dinghy",
"score": 2
} |
#### File: jonatanblue/dinghy/test_keeper.py
```python
import logging
import unittest
import os
import glob
import shutil
import tarfile
import keeper
from keeper import Keeper
# Enable verbose logs for tests
logger = logging.getLogger()
logger.level = logging.DEBUG
# Get current directory
BASE_DIRECTORY = os.getcwd()
# Override rundeck service check for unittests
def rundeck_is_running(arg):
return False
Keeper._rundeck_is_running = rundeck_is_running
class MockedKeeper(Keeper):
def __init__(self, *args):
pass
class TestKeeper(unittest.TestCase):
"""Tests for `keeper.py`"""
def setUp(self):
"""Set up defaults for all tests"""
self.maxDiff = None
def _create_dir(self, path):
"""Creates directory"""
if not os.path.exists(path):
os.makedirs(path)
def _purge_directory(self, path):
"""Purges a directory and all its subdirectories
WARNING: This will recursively delete the directory and all
subdirectories forever.
"""
shutil.rmtree(path)
def _list_files_in_tar(self, path):
"""Returns list of all file paths inside a tar file"""
with tarfile.open(path, 'r:gz') as archive:
return archive.getnames()
def test_instantiating(self):
"""Test that Keeper class can be instantiated"""
directories = [
"/var/lib/rundeck/data", # database
"/var/lib/rundeck/logs", # execution logs (by far biggest)
"/var/lib/rundeck/.ssh", # ssh keys
"/var/lib/rundeck/var/storage", # key storage files and metadata
"/var/rundeck/projects" # project definitions
]
Keeper(system_directories=directories)
def test_has_overlap(self):
"""Test that overlap check works"""
overlapping_dirs = [
"/tmp/a/b",
"/tmp/a"
]
keeper = MockedKeeper()
self.assertTrue(keeper._has_duplicate_or_overlap(overlapping_dirs))
def test_has_overlap_reverse(self):
"""Test that overlap check works"""
overlapping_dirs = [
"/tmp/a",
"/tmp/a/b"
]
keeper = MockedKeeper()
self.assertTrue(keeper._has_duplicate_or_overlap(overlapping_dirs))
def test_has_duplicate(self):
"""Test that duplicate check works"""
duplicate_dirs = [
"/tmp/a/b",
"/tmp/a/b"
]
keeper = MockedKeeper()
self.assertTrue(keeper._has_duplicate_or_overlap(duplicate_dirs))
def test_valid_path_list(self):
"""Test that a valid path list is valid according to check"""
valid_dirs = [
"/tmp/a/b/c",
"/tmp/a/b/d",
"/tmp/q",
"/var/troll"
]
keeper = MockedKeeper()
self.assertFalse(keeper._has_duplicate_or_overlap(valid_dirs))
def test_raises_exception_on_relative_paths(self):
"""Test that relative paths raise an exception"""
contains_relative_paths = [
"some/path/here",
"some/other/path",
"/this/is/valid/though"
]
with self.assertRaises(Exception):
Keeper(system_directories=contains_relative_paths)
def test_raises_exception_on_overlapping_dirs(self):
"""Test that exception is raised for overlapping dirs
Passing overlapping directories should raise an exception.
For example /tmp/a/b/c,/tmp/a/b should fail
"""
# Set bad directories
bad_directories = [
"/tmp/keeper_python_unittest_raises/a/b/c",
"/tmp/keeper_python_unittest_raises/a/b"
]
# Set sails
with self.assertRaises(Exception):
Keeper(system_directories=bad_directories)
def test_raises_exception_on_overlapping_dirs_reversed(self):
"""Test that exception is raised for overlapping dirs.
For example /tmp/a/b,/tmp/a/b/c should fail
"""
# Set bad directories
bad_directories = [
"/tmp/keeper_python_unittest_raises/a/b",
"/tmp/keeper_python_unittest_raises/a/b/c"
]
# Set sails
with self.assertRaises(Exception):
Keeper(system_directories=bad_directories)
def test_backup(self):
"""Test creating a backup file from a set of directories"""
cwd = os.getcwd()
# Set paths
file_paths = [
cwd + "/tmp/keeper_test_backup/house/room/file1.txt",
cwd + "/tmp/keeper_test_backup/house/room/desk/file2.txt",
cwd + "/tmp/keeper_test_backup/house/room/desk/file3.txt",
cwd + "/tmp/keeper_test_backup/house/room/desk/drawer/file4",
cwd + "/tmp/keeper_test_backup/house/room/locker/file5.txt"
]
folder_paths_to_create = [
cwd + "/tmp/keeper_test_backup/house/room/desk/drawer",
cwd + "/tmp/keeper_test_backup/house/room/locker"
]
directories_to_backup = [
cwd + "/tmp/keeper_test_backup/house/room/desk/drawer",
cwd + "/tmp/keeper_test_backup/house/room/locker"
]
files_expected_in_tar = [
os.path.join(
cwd.strip("/"),
"tmp/keeper_test_backup/house/room/desk/drawer"
),
os.path.join(
cwd.strip("/"),
"tmp/keeper_test_backup/house/room/desk/drawer/file4"
),
os.path.join(
cwd.strip("/"),
"tmp/keeper_test_backup/house/room/locker"
),
os.path.join(
cwd.strip("/"),
"tmp/keeper_test_backup/house/room/locker/file5.txt"
)
]
keeper = Keeper(system_directories=directories_to_backup)
# Create all directories
for path in folder_paths_to_create:
self._create_dir(path)
# Create all files for backup test
for path in file_paths:
# Create file
with open(path, "w") as file_handle:
file_handle.write("lorem ipsum\n")
# Create backup
keeper.backup(
destination_path=cwd + "/tmp/keeper_test_backup",
filename="backup_test.tar.gz"
)
# Get list of all file paths inside tar file
files_in_tar = self._list_files_in_tar(
cwd + "/tmp/keeper_test_backup/backup_test.tar.gz")
# tar file can't be empty
self.assertNotEqual(len(files_in_tar), 0)
# Normpath the paths
# NOTE: I don't know why this is necessary
files_expected_in_tar = [
os.path.normpath(p) for p in files_expected_in_tar
]
files_in_tar = [
os.path.normpath(p) for p in files_in_tar
]
# Compare tar file and list of files
self.assertEqual(set(files_expected_in_tar), set(files_in_tar))
# Recursively remove all directories and files used in test
self._purge_directory(cwd + "/tmp/keeper_test_backup")
def test_backup_skips_missing_dir(self):
"""Test that missing directory is skipped"""
cwd = os.getcwd()
# Set paths
file_paths = [
cwd + "/tmp/keeper_test_backup/house/room/file1.txt",
cwd + "/tmp/keeper_test_backup/house/room/desk/file2.txt",
cwd + "/tmp/keeper_test_backup/house/room/desk/file3.txt",
cwd + "/tmp/keeper_test_backup/house/room/desk/drawer/file4",
cwd + "/tmp/keeper_test_backup/house/room/locker/file5.txt"
]
folder_paths_to_create = [
cwd + "/tmp/keeper_test_backup/house/room/desk/drawer",
cwd + "/tmp/keeper_test_backup/house/room/locker"
]
directories_to_backup = [
cwd + "/tmp/keeper_test_backup/house/room/desk/drawer",
cwd + "/tmp/keeper_test_backup/house/room/locker",
cwd + "/tmp/keeper_test_backup/ghosthouse" # this does not exist
]
files_expected_in_tar = [
os.path.join(
cwd.strip("/"),
"tmp/keeper_test_backup/house/room/desk/drawer"
),
os.path.join(
cwd.strip("/"),
"tmp/keeper_test_backup/house/room/desk/drawer/file4"
),
os.path.join(
cwd.strip("/"),
"tmp/keeper_test_backup/house/room/locker"
),
os.path.join(
cwd.strip("/"),
"tmp/keeper_test_backup/house/room/locker/file5.txt"
)
]
keeper = Keeper(system_directories=directories_to_backup)
# Create all directories
for path in folder_paths_to_create:
self._create_dir(path)
# Create all files for backup test
for path in file_paths:
# Create file
with open(path, "w") as file_handle:
file_handle.write("lorem ipsum\n")
# Create backup
keeper.backup(
destination_path=cwd + "/tmp/keeper_test_backup",
filename="backup_test.tar.gz"
)
# Get list of all file paths inside tar file
files_in_tar = self._list_files_in_tar(
cwd + "/tmp/keeper_test_backup/backup_test.tar.gz")
# tar file can't be empty
self.assertNotEqual(len(files_in_tar), 0)
# Normpath the paths
# NOTE: I don't know why this is necessary
files_expected_in_tar = [
os.path.normpath(p) for p in files_expected_in_tar
]
files_in_tar = [
os.path.normpath(p) for p in files_in_tar
]
# Compare tar file and list of files
self.assertEqual(set(files_expected_in_tar), set(files_in_tar))
# Recursively remove all directories and files used in test
self._purge_directory(cwd + "/tmp/keeper_test_backup")
def test_restore(self):
"""Test restoring a set of directories and files from a backup file"""
# Set paths
cwd = os.getcwd()
file_paths = [
cwd + "/tmp/keeper_test_restore/hotel/lobby/file1.txt",
cwd + "/tmp/keeper_test_restore/hotel/lobby/desk/file2.txt",
cwd + "/tmp/keeper_test_restore/hotel/lobby/desk/file3.txt",
cwd + "/tmp/keeper_test_restore/hotel/lobby/desk/drawer/f4",
cwd + "/tmp/keeper_test_restore/hotel/lobby/locker/file5.txt"
]
folder_paths_to_create = [
cwd + "/tmp/keeper_test_restore/hotel/lobby/desk/drawer/",
cwd + "/tmp/keeper_test_restore/hotel/lobby/locker"
]
directories_to_backup = [
cwd + "/tmp/keeper_test_restore/hotel/lobby/desk/drawer/",
cwd + "/tmp/keeper_test_restore/hotel/lobby/locker/"
]
files_expected_in_restore = [
cwd + "/tmp/keeper_test_restore/hotel/lobby/locker/file5.txt",
cwd + "/tmp/keeper_test_restore/hotel/lobby/desk/drawer/f4"
]
keeper = Keeper(system_directories=directories_to_backup)
# Create all directories
for path in folder_paths_to_create:
self._create_dir(path)
# Create all files for backup
for path in file_paths:
# Create file
with open(path, "w") as file_handle:
file_handle.write("lorem ipsum\n")
# Create backup
keeper.backup(
destination_path=cwd + "/tmp/keeper_test_restore",
filename="restore_test.tar.gz"
)
# Purge the source directory
self._purge_directory(cwd + "/tmp/keeper_test_restore/hotel")
# Restore
keeper.restore(
cwd + "/tmp/keeper_test_restore/restore_test.tar.gz")
# List all directories
restored = cwd + "/tmp/keeper_test_restore/hotel"
files_found = []
for root, dirs, files in os.walk(restored):
for f in files:
files_found.append(os.path.join(root, f))
self.assertEqual(set(files_found), set(files_expected_in_restore))
# Clean up test files and directories
self._purge_directory(cwd + "/tmp/keeper_test_restore")
def test_restore_check_content(self):
"""Test restoring a file and check contents"""
# Set paths
cwd = os.getcwd()
file_paths = [
cwd + "/tmp/keeper_test_r_check/a/b/file1.txt",
cwd + "/tmp/keeper_test_r_check/a/b/c/file2.txt",
cwd + "/tmp/keeper_test_r_check/a/b/c/file3.txt",
cwd + "/tmp/keeper_test_r_check/a/b/c/e/f4",
cwd + "/tmp/keeper_test_r_check/a/b/d/file5.txt"
]
folder_paths_to_create = [
cwd + "/tmp/keeper_test_r_check/a/b/c/e/",
cwd + "/tmp/keeper_test_r_check/a/b/d"
]
directories_to_backup = [
cwd + "/tmp/keeper_test_r_check/a/b/d/"
]
file_expected_in_restore = os.path.join(
cwd + "/tmp/keeper_test_r_check/a/b/d/file5.txt"
)
keeper = Keeper(system_directories=directories_to_backup)
# Create all directories
for path in folder_paths_to_create:
self._create_dir(path)
# Create all files for backup
for path in file_paths:
# Create file
with open(path, "w") as file_handle:
file_handle.write("lorem ipsum\n")
# Create backup
keeper.backup(
destination_path=cwd + "/tmp/keeper_test_r_check",
filename="restore_test.tar.gz"
)
# Purge the source directory
self._purge_directory(cwd + "/tmp/keeper_test_r_check/a")
# Restore
keeper.restore(
cwd + "/tmp/keeper_test_r_check/restore_test.tar.gz")
# Get file contents
with open(file_expected_in_restore, 'r') as restored_file:
content = restored_file.read()
logging.debug("content " + content)
self.assertEqual(content, "lorem ipsum\n")
# Clean up test files and directories
self._purge_directory(cwd + "/tmp/keeper_test_r_check")
def test_restore_does_not_overwrite(self):
"""Test that existing files are not overwritten by restore"""
cwd = os.getcwd()
base = cwd + "/tmp/keeper_python_unittest_restore_no_overwrite"
# Set paths
file_paths = [
base + "/hotel/lobby/file1.txt",
base + "/hotel/lobby/desk/file2.txt",
base + "/hotel/lobby/desk/file3.txt",
base + "/hotel/lobby/desk/drawer/f4",
base + "/hotel/lobby/locker/file5.txt"
]
folder_paths_to_create = [
base + "/hotel/lobby/desk/drawer/",
base + "/hotel/lobby/locker"
]
directories_to_backup = [
base + "/hotel/lobby/desk/drawer/",
base + "/hotel/lobby/locker/"
]
files_expected_in_restore = [
base + "/hotel/lobby/desk/drawer/f4",
base + "/hotel/lobby/locker/file5.txt"
]
keeper = Keeper(system_directories=directories_to_backup)
# Create all directories
for path in folder_paths_to_create:
self._create_dir(path)
# Create all files for backup
for path in file_paths:
# Create file
with open(path, "w") as file_handle:
file_handle.write("lorem ipsum\n")
# Create backup
keeper.backup(
destination_path=base,
filename="restore_test.tar.gz"
)
# Write to files again
for name in files_expected_in_restore:
with open(name, "w") as file_handle:
file_handle.write("new version\n")
# Restore should raise exception on existing file
with self.assertRaises(Exception):
keeper.restore(base + "/restore_test.tar.gz")
# Get file contents
files_content = []
for name in files_expected_in_restore:
with open(name, "r") as file_handle:
content = file_handle.read()
files_content.append(content)
self.assertEqual(
files_content,
[
"new version\n",
"new version\n"
]
)
# Purge the test directory
self._purge_directory(base)
def test_backup_file_name_different_for_partial(self):
"""Test that partial backup file is named correctly
If there is a directory override, the file should have
"partial" in the name
"""
# Set paths
cwd = os.getcwd()
base = cwd + "/tmp/keeper_python_unittest_partial_name"
file_paths = [
base + "/a/b/c.txt",
base + "/q/r.txt"
]
folder_paths_to_create = [
base + "/a/b",
base + "/q"
]
# Create all directories
for path in folder_paths_to_create:
self._create_dir(path)
# Create all files for backup test
for path in file_paths:
# Create file
with open(path, "w") as file_handle:
file_handle.write("lorem ipsum\n")
# Create backup
args = keeper.parse_args([
'--dirs=' + cwd + '/tmp/keeper_python_unittest_partial_name/a/b',
'backup',
'--dest', 'tmp/keeper_python_unittest_partial_name'
])
keeper.main(args)
# Get filename
archive_filename = glob.glob(base + "/*.tar.gz")[0]
self.assertTrue("partial" in archive_filename)
# Recursively remove all directories and files used in test
self._purge_directory(cwd + "/tmp/keeper_python_unittest_partial_name")
def test_restore_subset_directories(self):
"""Test restoring a subset of directories"""
# Set paths
cwd = os.getcwd()
base = cwd + "/tmp/keeper_python_unittest_restore_subset"
file_paths = [
base + "/a/b/file1.txt",
base + "/a/b/c/file2.txt",
base + "/a/b/c/file3.txt",
base + "/a/b/c/e/file4.txt",
base + "/a/b/d/file5.txt"
]
folder_paths_to_create = [
base + "/a/b/c/e/",
base + "/a/b/d"
]
files_expected_in_restore = [
base + "/a/b/c/e/file4.txt"
]
# Create all directories
for path in folder_paths_to_create:
self._create_dir(path)
# Create all files for backup
for path in file_paths:
# Create file
with open(path, "w") as file_handle:
file_handle.write("lorem ipsum\n")
# Create backup
args = keeper.parse_args([
'--dirs=' + base + '/a/b',
'backup',
'--dest', base,
'--filename', "test.tar.gz"
])
keeper.main(args)
# Purge the source directory
self._purge_directory(base + "/a")
# Restore
args = keeper.parse_args([
'--dirs=' + base + '/a/b/c/e',
'restore',
'--file=' + base + '/test.tar.gz'
])
keeper.main(args)
# List all directories
restored = base + "/a"
files_found = []
for root, dirs, files in os.walk(restored):
for f in files:
files_found.append(os.path.join(root, f))
self.assertEqual(set(files_found), set(files_expected_in_restore))
# Clean up directory
self._purge_directory(base)
``` |
{
"source": "jonatanblue/kubeless-python-vs-go",
"score": 3
} |
#### File: kubeless-python-vs-go/python/loop.py
```python
import flask
import datetime as dt
def hello(event, context):
print(flask)
print(dt.datetime.now())
sum = 0
for i in range(0, 1000000):
sum += 1
print(sum)
print(dt.datetime.now())
return "Hello world!"
``` |
{
"source": "JonatanGarbuyo/asterisk_doorphone",
"score": 3
} |
#### File: asterisk_doorphone/asterisk_doorphone/asterisk_ami.py
```python
import socket
class AsteriskAMI:
def __init__(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connect(self, server_ip: str, server_port: int):
"""Connects with the asterisk AMI"""
try:
self.socket.connect((server_ip, server_port))
except Exception as error:
raise ConnectionError("Couldn't connect to asterisk. Error:", error)
def login(self, username: str, password: str):
"""Login with asterisk AMI """
self.send_command({
"Action": "Login",
"Username": username,
"Secret": password,
"Events": "OFF"})
response = self.receive_response()
if 'Response: Success' not in response:
raise ConnectionRefusedError(response)
print(response)
def receive_response(self):
"""receive the response from asterisk AMI"""
response = ""
while not response.endswith('\r\n\r\n'):
buf = self.socket.recv(1024)
if not len(buf) > 0:
break
response += buf.decode()
return response
def send_command(self, args: dict):
"""Sends the command to asterisk AMI"""
command = ""
for key, value in args.items():
command += key + ": " + value + "\r\n"
command += "\r\n"
try:
self.socket.send(command.encode())
except Exception as error:
raise error
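    # Illustrative sketch (not in the original source; the credential values are made
    # up): with Python 3.7+ dict ordering, the Login action built in login() serialises to
    #   "Action: Login\r\nUsername: admin\r\nSecret: s3cret\r\nEvents: OFF\r\n\r\n"
    # i.e. "Key: Value" pairs separated by CRLF and terminated by a blank line, which is
    # the frame format the Asterisk Manager Interface expects.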
def originate(self, channel="", exten="", context="", caller_id="", priority="1", timeout="2000", variable=""):
"""
* @param string $channel Channel name to call
* @param string $exten Extension to use (requires 'Context' and 'Priority')
* @param string $context Context to use (requires 'Exten' and 'Priority')
* @param string $priority Priority to use (requires 'Exten' and 'Context')
* @param string $application Application to use
* @param string $data Data to use (requires 'Application')
* @param integer $timeout How long to wait for call to be answered (in ms)
* @param string $callerid Caller ID to be set on the outgoing channel
* @param string $variable Channel variable to set (VAR1=value1|VAR2=value2)
* @param string $account Account code
* @param boolean $async true fast origination
* @param string $actionid message matching variable
"""
params = {
"Action": "Originate",
"Channel": channel,
"Context": context,
"Exten": exten,
"Priority": priority,
"Timeout": timeout,
"CallerID": caller_id,
"Variable": variable,
}
print("Calling apartment {extension}".format(extension=exten))
self.send_command(params)
response = self.receive_response()
return response
def sip_peer_status(self, extension: str):
self.send_command(dict(Action="SIPpeerstatus", Peer=extension))
response = ""
while True:
response += self.receive_response()
if "Event: SIPpeerstatusComplete" in response:
break
print(response)
return response
def extension_state(self, extension: str, context: str):
self.send_command(dict(Action="ExtensionState", Exten=extension, Context=context))
response = self.receive_response()
return response
def disconnect(self):
"""Closes the connection to Asterisk AMI"""
self.socket.close()
``` |
{
"source": "Jonatanjrss/django-pagseguro2",
"score": 3
} |
#### File: django-pagseguro2/pagseguro/forms.py
```python
from decimal import Decimal
from django import forms
class PagSeguroItemForm(forms.Form):
id = forms.CharField(max_length=100)
description = forms.CharField(max_length=100)
amount = forms.DecimalField(
max_digits=9, max_value=Decimal("9999999.00"), min_value=Decimal("0.01"), decimal_places=2,
)
quantity = forms.IntegerField(min_value=1, max_value=999)
shipping_cost = forms.DecimalField(
max_digits=9,
max_value=Decimal("9999999.00"),
min_value=Decimal("0.01"),
decimal_places=2,
required=False,
)
weight = forms.IntegerField(min_value=1, max_value=30000, required=False)
def clean_amount(self):
amount = self.cleaned_data.get("amount")
if amount:
exponent = abs(amount.as_tuple().exponent)
if exponent != 2:
raise forms.ValidationError("O amount deve conter duas casas decimais.")
return amount
def clean_shipping_cost(self):
shipping_cost = self.cleaned_data.get("shipping_cost")
if shipping_cost:
exponent = abs(shipping_cost.as_tuple().exponent)
if exponent != 2:
raise forms.ValidationError("O shipping_cost deve conter duas casas decimais.")
return shipping_cost
```
#### File: pagseguro/tests/test_models.py
```python
from django.test import TestCase
from django.utils import timezone
from pagseguro.models import Checkout, Transaction, TransactionHistory
class CheckoutTest(TestCase):
def test_create_model(self):
checkout = Checkout.objects.create(
code='007',
date=timezone.now(),
success=True
)
self.assertEqual(
str(checkout), '{0}'.format(checkout.pk)
)
class TransactionTest(TestCase):
def test_create_model(self):
transaction = Transaction.objects.create(
code='007',
reference='nothing',
status='aguardando',
date=timezone.now(),
last_event_date=timezone.now()
)
self.assertEqual(
str(transaction), transaction.code
)
class TransactionHistoryTest(TestCase):
def test_create_model(self):
transaction = Transaction.objects.create(
code='007',
reference='nothing',
status='aguardando',
date=timezone.now(),
last_event_date=timezone.now()
)
tx_history = TransactionHistory.objects.create(
transaction=transaction,
date=timezone.now(),
status='aguardando'
)
self.assertEqual(
str(tx_history),
'{0} - {1} - {2}'.format(
tx_history.transaction, tx_history.status, tx_history.date
)
)
```
#### File: django-pagseguro2/pagseguro/views.py
```python
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from pagseguro.api import PagSeguroApi
@csrf_exempt
@require_http_methods(["POST"])
def receive_notification(request):
notification_code = request.POST.get("notificationCode", None)
notification_type = request.POST.get("notificationType", None)
if notification_code and notification_type == "transaction":
pagseguro_api = PagSeguroApi()
response = pagseguro_api.get_notification(notification_code)
if response.status_code == 200:
return HttpResponse("Notificação recebida com sucesso.")
return HttpResponse("Notificação inválida.", status=400)
``` |
{
"source": "jonatanlinden/PR",
"score": 4
} |
#### File: jonatanlinden/PR/gdb_skiplist_print.py
```python
import gdb
class SkiplistPrintCommand(gdb.Command):
"""Iterate and print a list.
skip <EXPR> [MAX]
Given a list EXPR, iterate though the list nodes' ->next pointers, printing
each node iterated. We will iterate thorugh MAX list nodes, to prevent
infinite loops with corrupt lists. If MAX is zero, we will iterate the
entire list.
List nodes types are expected to have a member named "next". List types
may be the same as node types, or a separate type with an explicit
head node, called "head"."""
MAX_ITER = 10
def __init__(self):
super(SkiplistPrintCommand, self).__init__("skiplist-print", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
def invoke(self, _args, from_tty):
args = gdb.string_to_argv(_args)
start_node = args[0]
if len(args) > 1:
max_iter = int(args[1])
else:
max_iter = self.MAX_ITER
if len(args) > 2:
lvl = int(args[2])
else:
lvl = 0
p_node_t = gdb.lookup_type('node_t').pointer()
long_t = gdb.lookup_type('long')
node = gdb.parse_and_eval(start_node)
print node
for i in xrange(max_iter):
nexts = node['next']
nxt = gdb.Value(nexts[lvl]).cast(long_t)
nxt = nxt & ~1
node = gdb.Value(nxt).cast(p_node_t).dereference()
nexts = node['next']
print node['k'], node['level'], node['inserting'],
k = 0
while k < node['level']:
print(nexts[k]),
k+=1
print("")
SkiplistPrintCommand()
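# Illustrative usage (not in the original source; the symbol name "head" and the
# argument values are hypothetical):
#   (gdb) source gdb_skiplist_print.py
#   (gdb) skiplist-print *head 20 0
# This walks up to 20 nodes along the level-0 ->next chain starting from the node
# expression "*head", masking off the low mark bit before dereferencing each link.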
``` |
{
"source": "jonatanlv/nsga-ii",
"score": 3
} |
#### File: jonatanlv/nsga-ii/metricas.py
```python
import numpy as np
import logging
#Medidas de diversidad
def Lambda(pop1, problema):
'''Descrita en 'A fast and elitist multiobjective genetic algorithm: NSGA-II
Suponemos que la población está "ordenada" con respecto a los índices'''
distancias = []
auxP = None
for p1 in pop1:
if auxP is None:
auxP = p1
continue
else:
distancias.append(distancia(auxP.evaluado_en(problema), p1.evaluado_en(problema)))
auxP = p1
media = np.mean(distancias)
    #DONE this was wrong: we cannot assume that the population extremes are the first and the last individuals
#logging.warn('Calculando Lambda. Los extremos puede que no sean los correctos')
df = minimaDistancia(pop1[0].evaluado_en(problema), [problema.fp['extremos'][0]])
dl = minimaDistancia(pop1[-1].evaluado_en(problema), [problema.fp['extremos'][1]])
return (df + dl + np.sum(np.abs(distancias - media))) / (df + dl + media * len(distancias))
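# Illustrative note (not in the original source): this implements the NSGA-II spread
# metric, with df and dl the distances to the two extreme points of the true front
# and d_i the consecutive gaps between neighbouring solutions:
#   Delta = (df + dl + sum_i |d_i - d_mean|) / (df + dl + (N - 1) * d_mean)
# where N - 1 == len(distancias).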
def Spread(pop1, problema):
    '''Dispersion of the population without taking the extreme points into account'''
distancias = []
for p1 in pop1:
distancias_a_p1 = []
for p2 in pop1:
if not p1 is p2:
distancias_a_p1.append(distancia(p1.evaluado_en(problema), p2.evaluado_en(problema)))
else:
distancias_a_p1.append(float('inf'))
distancias.append(np.min(distancias_a_p1))
return np.std(distancias)
#Convergence measures
def Upsilon(pop1, problema):
    '''Described in "A fast and elitist multiobjective genetic algorithm: NSGA-II"'''
distancias = np.array([minimaDistancia(p.evaluado_en(problema), problema.fp['frente']) for p in pop1])
return np.mean(distancias)
#Coverage measures
def C(pop1, pop2, problema):
    '''Proportion of solutions in pop2 that are weakly dominated by pop1'''
cont = 0
for p2 in pop2:
for p1 in pop1:
if problema.dominado(p1, p2) > 0:
cont += 1
break
return cont / len(pop2)
#Mixed measures
#Helper functions
def minimaDistancia(p1, conjunto, lp = 2):
    '''Returns the minimum lp distance between the point p1 and the elements of conjunto'''
minimo = float('inf')
for p in conjunto:
        psd = distancia(p, p1, lp = lp, raiz = False)
if psd < minimo:
minimo = psd
return np.power(minimo, 1 / lp)
def distancia(p1, p2, lp = 2, raiz = True):
suma = np.sum((p1 - p2) ** lp)
if raiz:
return float(np.power(suma, 1 / lp))
else:
return float(suma)
def extremos(pop1):
    '''We assume minimisation; what we look for are the extreme points'''
pass
```
#### File: jonatanlv/nsga-ii/representacion.py
```python
import numpy as np
from Estadisticas import Estadisticas
class Punto(np.ndarray):
    '''Inherits from np.ndarray and represents a solution.
    Mutation is defined on the points.
    We will always treat the point itself as the genotype.
'''
def __new__(cls, dimensiones, initValue = None, rango = None, \
operadores = None, crowded_distance = None, generacion = 1, dist_fenotipo = None, **kwargs):
        '''To inherit from np.ndarray we must use __new__ instead of __init__'''
obj = np.ndarray.__new__(cls, dimensiones, **kwargs)
obj.gen = generacion
obj.vals = None
obj.rest = None
obj.rgo = rango
obj.crwd = crowded_distance
obj.np = 0
obj.Sp = []
'''Operadores es un diccionario de operadores evolutivos'''
if not operadores is None:
Punto._mutar = operadores['mutador']
Punto._fenotipo = operadores['fenotipo']
if not dist_fenotipo is None:
Punto.dist_fenotipo = dist_fenotipo
obj.setPunto(vector = initValue)
return obj
def setPunto(self, vector = None):
if vector is None:
self[:] = 0
else:
for i in range(len(self)):
self[i] = vector[i]
def copy(self, **kwargs):
        '''Return another point that is a copy of the current one'''
p = Punto(dimensiones = len(self), **kwargs)
p.gen = self.gen
p.vals = self.vals
p.rest = self.rest
p.rgo = self.rgo
p.crwd = self.crwd
p.np = self.np
p.Sp = self.Sp[:]
p.setPunto(vector = self)
return p
def fenotipo(self):
        '''For now we work with a real-valued representation: phenotype = genotype'''
return self.__class__._fenotipo(self)
def rand(self, problema):
if problema.parametros.get('tipo_var', 'real') == 'real':
self[:] = (problema.lims[:, 1] - problema.lims[:, 0]) * np.random.rand(problema.dims) + problema.lims[:, 0]
else:
for i in range(problema.dims):
self[i] = np.random.choice(problema.lims[i])
def evaluado_en(self, problema):
        '''Evaluate the point with the functions provided by the problem'''
if self.vals is None:
self.vals = problema.evaluador(self)
return self.vals
def violacion_restricciones(self, problema):
        '''Compute the constraint violation level'''
if self.rest is None:
self.rest = problema.violacion_restricciones(self)
return self.rest
def mutar(self, problema):
        '''With this call we ask the point to mutate itself'''
self.__class__._mutar(self, problema)
class Poblacion(list):
    '''The population is a list of Puntos that represents the solutions.
    Crossover and selection are defined on populations.'''
def __init__(self, size, operadores, generacion = 0, stats = None):
self.size = size
self.gen = generacion
if stats is None:
self.stats = Estadisticas('Estadisticas')
else:
self.stats = stats
        self.stats.nuevo_Contador('gens') # Current generation
if not operadores is None:
self.__class__._selector = operadores['selector']
self.__class__._cruzador = operadores['cruzador']
self.__class__._seleccionador = operadores['seleccionador']
def select_with(self, nomCaracteristica, valor):
        '''Select the points that have a given value for a given attribute'''
resultado = []
for p in self:
if p.__getattribute__(nomCaracteristica) == valor:
resultado.append(p)
return resultado
def selector(self, problema):
        '''Select individuals for crossover'''
return self.__class__._selector(self, problema)
def cruzador(self, padre, madre, problema):
        '''Cross two points'''
return self.__class__._cruzador(padre, madre, problema)
def seleccionador(self, subpoblacion, problema):
        '''Select from the population and discard the individuals that are no longer needed'''
return self.__class__._seleccionador(self, subpoblacion, problema)
def union(self, pop):
for p in pop:
self.append(p)
def borrar(self, conjunto):
for p in conjunto:
if p in self:
self.remove(p)
def fast_non_dominated_sort(self, problema):
        '''We follow the algorithm described in "A fast and elitist multiobjective GA: NSGA-II"'''
        #TODO: This procedure could be improved by not computing the rank of the whole population
frentes = [[]]
for p in self:
p.Sp, p.np = [], 0
for q in self:
dominio = problema.dominadoC(p, q)
                if dominio == 1: # p dominates q
                    p.Sp.append(q)
                elif dominio == -1: # q dominates p
p.np += 1
if p.np == 0:
p.rgo = 1
frentes[0].append(p)
i = 0
while True:
siguienteFrente = []
for p in frentes[i]:
for q in p.Sp:
q.np -= 1
if q.np == 0:
q.rgo = i + 2
siguienteFrente.append(q)
if siguienteFrente == []:
break
frentes.append(siguienteFrente[:])
i += 1
def __contains__(self, item):
for p in self:
if p is item:
return True
return False
def crowding_distance_assignment(I, problema):
    '''We follow the algorithm described in "A fast and elitist multiobjective GA: NSGA-II"'''
I.sort(reverse = True, key = lambda x: x[0])
extremos = [I[0], I[-1]]
for p in I:
p.crwd = 0
for p in extremos:
p.crwd = float('inf')
    #TODO I cannot find a way to do this with numpy
objetivos = []
for p in I:
parcial = [p]
parcial.extend(p.evaluado_en(problema))
objetivos.append(parcial[:])
# objetivos[i] = [p_i, f1(p_i), f2(p_i), ..., fn(p_i)]
for i in range(1, len(problema.objetivos) + 1):
objetivos.sort(key=lambda x: x[i])
fmax = max(objetivos, key=lambda x: x[i])[i]
fmin = min(objetivos, key=lambda x: x[i])[i]
for j in range(1, len(objetivos) - 1):
objetivos[j][0].crwd += (objetivos[j+1][i] - objetivos[j-1][i]) / (fmax - fmin)
############################################
# PHENOTYPES
# They always have the same signature:
# def nombre(punto)
# They return
# the phenotype that corresponds to the point
############################################
def real(punto):
    '''Real-valued representation: phenotype = genotype'''
return punto
def binario(punto):
    '''Binary representation'''
fenotipo = []
for i in range(len(punto.dist_fenotipo)):
li = np.sum(punto.dist_fenotipo[:i])
ui = np.sum(punto.dist_fenotipo[:i + 1])
fenotipo.append(punto[li:ui])
return fenotipo
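# Illustrative example (not in the original source): with Punto.dist_fenotipo = [3, 2]
# a 5-gene genotype g is split into the phenotype [g[0:3], g[3:5]], one slice per
# decision variable.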
############################################
# MUTATION OPERATORS
# They always have the same signature:
# def nombre(punto, problema)
############################################
def mutador1(punto, problema):
    '''Mutate each component with probability pm (1 / problem dimension by default);
    a mutated component may take any value within its bounds'''
p = problema.parametros.get('pm', 1 / problema.dims)
mascara = np.random.rand(problema.dims) < p
punto[mascara] = (problema.lims[mascara, 1] - problema.lims[mascara, 0]) \
* np.random.rand(mascara.sum()) + problema.lims[mascara, 0]
def mutadorb(punto, problema):
    '''State mutator for discrete variables: a component is chosen
    and forced to change to one of the other states'''
p = problema.parametros.get('pm', 1 / problema.dims)
mascara = np.random.rand(problema.dims) < p
for i in range(len(problema.lims)):
if not mascara[i]:
continue
nvalor = np.random.choice(problema.lims[i])
while nvalor == punto[i]:
nvalor = np.random.choice(problema.lims[i])
punto[i] = nvalor
def mutador_init(punto, problema):
'''Pick an arbitrary point from the decision space'''
punto.rand(problema)
def mutacion_aleatoria(punto, problema):
'''Each component is varied uniformly within the maximum range allowed for it'''
copy_lims = problema.lims.copy()
copy_lims[:,0] = np.abs(copy_lims[:,0] - punto)
copy_lims[:,1] = np.abs(copy_lims[:,1] - punto)
deltas = np.min(copy_lims, axis = 1) # maximum variability allowed per component
u = np.random.rand(problema.dims) * 2 - 1 # the variation to apply to each component
punto.setPunto(vector = punto + u * deltas)
def mutacion_polinomial(punto, problema):
'''Polynomial mutation'''
p = problema.parametros.get('pm', 1 / problema.dims)
eta = problema.parametros.get('mp', 2)
for i in range(problema.dims):
if np.random.rand() >= p:
# do not mutate this gene
continue
u = np.random.rand()
if u <= .5:
delta = np.power(2 * u, 1 / (eta + 1)) - 1
punto[i] += delta * (punto[i] - problema.lims[i,0])
else:
delta = 1 - np.power(2 * (1 - u), 1 / (eta + 1))
punto[i] += delta * (problema.lims[i,1] - punto[i])
############################################
# CROSSOVER GENERATORS
# They always have the same signature:
# def name(seleccionParaCruce, cruzadorBasico)
# They return a function with the following signature
# def $(poblacion, problema)
# which in turn returns:
# 2 offspring
############################################
def generar_cruzador(selector, cruzador):
def funcion(poblacion, problema):
p1 = selector(poblacion, problema)
p2 = selector(poblacion, problema)
return cruzador(p1, p2, problema)
return funcion
############################################
# BASIC CROSSOVER OPERATORS
# They always have the same signature:
# def name(padre, madre, problema)
# They return:
# two solution points
############################################
'''Basic crossover operators'''
def line_recombination(padre, madre, problema):
pass
def intermediate_recombination(padre, madre, problema):
pass
def trivial(padre, madre, problema):
'''Trivial recombiner: returns the father and the mother unchanged'''
return padre, madre
def blended_crossover(padre, madre, problema):
'''Blended crossover (BLX-alpha)'''
hijo = Punto(dimensiones = problema.dims, **problema.extras)
hija = Punto(dimensiones = problema.dims, **problema.extras)
alpha = problema.parametros.get('blx', .5)
for i in range(problema.dims):
alpha = problema.parametros.get('blx', .5)
entrar = True
while entrar:
factor = np.random.rand() * (1 + 2 * alpha) - alpha
hijo[i] = (1 - factor) * padre[i] + factor * madre[i]
hija[i] = factor * padre[i] + (1 - factor) * madre[i]
if problema.lims[i, 0] <= hijo[i] <= problema.lims[i, 1] and\
problema.lims[i, 0] <= hija[i] <= problema.lims[i, 1]:
entrar = False
else:
alpha = alpha / 2
#print('padre {}, madre {}, alpha = {}'.format(str(padre), str(madre), alpha))
return hijo, hija
def simulated_binary_crossover2(padre, madre, problema):
'''simulated binary crossover'''
hijo = Punto(dimensiones = problema.dims, **problema.extras)
hija = Punto(dimensiones = problema.dims, **problema.extras)
n = problema.parametros.get('sbx', 2)
for i in range(problema.dims):
# Work component by component to avoid drawing many random values at once
y1, y2 = min(padre[i], madre[i]), max(padre[i], madre[i])
yl, yu = problema.lims[i,0], problema.lims[i,1]
if np.abs(y2 - y1) > problema.parametros.get('sbx_prec', 10**-6):
u = np.random.rand()
beta = 1 + 2 * (y1 - yl) / (y2 - y1)
alpha = 2 - np.power(beta, -(n + 1))
if u <= 1 / alpha:
betaq = np.power(u * alpha, 1 / (n + 1))
else:
betaq = np.power(1 / (2 - u * alpha), 1 / (n + 1))
h1 = .5 * ( y1 + y2 - betaq * (y2 - y1))
beta = 1 + 2 * (yu - y2) / (y2 - y1)
alpha = 2 - np.power(beta, -(n + 1))
if u <= 1 / alpha:
betaq = np.power(u * alpha, 1 / (n + 1))
else:
betaq = np.power(1 / (2 - u * alpha), 1 / (n + 1))
h2 = .5 * ( y1 + y2 - betaq * (y2 - y1))
if h1 < yl:
h1 = yl
elif h1 > yu:
h1 = yu
if h2 < yl:
h2 = yl
elif h2 > yu:
h2 = yu
if np.random.rand() < .5:
hijo[i] = h1
hija[i] = h2
else:
hijo[i] = h2
hija[i] = h1
return hijo, hija
def simulated_binary_crossover(padre, madre, problema):
'''simulated binary crossover'''
hijo = Punto(dimensiones = problema.dims, **problema.extras)
hija = Punto(dimensiones = problema.dims, **problema.extras)
n = problema.parametros.get('sbx', 2)
for i in range(problema.dims):
# Work component by component to avoid drawing many random values at once
u = np.random.rand()
if u < .5:
beta = np.power(2*u, 1/ (n + 1))
else:
beta = np.power(1 / (2 - 2 * u), 1 / (n + 1))
hijo[i] = .5 * (padre[i] + madre[i]) + .5 * beta * (padre[i] - madre[i])
hija[i] = .5 * (padre[i] + madre[i]) - .5 * beta * (padre[i] - madre[i])
if hijo[i] < problema.lims[i, 0]:
hijo[i] = problema.lims[i, 0]
elif hijo[i] > problema.lims[i, 1]:
hijo[i] = problema.lims[i, 1]
if hija[i] < problema.lims[i, 0]:
hija[i] = problema.lims[i, 0]
elif hija[i] > problema.lims[i, 1]:
hija[i] = problema.lims[i, 1]
return hijo, hija
def uniform_compound_recombination(padre, madre, problema):
'''uniform compound recombination'''
hijo = Punto(dimensiones = problema.dims, **problema.extras)
hija = Punto(dimensiones = problema.dims, **problema.extras)
if 'ucr' in problema.parametros:
c = problema.parametros['ucr']
else:
c = -0.75
sw = None
while sw is None or not (problema.en_el_dominio(hijo) and problema.en_el_dominio(hija)):
sw = True
# factor holds the per-component mixing factors; vs ~ U[-1, 1]
vs = np.random.rand(problema.dims) * 2 - 1
factor = vs * np.abs(vs) ** c + .5
hijo.setPunto(vector = factor * padre + (1 - factor) * madre)
hija.setPunto(vector = (1 - factor) * padre + factor * madre)
return hijo, hija
def one_point_crossover(padre, madre, problema):
'''one point crossover'''
hijo = Punto(dimensiones = problema.dims, **problema.extras)
hija = Punto(dimensiones = problema.dims, **problema.extras)
cut = None
while cut is None or not (problema.en_el_dominio(hijo) and problema.en_el_dominio(hija)):
cut = np.random.randint(problema.dims)
hijo[:cut] = padre[:cut]
hijo[cut:] = madre[cut:]
hija[cut:] = padre[cut:]
hija[:cut] = madre[:cut]
return hijo, hija
def two_point_crossover(padre, madre, problema):
'''two point crossover'''
hijo = Punto(dimensiones = problema.dims, **problema.extras)
hija = Punto(dimensiones = problema.dims, **problema.extras)
cut1 = None
while cut1 is None or not (problema.en_el_dominio(hijo) and problema.en_el_dominio(hija)):
cut1 = np.random.randint(problema.dims - 1)
cut2 = np.random.randint(cut1, problema.dims)
hijo[:cut1] = padre[:cut1]
hijo[cut1:cut2] = madre[cut1:cut2]
hijo[cut2:] = padre[cut2:]
hija[:cut1] = madre[:cut1]
hija[cut1:cut2] = padre[cut1:cut2]
hija[cut2:] = madre[cut2:]
return hijo, hija
def uniform_crossover(padre, madre, problema):
'''uniform crossover, naive para representaciones reales'''
hijo = Punto(dimensiones = problema.dims, **problema.extras)
hija = Punto(dimensiones = problema.dims, **problema.extras)
cut = None
while cut is None or not (problema.en_el_dominio(hijo) and problema.en_el_dominio(hija)):
cut = np.random.rand(problema.dims)
cut1 = (cut <= .5)
cut2 = (cut > .5)
hijo[cut2] = padre[cut2]
hijo[cut1] = madre[cut1]
hija[cut1] = padre[cut1]
hija[cut2] = madre[cut2]
return hijo, hija
############################################
# SELECTION OPERATORS
# They always have the same signature:
# def name(poblacion, subpoblacion, problema)
# They return
# one point from the population
############################################
'''Given the population, select the fittest individuals'''
def noneSelect(poblacion, subpoblacion, problema):
'''Performs no real selection: yields the points of subpoblacion in order, cyclically'''
i = 0
while True:
yield subpoblacion[i]
i = (i + 1) % len(subpoblacion)
def ns_tournament_selection_sparsity(poblacion, subpoblacion, problema):
'''Non-dominated sorting lexicographic tournament with sparsity'''
t = problema.parametros.get('t_size', 2) # tournament size
p1 = seleccion_uniforme(subpoblacion, problema)
best = p1.copy(**problema.extras)
for i in range(t-1):
p2 = seleccion_uniforme(subpoblacion, problema)
if p2.rgo < best.rgo: # compare against the current best, not only p1
best = p2
elif best.rgo == p2.rgo:
if p2.crwd > best.crwd:
best = p2
return best
def ns_tournament_selection_sparsity_constraint(poblacion, subpoblacion, problema):
'''Non-dominated sorting lexicographic tournament with sparsity and restrictions'''
t = problema.parametros.get('t_size', 2)
p1 = seleccion_uniforme(subpoblacion, problema)
best = p1.copy(**problema.extras)
for i in range(t-1):
p2 = seleccion_uniforme(subpoblacion, problema)
if best.violacion_restricciones(problema) == p2.violacion_restricciones(problema) == 0:
# If both satisfy every constraint, apply the usual criterion
if p2.rgo < best.rgo:
best = p2
elif best.rgo == p2.rgo:
if p2.crwd > best.crwd:
best = p2
elif best.violacion_restricciones(problema) == 0:
# p1 satisfies the constraints and p2 does not
continue
elif p2.violacion_restricciones(problema) == 0:
#p2 cumple las restricciones y p1 no
best = p2
else:
# neither p1 nor p2 satisfies the constraints
if best.violacion_restricciones(problema) > p2.violacion_restricciones(problema):
# p1 has the larger constraint violation
best = p2
elif best.violacion_restricciones(problema) == p2.violacion_restricciones(problema):
# choose at random
if np.random.rand() < .5:
best = p2
return best
############################################
# MATING SELECTION OPERATORS
# They always have the same signature:
# def name(poblacion, problema)
# They return:
# one point from the population
############################################
'''Given the population, select the candidates for crossover'''
def seleccion_uniforme(poblacion, problema):
return poblacion[np.random.randint(problema.parametros['pop_size'])]
def selector(seleccionador):
'''Convert a seleccionador into a selector'''
def f(poblacion, problema):
return seleccionador(poblacion, poblacion, problema)
return f
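# --- Editorial aside (illustrative, not part of the original module) ---
# Hypothetical wiring of the operators defined above; `poblacion` and
# `problema` are assumed to be the population/problem objects used elsewhere
# in this project, so the sketch is left as comments:
#
#     escoger = selector(ns_tournament_selection_sparsity)
#     cruce = generar_cruzador(escoger, simulated_binary_crossover)
#     hijo, hija = cruce(poblacion, problema)   # two offspring
#     mutacion_polinomial(hijo, problema)       # mutate the first child in place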
``` |
{
"source": "JonatanMartens/zeebe_python_worker",
"score": 3
} |
#### File: worker/decorators/log_decorator.py
```python
from loguru import logger
from pyzeebe import Job, JobStatus
def log_decorator(job: Job) -> Job:
if job.status == JobStatus.Running:
logger.info(f"Received job: {job}")
elif job.status == JobStatus.Completed:
logger.info(f"Completed job: {job}")
elif job.status in [JobStatus.Failed, JobStatus.ErrorThrown]:
logger.warning(f"Failed to complete job: {job}")
return job
``` |
{
"source": "jonatanolofsson/amqtt",
"score": 2
} |
#### File: amqtt/amqtt/utils.py
```python
from __future__ import annotations
import logging
import random
import yaml
import typing
if typing.TYPE_CHECKING:
from amqtt.session import Session
logger = logging.getLogger(__name__)
def format_client_message(
session: Session = None, address: str = None, port: int = None
) -> str:
if session:
return "(client id=%s)" % session.client_id
elif address is not None and port is not None:
return "(client @=%s:%d)" % (address, port)
else:
return "(unknown client)"
def gen_client_id() -> str:
"""Generates random client ID"""
gen_id = "amqtt/"
for i in range(7, 23):
gen_id += chr(random.randint(0, 74) + 48)
return gen_id
def read_yaml_config(config_file: str) -> dict:
config = None
try:
with open(config_file) as stream:
config = (
yaml.full_load(stream)
if hasattr(yaml, "full_load")
else yaml.load(stream)
)
except yaml.YAMLError as exc:
logger.error("Invalid config_file %s: %r", config_file, exc)
return config
``` |
{
"source": "jonatanolofsson/home-assistant",
"score": 2
} |
#### File: components/alarm_control_panel/demo.py
```python
import homeassistant.components.alarm_control_panel.manual as manual
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_CUSTOM_BYPASS, STATE_ALARM_TRIGGERED, CONF_PENDING_TIME)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Demo alarm control panel platform."""
add_devices([
manual.ManualAlarm(hass, 'Alarm', '1234', 5, 10, False, {
STATE_ALARM_ARMED_AWAY: {
CONF_PENDING_TIME: 5
},
STATE_ALARM_ARMED_HOME: {
CONF_PENDING_TIME: 5
},
STATE_ALARM_ARMED_NIGHT: {
CONF_PENDING_TIME: 5
},
STATE_ALARM_ARMED_CUSTOM_BYPASS: {
CONF_PENDING_TIME: 5
},
STATE_ALARM_TRIGGERED: {
CONF_PENDING_TIME: 5
},
}),
])
```
#### File: components/device_tracker/test_unifi_direct.py
```python
import os
from datetime import timedelta
import unittest
from unittest import mock
from unittest.mock import patch
import pytest
import voluptuous as vol
from homeassistant.setup import setup_component
from homeassistant.components import device_tracker
from homeassistant.components.device_tracker import (
CONF_CONSIDER_HOME, CONF_TRACK_NEW)
from homeassistant.components.device_tracker.unifi_direct import (
DOMAIN, CONF_PORT, PLATFORM_SCHEMA, _response_to_json, get_scanner)
from homeassistant.const import (CONF_PLATFORM, CONF_PASSWORD, CONF_USERNAME,
CONF_HOST)
from tests.common import (
get_test_home_assistant, assert_setup_component,
mock_component, load_fixture)
class TestComponentsDeviceTrackerUnifiDirect(unittest.TestCase):
"""Tests for the Unifi direct device tracker platform."""
hass = None
scanner_path = 'homeassistant.components.device_tracker.' + \
'unifi_direct.UnifiDeviceScanner'
def setup_method(self, _):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_component(self.hass, 'zone')
def teardown_method(self, _):
"""Stop everything that was started."""
self.hass.stop()
try:
os.remove(self.hass.config.path(device_tracker.YAML_DEVICES))
except FileNotFoundError:
pass
@mock.patch(scanner_path,
return_value=mock.MagicMock())
def test_get_scanner(self, unifi_mock): \
# pylint: disable=invalid-name
"""Test creating an Unifi direct scanner with a password."""
conf_dict = {
DOMAIN: {
CONF_PLATFORM: 'unifi_direct',
CONF_HOST: 'fake_host',
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '<PASSWORD>',
CONF_TRACK_NEW: True,
CONF_CONSIDER_HOME: timedelta(seconds=180)
}
}
with assert_setup_component(1, DOMAIN):
assert setup_component(self.hass, DOMAIN, conf_dict)
conf_dict[DOMAIN][CONF_PORT] = 22
self.assertEqual(unifi_mock.call_args, mock.call(conf_dict[DOMAIN]))
@patch('pexpect.pxssh.pxssh')
def test_get_device_name(self, mock_ssh):
""""Testing MAC matching."""
conf_dict = {
DOMAIN: {
CONF_PLATFORM: 'unifi_direct',
CONF_HOST: 'fake_host',
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '<PASSWORD>',
CONF_PORT: 22,
CONF_TRACK_NEW: True,
CONF_CONSIDER_HOME: timedelta(seconds=180)
}
}
mock_ssh.return_value.before = load_fixture('unifi_direct.txt')
scanner = get_scanner(self.hass, conf_dict)
devices = scanner.scan_devices()
self.assertEqual(23, len(devices))
self.assertEqual("iPhone",
scanner.get_device_name("98:00:c6:56:34:12"))
self.assertEqual("iPhone",
scanner.get_device_name("98:00:C6:56:34:12"))
@patch('pexpect.pxssh.pxssh.logout')
@patch('pexpect.pxssh.pxssh.login')
def test_failed_to_log_in(self, mock_login, mock_logout):
""""Testing exception at login results in False."""
from pexpect import exceptions
conf_dict = {
DOMAIN: {
CONF_PLATFORM: 'unifi_direct',
CONF_HOST: 'fake_host',
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '<PASSWORD>',
CONF_PORT: 22,
CONF_TRACK_NEW: True,
CONF_CONSIDER_HOME: timedelta(seconds=180)
}
}
mock_login.side_effect = exceptions.EOF("Test")
scanner = get_scanner(self.hass, conf_dict)
self.assertFalse(scanner)
@patch('pexpect.pxssh.pxssh.logout')
@patch('pexpect.pxssh.pxssh.login', autospec=True)
@patch('pexpect.pxssh.pxssh.prompt')
@patch('pexpect.pxssh.pxssh.sendline')
def test_to_get_update(self, mock_sendline, mock_prompt, mock_login,
mock_logout):
""""Testing exception in get_update matching."""
conf_dict = {
DOMAIN: {
CONF_PLATFORM: 'unifi_direct',
CONF_HOST: 'fake_host',
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '<PASSWORD>',
CONF_PORT: 22,
CONF_TRACK_NEW: True,
CONF_CONSIDER_HOME: timedelta(seconds=180)
}
}
scanner = get_scanner(self.hass, conf_dict)
# mock_sendline.side_effect = AssertionError("Test")
mock_prompt.side_effect = AssertionError("Test")
devices = scanner._get_update() # pylint: disable=protected-access
self.assertTrue(devices is None)
def test_good_reponse_parses(self):
"""Test that the response form the AP parses to JSON correctly."""
response = _response_to_json(load_fixture('unifi_direct.txt'))
self.assertTrue(response != {})
def test_bad_reponse_returns_none(self):
"""Test that a bad response form the AP parses to JSON correctly."""
self.assertTrue(_response_to_json("{(}") == {})
def test_config_error():
"""Test for configuration errors."""
with pytest.raises(vol.Invalid):
PLATFORM_SCHEMA({
# no username
CONF_PASSWORD: 'password',
CONF_PLATFORM: DOMAIN,
CONF_HOST: 'myhost',
'port': 123,
})
with pytest.raises(vol.Invalid):
PLATFORM_SCHEMA({
# no password
CONF_USERNAME: 'foo',
CONF_PLATFORM: DOMAIN,
CONF_HOST: 'myhost',
'port': 123,
})
with pytest.raises(vol.Invalid):
PLATFORM_SCHEMA({
CONF_PLATFORM: DOMAIN,
CONF_USERNAME: 'foo',
CONF_PASSWORD: 'password',
CONF_HOST: 'myhost',
'port': 'foo', # bad port!
})
``` |
{
"source": "JonatanRoig/django-online-tetris",
"score": 2
} |
#### File: django-online-tetris/online_tetris/views.py
```python
from django.views.generic import DetailView, ListView, UpdateView, CreateView, TemplateView
from .models import Sesion, Castigo
from .forms import SesionForm, CastigoForm
from django.views.decorators.csrf import csrf_exempt
from django.template.loader import render_to_string
from django.shortcuts import render
from django.template import RequestContext
from django.http import HttpResponse
import simplejson as json
import time
import datetime
from datetime import timedelta
from django.utils import timezone
from django.db.models import Q
from random import randint
@csrf_exempt
def index(request):
# return HttpResponse('Hello from Python!')
if (request.method=='POST') and request.is_ajax():
# Data we send back to the HTML
response_data={}
# Grab the NICKNAME that was entered
nickname=request.POST.get('nickname')
# Create the SESION object
new_sesion = Sesion(nombre=nickname)
new_sesion.save()
response_data['user_pk']= new_sesion.pk
return HttpResponse(json.dumps(response_data),content_type="application/json")
else:
return render(request, 'tetris_main.html')
@csrf_exempt
def get_score(request):
# return HttpResponse('Hello from Python!')
if (request.method=='POST') and request.is_ajax():
# Grab the submitted SCORE
score_actual=request.POST.get('score_actual')
# Grab the submitted USER_PK
user_pk=request.POST.get('user_pk')
user_pk = int(user_pk)
# Get the SESION object for this user
sesion = Sesion.objects.get(pk=user_pk)
sesion.puntos = score_actual
sesion.save()
# Data we send back to the HTML
response_data={}
# Time barrier used as a cutoff: sessions not updated since
# this point are no longer considered active
now = timezone.now()
time_limit_barrier = now - datetime.timedelta(minutes=5)
# Fetch all active Sesion objects from the database, ordered by score
all_sesions = Sesion.objects.filter(last_updated__range=(time_limit_barrier,now)).order_by('-puntos')
# Render the scoreboard into an HTML string
response_data['html_scores']=render_to_string("marcador.html",{'all_sesions':all_sesions, 'user_pk': user_pk })
response_data['user_velocidad'] = sesion.velocidad
return HttpResponse(json.dumps(response_data),content_type="application/json")
else:
return HttpResponse(json.dumps({"nothing to see":"this isn't happening"}),content_type="application/json")
@csrf_exempt
def get_castigs(request):
if (request.method=='POST') and request.is_ajax():
# Grab the submitted USER_PK
user_pk=request.POST.get('user_pk')
user_pk = int(user_pk)
# Get the SESION object for this user
sesion = Sesion.objects.get(pk=user_pk)
all_castigos = Castigo.objects.filter( Q(emisor=sesion) | Q(receptor=sesion) ).order_by('-created')
# Data we send back to the HTML
response_data={}
# Render the punishment list into an HTML string
response_data['html_castigos']=render_to_string("castigos.html",{'all_castigos':all_castigos, 'user_pk': user_pk })
response_data['numero_castigos'] = len(Castigo.objects.filter(receptor=sesion))
return HttpResponse(json.dumps(response_data),content_type="application/json")
else:
return HttpResponse(json.dumps({"nothing to see":"this isn't happening"}),content_type="application/json")
@csrf_exempt
def new_castig(request):
if (request.method=='POST') and request.is_ajax():
# Grab the submitted USER_PK
user_pk=request.POST.get('user_pk')
user_pk = int(user_pk)
# Get the SESION object for this user
sesion = Sesion.objects.get(pk=user_pk)
# Choose a receiver among the sessions active in the DB
# Fetch all Sesion objects currently in the database
# Time barrier used as a cutoff: sessions not updated since
# this point are no longer considered active
now = timezone.now()
time_limit_barrier = now - datetime.timedelta(minutes=5)
# Fetch all active Sesion objects from the database, excluding the current user
all_sesions = Sesion.objects.filter(last_updated__range=(time_limit_barrier,now)).order_by('-puntos').exclude(pk=sesion.pk)
# If any other user is active, pick one at random to receive the punishment
if all_sesions:
num_sesions = len(all_sesions)
receptor_random = randint(0, num_sesions - 1)
receptor_random = all_sesions[receptor_random]
# Draw a random index in [0, num_sesions) and use all_sesions[index] as the receiver
new_castigo = Castigo(emisor=sesion, receptor=receptor_random)
new_castigo.save()
# Update the speeds of the current user and of the receiver
sesion.reducir_velocidad()
sesion.save()
receptor_random.augmentar_velocidad()
receptor_random.save()
# Data we send back to the HTML
response_data={}
return HttpResponse(json.dumps(response_data),content_type="application/json")
else:
return HttpResponse(json.dumps({"nothing to see":"this isn't happening"}),content_type="application/json")
class SesionListView(ListView):
model = Sesion
class SesionCreateView(CreateView):
model = Sesion
form_class = SesionForm
class SesionDetailView(DetailView):
model = Sesion
class SesionUpdateView(UpdateView):
model = Sesion
form_class = SesionForm
class CastigoListView(ListView):
model = Castigo
class CastigoCreateView(CreateView):
model = Castigo
form_class = CastigoForm
class CastigoDetailView(DetailView):
model = Castigo
class CastigoUpdateView(UpdateView):
model = Castigo
form_class = CastigoForm
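# --- Editorial aside (illustrative, not part of the original app) ---
# A hypothetical urls.py wiring the AJAX views above; the URL names are
# assumptions, not taken from the project:
#
#     from django.urls import path
#     from online_tetris import views
#
#     urlpatterns = [
#         path('', views.index, name='index'),
#         path('get_score/', views.get_score, name='get_score'),
#         path('get_castigs/', views.get_castigs, name='get_castigs'),
#         path('new_castig/', views.new_castig, name='new_castig'),
#     ]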
``` |
{
"source": "jonatanskogsfors/fikabot",
"score": 2
} |
#### File: fikabot/fikabot/run_fikabot.py
```python
import os
from fikabot import FikaBot
def main():
bot_id = os.environ.get("BOT_ID")
slack_bot_token = os.environ.get('SLACK_BOT_TOKEN')
FikaBot(bot_id, slack_bot_token).do_your_thing()
if __name__ == "__main__":
main()
``` |
{
"source": "jonatanvm/HI-VAE",
"score": 2
} |
#### File: jonatanvm/HI-VAE/VAE_functions.py
```python
import csv
import numpy as np
import tensorflow as tf
import loglik_models_missing_normalize
def place_holder_types(types_file, batch_size):
# Read the types of the data from the files
with open(types_file) as f:
types_list = [{k: v for k, v in row.items()}
for row in csv.DictReader(f, skipinitialspace=True)]
# Create placeholders for every data type, with appropriate dimensions
batch_data_list = []
for i in range(len(types_list)):
batch_data_list.append(tf.placeholder(tf.float32, shape=(batch_size, types_list[i]['dim'])))
tf.concat(batch_data_list, axis=1)
# Create placeholders for every missing data type, with appropriate dimensions
batch_data_list_observed = []
for i in range(len(types_list)):
batch_data_list_observed.append(tf.placeholder(tf.float32, shape=(batch_size, types_list[i]['dim'])))
tf.concat(batch_data_list_observed, axis=1)
# Create placeholders for the missing data indicator variable
miss_list = tf.placeholder(tf.int32, shape=(batch_size, len(types_list)))
# Placeholder for Gumbel-softmax parameter
tau = tf.placeholder(tf.float32, shape=())
tau2 = tf.placeholder(tf.float32, shape=())
return batch_data_list, batch_data_list_observed, miss_list, tau, tau2, types_list
def batch_normalization(batch_data_list, types_list, miss_list):
normalized_data = []
normalization_parameters = []
for i, d in enumerate(batch_data_list):
# Partition the data into missing data (0) and observed data (1)
missing_data, observed_data = tf.dynamic_partition(d, miss_list[:, i], num_partitions=2)
condition_indices = tf.dynamic_partition(tf.range(tf.shape(d)[0]), miss_list[:, i], num_partitions=2)
if types_list[i]['type'] == 'real':
# We transform the data to a gaussian with mean 0 and std 1
data_mean, data_var = tf.nn.moments(observed_data, 0)
data_var = tf.clip_by_value(data_var, 1e-6, 1e20) # Avoid zero values
aux_X = tf.nn.batch_normalization(observed_data, data_mean, data_var, offset=0.0, scale=1.0,
variance_epsilon=1e-6)
normalized_data.append(tf.dynamic_stitch(condition_indices, [missing_data, aux_X]))
normalization_parameters.append([data_mean, data_var])
# When using log-normal
elif types_list[i]['type'] == 'pos':
# We transform the log of the data to a Gaussian with mean 0 and std 1
observed_data_log = tf.log(1.0 + observed_data)
data_mean_log, data_var_log = tf.nn.moments(observed_data_log, 0)
data_var_log = tf.clip_by_value(data_var_log, 1e-6, 1e20) # Avoid zero values
aux_X = tf.nn.batch_normalization(observed_data_log, data_mean_log, data_var_log, offset=0.0, scale=1.0,
variance_epsilon=1e-6)
normalized_data.append(tf.dynamic_stitch(condition_indices, [missing_data, aux_X]))
normalization_parameters.append([data_mean_log, data_var_log])
elif types_list[i]['type'] == 'count':
# Input log of the data
aux_X = tf.log(observed_data)
normalized_data.append(tf.dynamic_stitch(condition_indices, [missing_data, aux_X]))
normalization_parameters.append([0.0, 1.0])
else:
# Don't normalize the categorical and ordinal variables
normalized_data.append(d)
normalization_parameters.append([0.0, 1.0]) # No normalization here
return normalized_data, normalization_parameters
def s_proposal_multinomial(X, batch_size, s_dim, tau, reuse):
# We propose a categorical distribution to create a GMM for the latent space z
log_pi = tf.layers.dense(inputs=X, units=s_dim, activation=None,
kernel_initializer=tf.random_normal_initializer(stddev=0.05), name='layer_1_' + 'enc_s',
reuse=reuse)
# Gumbel-softmax trick
log_pi_aux = tf.log(tf.clip_by_value(tf.nn.softmax(log_pi), 1e-6, 1))
U = -tf.log(-tf.log(tf.random_uniform([batch_size, s_dim])))
samples_s = tf.nn.softmax((log_pi_aux + U) / tau)
return samples_s, log_pi_aux
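# --- Editorial aside (illustrative, not part of the original module) ---
# The Gumbel-softmax trick used above can be sketched in plain NumPy; this
# helper is hypothetical, only meant as an illustration, and is not called
# anywhere in the module.
def _gumbel_softmax_example(log_pi, tau=1.0):
    log_pi = np.asarray(log_pi, dtype=float)
    u = np.random.rand(*log_pi.shape)  # U ~ Uniform(0, 1)
    g = -np.log(-np.log(u))  # Gumbel(0, 1) noise
    y = np.exp((log_pi + g) / tau)
    return y / y.sum(axis=-1, keepdims=True)  # soft, differentiable one-hot sample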
def z_proposal_GMM(X, samples_s, batch_size, z_dim, reuse):
# X_in = tf.layers.dense(inputs=X, units=100, activation=tf.nn.tanh,
# kernel_initializer=tf.random_normal_initializer(stddev=0.05), name='layer_0_' + 'mean_enc_z', reuse=reuse)
# We propose a GMM for z
mean_qz = tf.layers.dense(inputs=tf.concat([X, samples_s], 1), units=z_dim, activation=None,
kernel_initializer=tf.random_normal_initializer(stddev=0.05),
name='layer_1_' + 'mean_enc_z', reuse=reuse)
log_var_qz = tf.layers.dense(inputs=tf.concat([X, samples_s], 1), units=z_dim, activation=None,
kernel_initializer=tf.random_normal_initializer(stddev=0.05),
name='layer_1_' + 'logvar_enc_z', reuse=reuse)
# Avoid numerical problems
log_var_qz = tf.clip_by_value(log_var_qz, -15.0, 15.0)
# Rep-trick
eps = tf.random_normal((batch_size, z_dim), 0, 1, dtype=tf.float32)
samples_z = mean_qz + tf.multiply(tf.exp(log_var_qz / 2), eps)
return samples_z, [mean_qz, log_var_qz]
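# --- Editorial aside (not part of the original module) ---
# The "Rep-trick" above is the standard reparameterization trick:
#     z = mean + exp(log_var / 2) * eps,   eps ~ N(0, I)
# so samples of z remain differentiable with respect to mean_qz and
# log_var_qz, which is what allows the encoder to be trained by backprop.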
def z_proposal_Normal(X, batch_size, z_dim, reuse):
# We propose a GMM for z
mean_qz = tf.layers.dense(inputs=X, units=z_dim, activation=None,
kernel_initializer=tf.random_normal_initializer(stddev=0.05),
name='layer_1_' + 'mean_enc_z', reuse=reuse)
log_var_qz = tf.layers.dense(inputs=X, units=z_dim, activation=None,
kernel_initializer=tf.random_normal_initializer(stddev=0.05),
name='layer_1_' + 'logvar_enc_z', reuse=reuse)
# Avoid numerical problems
log_var_qz = tf.clip_by_value(log_var_qz, -15.0, 15.0)
# Rep-trick
eps = tf.random_normal((batch_size, z_dim), 0, 1, dtype=tf.float32)
samples_z = mean_qz + tf.multiply(tf.exp(log_var_qz / 2), eps)
return samples_z, [mean_qz, log_var_qz]
def z_proposal_GMM_factorized(X, samples_s, miss_list, batch_size, z_dim, reuse):
mean_qz = []
log_var_qz = []
for i, d in enumerate(X):
# Partition the data in missing data (0) and observed data n(1)
missing_data, observed_data = tf.dynamic_partition(d, miss_list[:, i], num_partitions=2)
missing_s, observed_s = tf.dynamic_partition(samples_s, miss_list[:, i], num_partitions=2)
condition_indices = tf.dynamic_partition(tf.range(tf.shape(d)[0]), miss_list[:, i], num_partitions=2)
# Get the dimensions of the observed data
nObs = tf.shape(observed_data)[0]
# Mean layer
aux_m = tf.layers.dense(inputs=tf.concat([observed_data, observed_s], 1), units=z_dim, activation=None,
kernel_initializer=tf.random_normal_initializer(stddev=0.05),
name='layer_1_' + 'mean_enc_z' + str(i), reuse=reuse)
# Reconstruct means with zeros (so they don't affect the mean_joint)
aux_mean_qz = tf.dynamic_stitch(condition_indices,
[tf.zeros([batch_size - nObs, z_dim], dtype=tf.float32), aux_m])
# Logvar layers
aux_lv = tf.layers.dense(inputs=tf.concat([observed_data, observed_s], 1), units=z_dim, activation=None,
kernel_initializer=tf.random_normal_initializer(stddev=0.05),
name='layer_1_' + 'logvar_enc_z' + str(i), reuse=reuse)
# Set a high value to make the variance in the missing cases negligible
aux_log_var_qz = tf.dynamic_stitch(condition_indices, [tf.fill([batch_size - nObs, z_dim], 15.0), aux_lv])
mean_qz.append(aux_mean_qz)
log_var_qz.append(aux_log_var_qz)
# Input prior
log_var_qz.append(tf.zeros([batch_size, z_dim]))
mean_qz.append(tf.zeros([batch_size, z_dim]))
# Compute full parameters, as a product of Gaussians distribution
log_var_qz_joint = -tf.reduce_logsumexp(tf.negative(log_var_qz), 0)
mean_qz_joint = tf.multiply(tf.exp(log_var_qz_joint),
tf.reduce_sum(tf.multiply(mean_qz, tf.exp(tf.negative(log_var_qz))), 0))
# Avoid numerical problems
log_var_qz = tf.clip_by_value(log_var_qz, -15.0, 15.0)
# Rep-trick
eps = tf.random_normal((batch_size, z_dim), 0, 1, dtype=tf.float32)
samples_z = mean_qz_joint + tf.multiply(tf.exp(log_var_qz_joint / 2), eps)
return samples_z, [mean_qz_joint, log_var_qz_joint]
def z_distribution_GMM(samples_s, z_dim, reuse):
# We propose a GMM for z
mean_pz = tf.layers.dense(inputs=samples_s, units=z_dim, activation=None,
kernel_initializer=tf.random_normal_initializer(stddev=0.05),
name='layer_1_' + 'mean_dec_z', reuse=reuse)
log_var_pz = tf.zeros([tf.shape(samples_s)[0], z_dim])
# Avoid numerical problems
log_var_pz = tf.clip_by_value(log_var_pz, -15.0, 15.0)
return mean_pz, log_var_pz
def y_partition(samples_y, types_list, y_dim_partition):
grouped_samples_y = []
# First element must be 0 and the length of the partition vector must be len(types_dict)+1
if len(y_dim_partition) != len(types_list):
raise Exception("The length of the partition vector must match the number of variables in the data + 1")
# Insert a 0 at the beginning of the cumsum vector
partition_vector_cumsum = np.insert(np.cumsum(y_dim_partition), 0, 0)
for i in range(len(types_list)):
grouped_samples_y.append(samples_y[:, partition_vector_cumsum[i]:partition_vector_cumsum[i + 1]])
return grouped_samples_y
def theta_estimation_from_z(samples_z, types_list, miss_list, batch_size, reuse):
theta = []
# Independent yd -> Compute p(xd|yd)
for i, d in enumerate(types_list):
# Partition the data in missing data (0) and observed data (1)
missing_y, observed_y = tf.dynamic_partition(samples_z, miss_list[:, i], num_partitions=2)
condition_indices = tf.dynamic_partition(tf.range(tf.shape(samples_z)[0]), miss_list[:, i], num_partitions=2)
nObs = tf.shape(observed_y)[0]
# Different layer models for each type of variable
if types_list[i]['type'] == 'real':
params = theta_real(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
elif types_list[i]['type'] == 'pos':
params = theta_pos(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
elif types_list[i]['type'] == 'count':
params = theta_count(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
elif types_list[i]['type'] == 'cat':
params = theta_cat(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
elif types_list[i]['type'] == 'ordinal':
params = theta_ordinal(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
theta.append(params)
return theta
def theta_estimation_from_y(samples_y, types_list, miss_list, batch_size, reuse):
theta = []
# Independent yd -> Compute p(xd|yd)
for i, d in enumerate(samples_y):
# Partition the data in missing data (0) and observed data (1)
missing_y, observed_y = tf.dynamic_partition(d, miss_list[:, i], num_partitions=2)
condition_indices = tf.dynamic_partition(tf.range(tf.shape(d)[0]), miss_list[:, i], num_partitions=2)
nObs = tf.shape(observed_y)[0]
# Different layer models for each type of variable
if types_list[i]['type'] == 'real':
params = theta_real(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
elif types_list[i]['type'] == 'pos':
params = theta_pos(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
elif types_list[i]['type'] == 'count':
params = theta_count(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
elif types_list[i]['type'] == 'cat':
params = theta_cat(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
elif types_list[i]['type'] == 'ordinal':
params = theta_ordinal(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
theta.append(params)
return theta
def theta_estimation_from_ys(samples_y, samples_s, types_list, miss_list, batch_size, reuse):
theta = []
# Independent yd -> Compute p(xd|yd)
for i, d in enumerate(samples_y):
# Partition the data in missing data (0) and observed data (1)
missing_y, observed_y = tf.dynamic_partition(d, miss_list[:, i], num_partitions=2)
missing_s, observed_s = tf.dynamic_partition(samples_s, miss_list[:, i], num_partitions=2)
condition_indices = tf.dynamic_partition(tf.range(tf.shape(d)[0]), miss_list[:, i], num_partitions=2)
nObs = tf.shape(observed_y)[0]
# Different layer models for each type of variable
if types_list[i]['type'] == 'real':
# params = theta_real(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
params = theta_real_s(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs,
batch_size, i, reuse)
elif types_list[i]['type'] == 'pos':
# params = theta_pos(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
params = theta_pos_s(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs,
batch_size, i, reuse)
elif types_list[i]['type'] == 'count':
# params = theta_count(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
params = theta_count_s(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs,
batch_size, i, reuse)
elif types_list[i]['type'] == 'cat':
# params = theta_cat(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
params = theta_cat_s(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs,
batch_size, i, reuse)
elif types_list[i]['type'] == 'ordinal':
# params = theta_ordinal(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse)
params = theta_ordinal_s(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs,
batch_size, i, reuse)
theta.append(params)
return theta
def theta_real(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse):
# Mean layer
h2_mean = observed_data_layer(observed_y, missing_y, condition_indices, output_dim=types_list[i]['dim'],
name='layer_h2' + str(i), reuse=reuse, bias=True)
# Sigma Layer
h2_sigma = observed_data_layer(observed_y, missing_y, condition_indices, output_dim=types_list[i]['dim'],
name='layer_h2_sigma' + str(i), reuse=reuse, bias=True)
return [h2_mean, h2_sigma]
def theta_real_s(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs, batch_size, i,
reuse):
# Mean layer
h2_mean = observed_data_layer(tf.concat([observed_y, observed_s], 1), tf.concat([missing_y, missing_s], 1),
condition_indices, output_dim=types_list[i]['dim'], name='layer_h2' + str(i),
reuse=reuse, bias=False)
# Sigma Layer
h2_sigma = observed_data_layer(observed_s, missing_s, condition_indices, output_dim=types_list[i]['dim'],
name='layer_h2_sigma' + str(i), reuse=reuse, bias=False)
return [h2_mean, h2_sigma]
def theta_pos(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse):
# Mean layer
h2_mean = observed_data_layer(observed_y, missing_y, condition_indices, output_dim=types_list[i]['dim'],
name='layer_h2' + str(i), reuse=reuse, bias=True)
# Sigma Layer
h2_sigma = observed_data_layer(observed_y, missing_y, condition_indices, output_dim=types_list[i]['dim'],
name='layer_h2_sigma' + str(i), reuse=reuse, bias=True)
return [h2_mean, h2_sigma]
def theta_pos_s(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs, batch_size, i,
reuse):
# Mean layer
h2_mean = observed_data_layer(tf.concat([observed_y, observed_s], 1), tf.concat([missing_y, missing_s], 1),
condition_indices, output_dim=types_list[i]['dim'], name='layer_h2' + str(i),
reuse=reuse, bias=False)
# Sigma Layer
h2_sigma = observed_data_layer(observed_s, missing_s, condition_indices, output_dim=types_list[i]['dim'],
name='layer_h2_sigma' + str(i), reuse=reuse, bias=False)
return [h2_mean, h2_sigma]
def theta_count(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse):
# Lambda Layer
h2_lambda = observed_data_layer(observed_y, missing_y, condition_indices, output_dim=types_list[i]['dim'],
name='layer_h2' + str(i), reuse=reuse, bias=True)
return h2_lambda
def theta_count_s(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs, batch_size, i,
reuse):
# Lambda Layer
h2_lambda = observed_data_layer(tf.concat([observed_y, observed_s], 1), tf.concat([missing_y, missing_s], 1),
condition_indices, output_dim=types_list[i]['dim'], name='layer_h2' + str(i),
reuse=reuse, bias=False)
return h2_lambda
def theta_cat(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse):
# Log pi layer, with zeros in the first value to avoid the identifiability problem
h2_log_pi_partial = observed_data_layer(observed_y, missing_y, condition_indices,
output_dim=int(types_list[i]['dim']) - 1, name='layer_h2' + str(i),
reuse=reuse, bias=True)
h2_log_pi = tf.concat([tf.zeros([batch_size, 1]), h2_log_pi_partial], 1)
return h2_log_pi
def theta_cat_s(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs, batch_size, i,
reuse):
# Log pi layer, with zeros in the first value to avoid the identifiability problem
h2_log_pi_partial = observed_data_layer(tf.concat([observed_y, observed_s], 1),
tf.concat([missing_y, missing_s], 1), condition_indices,
output_dim=int(types_list[i]['dim']) - 1, name='layer_h2' + str(i),
reuse=reuse, bias=False)
h2_log_pi = tf.concat([tf.zeros([batch_size, 1]), h2_log_pi_partial], 1)
return h2_log_pi
def theta_ordinal(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse):
# Theta layer, Dimension of ordinal - 1
h2_theta = observed_data_layer(observed_y, missing_y, condition_indices, output_dim=int(types_list[i]['dim']) - 1,
name='layer_h2' + str(i), reuse=reuse, bias=True)
# Mean layer, a single value
h2_mean = observed_data_layer(observed_y, missing_y, condition_indices, output_dim=1,
name='layer_h2_sigma' + str(i), reuse=reuse, bias=True)
return [h2_theta, h2_mean]
def theta_ordinal_s(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs, batch_size, i,
reuse):
# Theta layer, Dimension of ordinal - 1
h2_theta = observed_data_layer(observed_s, missing_s, condition_indices, output_dim=int(types_list[i]['dim']) - 1,
name='layer_h2' + str(i), reuse=reuse, bias=False)
# Mean layer, a single value
h2_mean = observed_data_layer(tf.concat([observed_y, observed_s], 1), tf.concat([missing_y, missing_s], 1),
condition_indices, output_dim=1, name='layer_h2_sigma' + str(i), reuse=reuse,
bias=False)
return [h2_theta, h2_mean]
def observed_data_layer(observed_data, missing_data, condition_indices, output_dim, name, reuse, bias):
# Train a layer with the observed data and reuse it for the missing data
obs_output = tf.layers.dense(inputs=observed_data, units=output_dim, activation=None,
kernel_initializer=tf.random_normal_initializer(stddev=0.05), name=name, reuse=reuse,
trainable=True, use_bias=bias)
miss_output = tf.layers.dense(inputs=missing_data, units=output_dim, activation=None,
kernel_initializer=tf.random_normal_initializer(stddev=0.05), name=name, reuse=True,
trainable=False, use_bias=bias)
# Join back the data
output = tf.dynamic_stitch(condition_indices, [miss_output, obs_output])
return output
def loglik_evaluation(batch_data_list, types_list, miss_list, theta, tau2, normalization_params, reuse):
log_p_x = []
log_p_x_missing = []
samples_x = []
params_x = []
# Independent yd -> Compute log(p(xd|yd))
for i, d in enumerate(batch_data_list):
# Select the likelihood for the types of variables
loglik_function = getattr(loglik_models_missing_normalize, 'loglik_' + types_list[i]['type'])
out = loglik_function([d, miss_list[:, i]], types_list[i], theta[i], normalization_params[i], tau2,
kernel_initializer=tf.random_normal_initializer(stddev=0.05),
name='layer_1_mean_dec_x' + str(i), reuse=reuse)
log_p_x.append(out['log_p_x'])
log_p_x_missing.append(out['log_p_x_missing']) # Test-loglik element
samples_x.append(out['samples'])
params_x.append(out['params'])
return log_p_x, log_p_x_missing, samples_x, params_x
``` |
{
"source": "jonatanvm/nlp-project",
"score": 3
} |
#### File: nlp-project/notebooks/plotting.py
```python
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
def TSNE_scatterplot(model, word, neg_word, save=False):
close_words = np.array(model.wv.most_similar([word]))
neg_words = np.array(model.wv.most_similar(negative=[neg_word]))
arrays = np.vstack((model.wv[[word]], model.wv[close_words[:, 0]], model.wv[neg_words[:, 0]]))
word_labels = [word] + list(close_words[:, 0]) + list(neg_words[:, 0])
color_list = ['blue'] * len(close_words) + ['green'] * len(neg_words)
Y = TSNE(n_components=2, random_state=0, perplexity=15).fit_transform(arrays)
x = Y[1:, 0]
y = Y[1:, 1]
plt.figure(figsize=(11, 4))
plt.scatter(Y[0, 0], Y[0, 1], marker="*", c='blue', s=120)
plt.scatter(x, y, c=color_list)
plt.grid()
annotations = []
for line in range(len(word_labels)):
annotations.append(plt.text(Y[line, 0], Y[line, 1], word_labels[line].title()))
if save:
plt.savefig(f"{str(model)}.png", bbox_inches='tight')
``` |
{
"source": "jonatanwestholm/garageofcode",
"score": 2
} |
#### File: garageofcode/other/xkcd730.py
```python
import time
import numpy as np
import networkx as nx
#from garageofcode.mip.solver import get_solver
from sentian_miami import get_solver
def get_xkcd730_graph():
G = nx.DiGraph()
edges = [(0, 1), (0, 7), (0, 9), (0, 10),
(1, 7), (1, 3), (1, 6),
(3, 4), (3, 5),
(4, 5), (4, 9),
(5, 8),
(6, 7), (6, 8),
(7, 8), (7, 9), (7, 12), (7, 13), (7, 14),
(8, 14),
(9, 10), (9, 11),
(10, 11),
(11, 12), (11, 13),
(12, 13),
(13, 14)
]
G.add_edges_from(edges)
return G
def get_simple_graph():
G = nx.DiGraph()
edges = [(0, 1), (0, 2),
(1, 2), (1, 3),
(2, 3),
]
G.add_edges_from(edges)
return G
def get_potentials(G, s, t):
# It's not necessary to use a MIP solver for this;
# it's just a linear equation system.
# But it makes for a simple formulation.
solver = get_solver("mono")
t0 = time.time()
potentials = {node: solver.NumVar(lb=0) for node in G}
currents = {e: solver.NumVar() for e in G.edges}
# Edge conditions
U0, U_1 = 1, 0
total_potential = U0 - U_1
solver.Add(potentials[s] == U0)
solver.Add(potentials[t] == U_1)
# Kirchhoff's current law: current is conserved at internal nodes
for node in G:
in_current = solver.Sum([currents[e] for e in G.in_edges(node)])
out_current = solver.Sum([currents[e] for e in G.out_edges(node)])
if node == s:
total_in = out_current - in_current
elif node == t:
total_out = in_current - out_current
else:
solver.Add(in_current == out_current)
# Ohm's law: delta U = I * R
for e in G.edges:
i, j = e
Ui = potentials[i]
Uj = potentials[j]
Iij = currents[e]
Rij = 1 # ignore resistance parameter for now
solver.Add(Ui - Uj == Rij * Iij)
t1 = time.time()
print("Build time: {0:.3f}".format(t1 - t0))
solver.Solve(time_limit=10, verbose=True)
total_current = solver.solution_value(total_in)
total_resistance = total_potential / total_current
print("Total resistance: {0:.3f}".format(total_resistance))
print("Total current: {0:.3f}".format(total_current))
for node, potential in sorted(potentials.items()):
print("{0:d} {1:.3f}".format(node, solver.solution_value(potential)))
def main():
np.random.seed(0)
G = get_xkcd730_graph()
#G = get_simple_graph()
get_potentials(G, 0, max(G))
if __name__ == '__main__':
main()
``` |
{
"source": "jonatanwestholm/sugarrush",
"score": 3
} |
#### File: sugarrush/sugarrush/solver.py
```python
from pysat.solvers import Solver
from pysat.card import CardEnc, EncType, ITotalizer
from pysat.formula import CNF
from sugarrush.utils import flatten_simple as flatten
from sugarrush.utils import dbg, a_eq_i, is_iter
class SugarRush(Solver):
"""
Quality-of-life wrapper for pysat.solvers.Solver
* Does automatic bookkeeping of literals.
* When calling constraint builders, new literals are assigned,
that do not interfere with existing literals.
* New literals can also be created and accessed by :meth:`var`.
* After solving, the solution value for a given var,
can be obtained by :meth:`solution_value`.
* Constraint builders return CNF's,
but do not add them automatically to the model.
"""
def __init__(self, name="glucose4"):
super().__init__(name=name)
self.var2val = {}
self.lits = set([0])
def var(self):
"""
**Added in SugarRush**\n
Return a new unused variable.
"""
self.lits.add(self._top_id() + 1)
return self._top_id()
def add(self, c):
"""
**Added in SugarRush**\n
If c is iterable of iterable of ints, then interpret as CNF.
If c is iterable of ints (simple list of literals),
then interpret as a single clause.\n
Simple list of literals:
.. code-block:: python
>>> from sugarrush.solver import SugarRush
>>> with SugarRush() as solver:
X = [solver.var() for _ in range(6)]
solver.add(X)
solver.solve()
print(solver.solution_values(X))
[1, 0, 0, 0, 0, 0]
List of list of literals:
.. code-block:: python
>>> from sugarrush.solver import SugarRush
>>> with SugarRush() as solver:
X = [solver.var() for _ in range(6)]
solver.add([X])
solver.solve()
print(solver.solution_values(X))
[1, 0, 0, 0, 0, 0]
Normal CNF:
.. code-block:: python
>>> from sugarrush.solver import SugarRush
>>> with SugarRush() as solver:
X = [solver.var() for _ in range(6)]
solver.add([X[:3], X[3:]])
solver.solve()
print(solver.solution_values(X))
[1, 0, 0, 1, 0, 0]
"""
for elem in c:
try:
iter(elem)
except TypeError:
self._add(c) # c is list of ints
break
self._add(*c) # c is list of lists of ints
break
def _add(self, *clauses):
for clause in clauses:
self._add_clause(clause)
def _add_clause(self, clause):
self._add_lits(clause)
self.add_clause(clause)
def _add_lits(self, lits):
"""
**Added in SugarRush**\n
Update the internal set of literals.
"""
for lit in lits:
self.lits.add(abs(lit))
def _add_lits_from(self, cnf):
"""
**Added in SugarRush**\n
Update the internal set of literals from a CNF.
"""
self._add_lits(flatten(cnf))
def _top_id(self):
"""
**Added in SugarRush**\n
Return the largest valued literal in use by the model.
"""
return max(self.lits)
def _init_var2val(self):
"""
**Added in SugarRush**\n
Initialize a mapping to the solved values. The mapping
is such that **var2val[var]** has the same boolean value as
:param:`var` in the satisfying assignment.
"""
for val in self.get_model():
if abs(val) in self.lits:
self.var2val[abs(val)] = (val > 0) * 1 # 1-indexed
def solve(self, **kwargs):
ret = super().solve(**kwargs)
self.solver_called = True
return ret
def solution_value(self, var):
"""
**Added in SugarRush**\n
Get solved value of **var**. Must not be run before successful solve.
"""
try:
_ = self.solver_called
except AttributeError:
raise TypeError("Solver.solution_value() called before model solved")
if (not self.var2val) or self.solver_called:
self._init_var2val()
self.solver_called = False
if var not in self.var2val:
return 0
else:
return self.var2val[var]
def solution_values(self, variables):
"""
**Added in SugarRush**\n
List version of :meth:`solution_value`.
"""
return [self.solution_value(var) for var in variables]
def print_stats(self):
"""
**Added in SugarRush**\n
Print number of variables and number of clauses used by the solver.
"""
print("Nof variables:", self.nof_vars())
print("Nof clauses:", self.nof_clauses())
def print_values(self):
"""
**Added in SugarRush**\n
Print full mapping from vars to boolean values
"""
for var, val in sorted(self.var2val.items()):
print("{}: {}".format(var, val))
"""
Constructs
"""
def equals(self, lits, bound=1, encoding=EncType.seqcounter):
"""
**Added in SugarRush**\n
Uses :meth:`pysat.card.CardEnc.equals`.
Adds automatic bookkeeping of literals.
"""
cnf = CardEnc.equals(lits=lits,
bound=bound,
encoding=encoding,
top_id=self._top_id())
clauses = cnf.clauses
self._add_lits_from(clauses)
return clauses
def atmost(self, lits, bound=1, encoding=EncType.seqcounter):
"""
**Added in SugarRush**\n
Uses :meth:`pysat.card.CardEnc.atmost`.
Adds automatic bookkeeping of literals.
"""
cnf = CardEnc.atmost(lits=lits,
bound=bound,
encoding=encoding,
top_id=self._top_id())
clauses = cnf.clauses
self._add_lits_from(clauses)
return clauses
#self.add(clauses)
#return cnf.clauses
def negate(self, clauses):
"""
**Added in SugarRush**\n
Uses :meth:`pysat.formula.CNF.negate`.
Adds automatic bookkeeping of literals.
"""
cnf = CNF(from_clauses=clauses)
neg = cnf.negate(topv=self._top_id())
neg_clauses = neg.clauses
self._add_lits_from(neg_clauses)
#neg_force = [[-auxvar] for auxvar in neg.auxvars]
#print(neg_force)
#self.add(neg_force)
#print(neg.auxvars)
#self.add([neg.auxvars])
return neg_clauses
def int2binvec(solver, x, N):
"""
**Added in SugarRush**\n
Given an integer, return an N-length binary vector
and clauses equal to that integer.
"""
if is_iter(x):
return x, []
else:
i = x
x = [solver.var() for _ in range(N)]
return x, a_eq_i(x, i)
def xor(self, x1, x2):
"""
**Added in SugarRush**\n
Returns an indicator t <=> xor(x1, x2),
and clauses.
Adds automatic bookkeeping of literals.
"""
t = self.var()
clauses = [[-t, x1, x2], [-t, -x1, -x2], [t, x1, -x2], [t, -x1, x2]]
return t, clauses
def parity(self, X):
"""
**Added in SugarRush**\n
Returns an indicator t, for whether the
sum of X is even (t=0) or odd (t=1).
Adds automatic bookkeeping of literals.
"""
if len(X) == 0:
raise ValueError("Cannot take parity of zero variables")
clauses = []
t = X[0]
for x in X[1:]:
t, c = self.xor(x, t)
clauses.extend(c)
return t, clauses
def less(self, a, b):
return self.less_(a, b, strict=True)
def leq(self, a, b):
return self.less_(a, b, strict=False)
def less_(self, a, b, strict):
"""
**Added in SugarRush**\n
Return indicator and constraints for a less than b.
if strict: a < b
if not strict: a <= b
Adds automatic bookkeeping of literals.
"""
if is_iter(a):
N = len(a)
else:
N = len(b) # b better be iterable then
a, cnfa = self.int2binvec(a, N)
b, cnfb = self.int2binvec(b, N)
print(cnfa, cnfb)
cnf = cnfa + cnfb
assert len(a) == len(b)
last_iteration = len(a) - 1
ti_1 = None # t(i - 1)
for iteration, (ai, bi) in enumerate(zip(a, b)):
# The t's indicate that given the current assumptions
# about the literals, the constraint is already fulfilled.
# If ti becomes true anywhere,
# then this will propagate to all subsequent clauses,
# and pop them.
if ti_1 is None:
already_smaller = [[-ai], [bi]]
else:
already_smaller = [[ti_1, -ai], [ti_1, bi]]
ti, ti_bind = self.indicator(already_smaller)
cnf.extend(ti_bind)
if iteration == last_iteration and strict:
pass
elif iteration == last_iteration and not strict:
ti, ti_bind = self.indicator([[ti, -ai, bi]])
cnf.extend(ti_bind)
else:
cnf.append([ti, -ai, bi]) # ti OR (ai <= bi) == (ti OR !ai OR bi)
ti_1 = ti
return ti, cnf
def plus(self, a, b, z):
"""
**Added in SugarRush**\n
Constrains
z = (a + b) % 2**N
N == len(a) == len(b) == len(z)
for the inputs that are binary vectors,
integer inputs are converted to binary vectors.
In other words, uintN addition.
The leftmost bit is assumed to be the highest bit.
"""
if is_iter(a):
N = len(a)
elif is_iter(b):
N = len(b)
else:
N = len(z)
a, cnfa = self.int2binvec(a, N)
b, cnfb = self.int2binvec(b, N)
z, cnfz = self.int2binvec(z, N)
assert len(a) == len(b) == len(z)
cnf = cnfa + cnfb + cnfz
return cnf + self.plus_(a, b, z)
def plus_(self, a, b, z):
"""
**Added in SugarRush**\n
Internal method
Constrains
z = (a + b) % 2**N
N == len(a) == len(b) == len(z)
In other words, uintN addition.
The leftmost bit is assumed to be the highest bit.
"""
cnf = []
carry = None
for ap, bp, zp in zip(a[::-1], b[::-1], z[::-1]):
if carry is None:
t, t_bind = self.parity([ap, bp])
carry = self.var()
cnf.extend([[-carry, ap], [-carry, bp], [carry, -ap, -bp]]) # carry == ap AND bp
else:
t, t_bind = self.parity([ap, bp, carry])
carry_1 = self.var()
cnf.extend([[carry_1, -ap, -bp], [carry_1, -ap, -carry], [carry_1, -bp, -carry],
[-carry_1, ap, bp], [-carry_1, ap, carry], [-carry_1, bp, carry]])
# carry_1 == (ap + bp + carry >= 2)
carry = carry_1
cnf.extend(t_bind)
cnf.extend([[zp, -t], [-zp, t]]) # zp == t
return cnf
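# --- Editorial aside (not part of the original library) ---
# plus_ encodes a ripple-carry adder, least-significant bit first:
#     sum bit:   z_p = a_p XOR b_p XOR carry_in   (via self.parity)
#     carry_out: true iff at least two of {a_p, b_p, carry_in} are true
# e.g. 011 + 001 -> 100, with the carry rippling through the two low bits.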
def element(self, v, a, z):
cnf = []
try:
K = len(a)
except TypeError:
# the given a is an integer
i = a
assert i < len(v), "list index out of range"
K = 0
while 2**K < len(v):
K += 1
a = [self.var() for _ in range(K)]
cnf.extend(a_eq_i(a, i))
return cnf + self.element_(v, a, z)
def element_(self, v, a, z):
"""Constrain
z = v[a]
where a is uintK,
z is uintN,
v is a vector of at most 2**K uintN
"""
assert len(v) <= 2**len(a)
assert all([len(vi) == len(z) for vi in v])
cnf = []
for i, vi in enumerate(v):
a_eq_i_clauses = a_eq_i(a, i)
ti, ti_bind = self.indicator(a_eq_i_clauses)
cnf.extend(ti_bind)
for vij, zj in zip(vi, z):
# if ti is true then vij == zj
cnf.extend([[-ti, -vij, zj], [-ti, vij, -zj]])
return cnf
def indicator(self, cnf):
"""
**Added in SugarRush**\n
Uses Tseytin transformation to create a variable that has the
same boolean value as the given CNF.
Does automatic bookkeeping of literals.
Creates len(cnf) + 1 new variables
Return indicator variable, and the equivalence clauses
"""
indicators = []
clauses = []
for clause in cnf:
p, equivalence = self.indicate_disjunction(clause)
indicators.append(p)
clauses.extend(equivalence)
p, equivalence = self.indicate_conjunction(indicators)
clauses.extend(equivalence)
return p, clauses
def indicate_disjunction(self, clause):
"""
**Added in SugarRush**\n
p <=> (c1 OR c2 OR ... OR cn)
"""
if len(clause) == 1:
return clause[0], []
p = self.var()
right_imp = [clause + [-p]] # p => (c1 OR c2 OR ... OR cn)
left_imp = [[-c, p] for c in clause] # (c1 OR c2 OR ... OR cn) => p
equivalence = right_imp + left_imp
return p, equivalence
def indicate_conjunction(self, clause):
"""
**Added in SugarRush**\n
p <=> (c1 AND c2 AND ... AND cn)
"""
p = self.var()
right_imp = [[-p, c] for c in clause] # p => (c1 AND c2 AND ... AND cn)
left_imp = [[-c for c in clause] + [p]] # (c1 AND c2 AND ... AND cn) => p
equivalence = right_imp + left_imp
return p, equivalence
def disjunction(self, cnfs):
"""
**Added in SugarRush**\n
Uses :meth:`indicator` to create a CNF that has the same boolean value
as the disjunction of a given set of CNF's.
Does automatic bookkeeping of literals.
"""
inds = []
clauses = []
for cnf in cnfs:
p, equiv = self.indicator(cnf)
inds.append(p)
clauses.extend(equiv)
clauses.append(inds)
return clauses
def itotalizer(self, lits, ubound=None):
"""
**Added in SugarRush**\n
Uses :meth:`pysat.card.ITotalizer`.
Adds automatic bookkeeping of literals.
"""
if ubound is None:
ubound = len(lits)
itot = ITotalizer(lits, ubound)
clauses = itot.cnf.clauses
bound_vars = itot.rhs
self._add_lits_from(clauses)
return clauses, bound_vars
def optimize(self, itot, search="linear"):
if search == "linear":
return self.optimize_linear(itot)
elif search == "binary":
return self.optimize_binary(itot)
else:
raise Exception("Unknown search method!")
def optimize_linear(self, itot):
self.print_stats()
ub = len(itot) - 1
if not self.solve(assumptions=[-itot[ub]]):
return None
ub -= 1
while ub >= 0:
print("ub:", ub)
if not self.solve(assumptions=[-itot[ub]]):
print("returning:", ub + 1)
return ub + 1
else:
ub -= 1
return 0
def optimize_binary(self, itot, debug=False):
"""
**Added in SugarRush**\n
Uses binary search to find the smallest satisfiable value for the ITotalizer.
Assumes that satisfiability is monotonically increasing.
"""
upper = len(itot) - 1 # smallest known to be feasible
lower = 0 # largest known to be infeasible (after initial check)
if not self.solve(assumptions=[-itot[upper]]):
return None
if self.solve(assumptions=[-itot[lower]]):
return 0
while True:
mid = (upper + lower) // 2
dbg("upper: %d" % upper, debug)
dbg("mid: %d" % mid, debug)
dbg("lower: %d" % lower, debug)
if mid == lower:
break
satisfiable = self.solve(assumptions=[-itot[mid]])
dbg("satisfiable: %d" % satisfiable, debug)
if satisfiable:
upper = mid
else:
lower = mid
dbg("", debug)
self.solve(assumptions=[-itot[upper]])
return upper
``` |
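The `indicate_disjunction` clauses above are a standard Tseytin pattern. The following standalone sketch (hypothetical variable numbering, no SugarRush dependency) brute-force checks that those clauses force `p <=> (c1 OR c2)`:
```python
# Standalone check of the Tseytin disjunction encoding used above.
# Variables 1 and 2 play the roles of c1 and c2; 3 is the indicator p (hypothetical numbering).
from itertools import product

clause = [1, 2]
p = 3
cnf = [clause + [-p]] + [[-c, p] for c in clause]  # same pattern as indicate_disjunction

def satisfied(cnf, assignment):
    # assignment maps var -> bool; a clause holds if any of its literals is true
    return all(any(assignment[abs(lit)] == (lit > 0) for lit in cl) for cl in cnf)

for c1, c2, pv in product([False, True], repeat=3):
    model = {1: c1, 2: c2, 3: pv}
    # The clauses are satisfied exactly when p equals (c1 OR c2)
    assert satisfied(cnf, model) == (pv == (c1 or c2))
print("indicator encoding verified for all assignments")
```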
{
"source": "jonatas2014/simple_chat_with_Blowfish",
"score": 4
} |
#### File: jonatas2014/simple_chat_with_Blowfish/criptography.py
```python
from Crypto.Cipher import Blowfish
DEFAULT_KEY = b"You are worthless but I like you"
ENCONDING_FORMAT = 'UTF-8'
class BlowfishCriptography:
def __init__(self, key):
self.cipher = Blowfish.new(key)
def __prepare_for_encryption(self, text):
"""
Ensures the message length is multiple of 8
:param text: The content to be prepared for encryption
:type: str
:return: The content padded with trailing space characters
"""
return text.ljust(8 * (len(text) // 8 + 1))
def encrypt(self, plain_text):
"""
Encrypts the text
:param plain_text: The text to be encrypted
:type: str
:return: Encrypted text using blowfish algorithm
"""
plain_text = self.__prepare_for_encryption(plain_text)
return self.cipher.encrypt(plain_text)
def decrypt(self, cipher_text):
"""
Decrypts the text. Also, decodes the decrypted text to UTF-8 and removes
the trailing space characters
:param cipher_text: The ciphered text to be decrypted
:type: str
:return: Decrypted text which was encrypted using Blowfish algorithm
"""
decrypted_text = self.cipher.decrypt(cipher_text).decode(ENCONDING_FORMAT)
decrypted_text = decrypted_text.strip()
return decrypted_text
if __name__ == '__main__':
message = 'Alone on a Saturday night? God, you\'re pathetic'
cipher = BlowfishCriptography(DEFAULT_KEY)
encrypted_text = cipher.encrypt(message)
decrypted_text = cipher.decrypt(encrypted_text)
    assert message == decrypted_text, 'Messages are not equal after applying encrypt and decrypt methods'
``` |
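Note that `__prepare_for_encryption` always pads to the next multiple of 8, so an input whose length is already a multiple of 8 still gains 8 trailing spaces, which `decrypt` later strips (along with any genuine trailing spaces in the message). A quick check of the padding arithmetic, using only the standard library:
```python
# Padding rule used by __prepare_for_encryption: grow to the next multiple of 8.
for text in ["hi", "12345678", "a" * 13]:
    padded_len = 8 * (len(text) // 8 + 1)
    print(len(text), "->", padded_len)  # 2 -> 8, 8 -> 16, 13 -> 16
```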
{
"source": "jonatasbaldin/pygoqrme",
"score": 3
} |
#### File: pygoqrme/pygoqrme/pygoqrme.py
```python
import requests, re, os
from urllib.parse import quote
class Api(object):
"""
Main class
"""
def __init__(self,
size='200x200',
charset_source='UTF-8',
charset_target='UTF-8',
ecc='L',
color='0-0-0',
bgcolor='255-255-255',
margin=1,
qzone=0,
fmt='png'):
"""
Define common parameters to all QRCode types
"""
self.size = size
self.charset_source = charset_source
self.charset_target = charset_target
self.ecc = ecc
self.color = color
self.bgcolor = bgcolor
self.margin = margin
self.qzone = qzone
self.fmt = fmt
def text(self, text):
"""
Creates QRCode with simple text
Parameters:
            text: the text to encode in the QR code
"""
return self.req(text)
def url(self, url):
"""
Creates QRCode with url
Parameters:
url: url to be created
"""
# Django's Validator (modified)
url_regex = re.compile(
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# test if its url
# and if it is, encoded it
data_url = re.findall(url_regex, url)
        if data_url:
            url = quote(url)
        else:
            return 'Invalid URL'
return self.req(url)
def wifi(self, ssid, security, password):
"""
Creates QRCode to WiFi connection
Parameters:
ssid: wifi connection name
security: wifi security, WEP or WPA
password: wifi password
"""
data = 'WIFI:S:{};T:{};P:{};;'.format(ssid, security, password)
return self.req(data)
def geo(self, latitude, longitude, distance):
"""
Creates QRCode with geolocation
Parameters:
latitude: specifies latitude
longitude: specifies longitude
distance: specifies distance to map
"""
data = 'geo:{},{},{}'.format(latitude, longitude, distance)
return self.req(data)
def call(self, phone):
"""
Creates QRCode with a number to call
Parameters:
phone: number to call
"""
data = 'tel:{}'.format(phone)
return self.req(data)
def sms(self, phone, message=''):
"""
Creates QRCode with message to be sent via sms
Parameters:
phone: number to send
message: sms message to send
"""
data = 'SMSTO:{}:{}'.format(phone, message)
return self.req(data)
def mail(self, email, subject, body):
"""
Creates QRCode to send an email
Parameters:
email: destination email
subject: email subject
body: email body
"""
data = 'mailto:{}?subject={}&body={}'.format(email, subject, body)
return self.req(data)
def save(self, filename):
"""
Saves the QRCode to a file
Parameters:
filename: file to be saved, if it has no extensions,
automatically adds it
"""
# if file has no extension, add extension to it
if not os.path.splitext(filename)[-1]:
filename = filename + '.' + self.fmt
# writes qrcode to file
with open(filename, 'bw') as f:
f.write(self.qrcode)
def req(self, data):
"""
Makes the requests
Parameters:
data: data to be sent to goqr.me
"""
url = 'https://api.qrserver.com/v1/create-qr-code/?data={}&size={}&charset-source={}&charset_target={}&ecc={}&color={}&bgcolor={}&margin={}&qzone={}&format={}'.format(
data, self.size, self.charset_source, self.charset_target, self.ecc, self.color, self.bgcolor, self.margin, self.qzone, self.fmt)
qr_request = requests.get(url)
self.qrcode = qr_request.content
return self.qrcode
``` |
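A minimal usage sketch for the Api class above; it issues a real request to the goqr.me endpoint hard-coded in req(), so network access is assumed and the file name is illustrative.
```python
# Minimal usage sketch (requires network access; file name is illustrative).
api = Api(size='300x300', fmt='png')
api.text('Hello, QR')   # fetches the QR code bytes via req()
api.save('hello_qr')    # saved as hello_qr.png (extension appended by save())
```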
{
"source": "jonatasbaldin/wsquiz",
"score": 3
} |
#### File: wsquiz/quiz/tests.py
```python
import json
from channels import Channel, Group
from channels.test import ChannelTestCase, WSClient
from quiz import models
class ChannelTests(ChannelTestCase):
def _setup_player(self, name='Player'):
return models.Player.objects.create(name=name)
def _setup_question(self, text='My Question'):
return models.Question.objects.create(text=text)
def _setup_choice(self, question, text='My Choice', right_answer=True):
return models.Choice.objects.create(
question=question,
text=text,
right_answer=right_answer,
)
def _setup_answer(self, question, choice, player):
return models.QuestionChoicePlayer.objects.create(
question=question,
choice=choice,
player=player
)
def test_top_players(self):
player = self._setup_player()
question = self._setup_question()
choice = self._setup_choice(question=question)
Group('top_players').add('test_channel')
self._setup_answer(
player=player,
question=question,
choice=choice
)
top_players = models.Player.top_players()
result = self.get_next_message('test_channel', require=True)
result = json.loads(result['text'])
self.assertEqual(result[0]['name'], top_players[0].name)
self.assertEqual(result[0]['right_answers'], top_players[0].right_answers)
self.assertEqual(result[0]['wrong_answers'], top_players[0].wrong_answers)
self.assertEqual(result[0]['time_playing'], top_players[0].time_playing)
def test_send_answer_no_more_question(self):
player = self._setup_player()
question = self._setup_question()
choice = self._setup_choice(question=question)
text = {
'player': player.name,
'question_id': question.id,
'choice_id': choice.id,
}
client = WSClient()
client.send_and_consume('websocket.receive', text=text)
result = client.receive()
self.assertFalse(result['next_question'])
self.assertTrue(result['game_finished'])
self.assertTrue(result['last_right_answer'], choice.text)
self.assertEqual(result['time_playing'], player.time_playing)
def test_send_answer_with_more_question(self):
player = self._setup_player()
question_one = self._setup_question()
question_two = self._setup_question()
choice_one = self._setup_choice(question=question_one)
choice_two = self._setup_choice(question=question_two)
text = {
'player': player.name,
'question_id': question_one.id,
'choice_id': choice_one.id,
}
client = WSClient()
client.send_and_consume('websocket.receive', text=text)
result = client.receive()
self.assertTrue('question' in result['next_question'])
self.assertEqual(result['next_question']['question'], question_two.text)
self.assertEqual(result['next_question']['choices'][0]['text'], choice_two.text)
self.assertTrue(result['last_right_answer'], choice_one.text)
self.assertFalse(result['game_finished'])
``` |
{
"source": "jonatascastro12/open-brqq",
"score": 2
} |
#### File: open-brqq/main/models.py
```python
from django.db import models
# Create your models here.
class City(models.Model):
name = models.CharField(max_length=255)
uf = models.CharField(max_length=2)
region = models.CharField(max_length=30, null=True, blank=True)
video = models.ForeignKey('main.GloboVideo', on_delete=models.SET_NULL, null=True, blank=True)
treated_subtitle_part = models.TextField(null=True, blank=True)
content1 = models.TextField(null=True, blank=True)
content2 = models.TextField(null=True, blank=True)
content3 = models.TextField(null=True, blank=True)
def has_treated_subtitle(self):
return self.treated_subtitle_part is not None
has_treated_subtitle.short_description = 'Has Treated Subtitle?'
has_treated_subtitle.boolean = True
def has_analysis(self):
return self.keywordanalysis_set.count() > 0
has_analysis.short_description = 'Has Analysis?'
has_analysis.boolean = True
def __str__(self):
return str(self.name + '-' + self.uf)
class Meta:
ordering = ['uf', 'name']
class GloboVideo(models.Model):
title = models.CharField(max_length=255)
description = models.CharField(max_length=255)
globo_id = models.IntegerField(null=True, blank=True)
video_info = models.TextField(null=True, blank=True)
exhibited_at = models.DateField(null=True, blank=True)
subtitle_url = models.CharField(max_length=255)
subtitle_content = models.TextField(null=True, blank=True)
subtitle_cleaned_content = models.TextField(null=True, blank=True)
error_on_clean = models.BooleanField(default=False)
def __str__(self):
return str(self.globo_id)
def has_subtitle(self):
return self.subtitle_url != ''
has_subtitle.short_description = 'Has Subtitle?'
has_subtitle.boolean = True
```
#### File: open-brqq/main/tasks.py
```python
from collections import Counter
import os
from celery import shared_task
import json
import requests
import re
from main.models import City, GloboVideo
# Imports the Google Cloud client library
# @shared_task(bind=True, name='Video crawler')
def run_crawler(self):
response = requests.get('https://especiais.g1.globo.com/o-brasil-que-eu-quero/2018/videos/js/data.json')
data = response.json()
i = 0
municipios = data.get('municipios')
for mun in municipios:
i = i + 1
# self.update_state(state='PROGRESS',
# meta={'current': i, 'total': len(municipios)})
city = City.objects.get_or_create(name=mun.get('municipio'), uf=mun.get('estado'))[0]
if (mun.get('video')):
try:
if not city.video:
video = GloboVideo.objects.filter(globo_id=mun.get('video')).first()
if video:
city.video = video
city.save()
continue
response2 = requests.get(
'https://api.globovideos.com/videos/%s/playlist/callback/wmPlayerPlaylistLoaded%s' % (
mun.get('video'), mun.get('video')))
videoInfo = response2.text
result = re.search('wmPlayerPlaylistLoaded[0-9]+\((.+)\)\;', videoInfo)
videoInfo = result.group(1)
videoInfo = json.loads(videoInfo).get('videos')[0]
video = GloboVideo.objects.create(title=videoInfo.get('title'),
globo_id=videoInfo.get('id'),
video_info=json.dumps(videoInfo),
description=videoInfo.get('description'),
exhibited_at=videoInfo.get('exhibited_at'),
)
city.video = video
city.save()
subtitles = [r for r in videoInfo.get('resources') if r.get('type') == 'subtitle']
if len(subtitles) > 0 and subtitles[0].get('url'):
sub_url = subtitles[0].get('url')
video.subtitle_url = sub_url
response3 = requests.get(sub_url)
video.subtitle_content = response3.content.decode('utf8')
video.save()
except Exception:
print('error', mun.get('video'))
``` |
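The globovideos endpoint returns JSONP wrapped in a wmPlayerPlaylistLoaded<id>(...); callback, and the re.search above strips that wrapper before json.loads. A self-contained illustration with a made-up payload:
```python
import json
import re

# Hypothetical JSONP payload shaped like the globovideos response handled above.
raw = 'wmPlayerPlaylistLoaded1234({"videos": [{"title": "Fortaleza", "id": 1234, "resources": []}]});'
match = re.search(r'wmPlayerPlaylistLoaded[0-9]+\((.+)\)\;', raw)
payload = json.loads(match.group(1))
print(payload["videos"][0]["title"])  # Fortaleza
```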
{
"source": "jonatascs/labdata-tcc",
"score": 3
} |
#### File: data_scraping/spiders/pkm.py
```python
from typing import Any, Callable
import scrapy
class PkmSpider(scrapy.Spider):
name = 'pkm'
allowed_domains = ['pokemondb.net']
start_urls = ['http://pokemondb.net/pokedex/bulbasaur']
def parse(self, response):
name = response.xpath('//h1/text()').extract_first()
number = response.xpath('//*[@class="vitals-table"]//tbody/tr/td/strong/text()').extract_first()
pktype = response.xpath('//*[@class="vitals-table"]/tbody/tr/td')[1].xpath('a/text()').extract()
pkspecies = response.xpath('//*[@class="vitals-table"]/tbody/tr/td')[2].xpath('text()').extract()
pkheight = response.xpath('//*[@class="vitals-table"]/tbody/tr/td')[3].xpath('text()').extract()
pkweight = response.xpath('//*[@class="vitals-table"]/tbody/tr/td')[4].xpath('text()').extract()
pkabilities = response.xpath('//*[@class="vitals-table"]/tbody/tr/td')[4] \
.xpath('//*[@class="text-muted"]/a/text()').extract()
# Base stats
pkhp = response.xpath('//*[@class="vitals-table"]')[1].xpath('//*[@class="cell-num"]')[0] \
.xpath('text()').extract()
pkattack = response.xpath('//*[@class="vitals-table"]')[1].xpath('//*[@class="cell-num"]')[3] \
.xpath('text()').extract()
pkdefense = response.xpath('//*[@class="vitals-table"]')[1].xpath('//*[@class="cell-num"]')[6] \
.xpath('text()').extract()
pkspatk = response.xpath('//*[@class="vitals-table"]')[1].xpath('//*[@class="cell-num"]')[9] \
.xpath('text()').extract()
pkspdef = response.xpath('//*[@class="vitals-table"]')[1].xpath('//*[@class="cell-num"]')[12] \
.xpath('text()').extract()
pkspeed = response.xpath('//*[@class="vitals-table"]')[1].xpath('//*[@class="cell-num"]')[15] \
.xpath('text()').extract()
dict_stats = {"hp": pkhp,
"Attack": pkattack,
"pkdefense": pkdefense,
"SpAtk": pkspatk,
"pkspdef": pkspdef,
"Speed": pkspeed, }
# Type defenses
nor = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[0].xpath('text()').extract()
fir = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[1].xpath('text()').extract()
wat = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[2].xpath('text()').extract()
ele = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[3].xpath('text()').extract()
gra = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[4].xpath('text()').extract()
ice = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[5].xpath('text()').extract()
fig = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[6].xpath('text()').extract()
poi = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[7].xpath('text()').extract()
gro = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[8].xpath('text()').extract()
fly = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[9].xpath('text()').extract()
psy = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[10].xpath('text()').extract()
bug = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[11].xpath('text()').extract()
roc = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[12].xpath('text()').extract()
gho = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[13].xpath('text()').extract()
dra = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[14].xpath('text()').extract()
dar = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[15].xpath('text()').extract()
ste = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[16].xpath('text()').extract()
fai = response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[17].xpath('text()').extract()
dict_defenses = {"nor": nor,
"fir": fir,
"wat": wat,
"ele": ele,
"gra": gra,
"ice": ice,
"fig": fig,
"poi": poi,
"gro": gro,
"fly": fly,
"psy": psy,
"bug": bug,
"roc": roc,
"gho": gho,
"dra": dra,
"dar": dar,
"ste": ste,
"fai": fai}
        # retrieve the title attribute
# response.xpath('//*[@class="type-table type-table-pokedex"]/tr/td')[0].xpath('@title').extract()
yield {
"name": name,
"number": number,
"type": pktype,
"species": pkspecies,
"height": pkheight,
"weight": pkweight,
"abilities": pkabilities,
"stats": dict_stats,
"defenses": dict_defenses
}
next_page_url = response.xpath('//*[@class="entity-nav-next"]/@href').extract_first()
absolute_next_page_url = response.urljoin(next_page_url)
yield scrapy.Request(absolute_next_page_url)
``` |
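A sketch of running the spider programmatically; it assumes Scrapy is installed and that this module is importable under the path shown in the file header.
```python
# Sketch: run PkmSpider in-process and export items to a JSON feed
# (assumes Scrapy is installed and the module path below is importable).
from scrapy.crawler import CrawlerProcess
from data_scraping.spiders.pkm import PkmSpider

process = CrawlerProcess(settings={"FEEDS": {"pokemon.json": {"format": "json"}}})
process.crawl(PkmSpider)
process.start()  # blocks until the crawl finishes
```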
{
"source": "JonatasDPorto/QuickDrawFlutterApp",
"score": 3
} |
#### File: QuickDrawFlutterApp/api/app.py
```python
from flask import Flask, request, jsonify
from PIL import Image
from tensorflow import keras
from keras.layers import Input, Dense, Activation, BatchNormalization, Flatten, Conv2D, MaxPool2D, LSTM, Embedding, SimpleRNN, Reshape, Lambda
import numpy as np
import cv2
from skimage.filters import threshold_otsu
from skimage.color import rgb2gray, convert_colorspace, rgb2gray, rgb2gray
from skimage import io, filters
app = Flask(__name__)
model_cnn = keras.models.load_model('./model_cnn.h5')
model_rnn = keras.models.load_model('./model_rnn.h5', custom_objects={
"Reshape": Reshape,
"Lambda": Lambda,
"Flatten": Flatten,
"SimpleRNN": SimpleRNN,
"Conv2D": Conv2D
})
images_labels = ['bee',
'coffee cup',
'guitar',
'hamburger',
'rabbit',
'truck',
'umbrella',
'crab',
'banana',
'airplane']
#flask run -h 192.168.0.118
@app.route("/classificar", methods=["POST"])
def process_image():
print('Realizando classificação...')
img = request.files['file']
img.save("img-original.jpg")
SIZE = 28
img = cv2.imread("img-original.jpg", 0)
img_gray = rgb2gray(img)
thresh = threshold_otsu(img_gray)
binary_thresh_img = img_gray > thresh
io.imsave("img-tratada.png", binary_thresh_img)
with Image.open("img-tratada.png") as im:
imagem = im.resize((SIZE,SIZE)).convert('L')
imagem = np.array(imagem)/255
result_cnn = model_cnn.predict(np.array([imagem]))[0]
result_rnn = model_rnn.predict(np.array([imagem]))[0]
r = {'cnn': result_cnn.tolist(), 'rnn': result_rnn.tolist()}
return jsonify(r)
if __name__ == "__main__":
app.run(debug=True)
``` |
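A client-side sketch for the /classificar endpoint above; the host, port and image path are assumptions.
```python
# Sketch of a client call to the /classificar endpoint above
# (host, port and image path are assumptions).
import requests

with open("drawing.jpg", "rb") as f:
    resp = requests.post("http://127.0.0.1:5000/classificar", files={"file": f})
print(resp.json())  # {'cnn': [...10 class scores...], 'rnn': [...10 class scores...]}
```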
{
"source": "JonatasFontele/10-days-of-statistics",
"score": 4
} |
#### File: JonatasFontele/10-days-of-statistics/day_3_conditional_probability.py
```python
from itertools import product
from fractions import Fraction
from random import choice
# Suppose a family has 2 children, one of which is a boy(b). What is the probability that both children are boys(bb)?
def calculate_2children_prob():
genders = ["b", "g"]
cartesian_product_list = list(product(genders, repeat=2))
event_b = []
event_bb = []
for subset in cartesian_product_list:
if "b" in subset:
event_b.append(subset)
if ('b', 'b') == subset:
event_bb.append(subset)
return Fraction(len(event_bb), len(event_b))
# event_b = [subset for subset in cartesian_product_list if "b" in subset]
# event_bb = [subset for subset in event_b if ('b', 'b') == subset]
# Is slightly slower
def choose_2children(n_simulations=100000):
event_b = 0
event_bb = 0
for _ in range(n_simulations):
child1 = choice(["b", "g"])
child2 = choice(["b", "g"])
children = child1 + child2
if "b" in children:
event_b += 1
if children == "bb":
event_bb += 1
# p(b|bb) = 1
# p(bb|b) = p(b|bb) * p(bb) / p(b)
return event_bb/event_b
def main():
print(calculate_2children_prob())
print(choose_2children())
print(round(choose_2children()*100, 2), "%")
if __name__ == "__main__":
main()
``` |
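Worked check of the result above: the unconditioned sample space for two children is {bb, bg, gb, gg}; conditioning on "at least one boy" keeps {bb, bg, gb}, and only bb is favourable, so P(bb | at least one boy) = (1/4)/(3/4) = 1/3, which is the exact Fraction returned and roughly the 33.33 % the simulation converges to.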
{
"source": "JonatasFontele/30-Days-of-Code-Challenges",
"score": 4
} |
#### File: JonatasFontele/30-Days-of-Code-Challenges/day_19_interfaces.py
```python
class AdvancedArithmetic(object):
def divisorSum(n):
raise NotImplementedError
class Calculator(AdvancedArithmetic):
def divisorSum(self, n):
sum = 0
# O(sqrt(n)) instead of brute-force technique O(n)
for i in range(1, int(n ** 0.5) + 1):
if n % i == 0:
# If divisors are equal, print only one
if n / i == i:
sum += i
# Otherwise print both
else:
sum += i
sum += n // i
return sum
n = int(input())
my_calculator = Calculator()
s = my_calculator.divisorSum(n)
print("I implemented: " + type(my_calculator).__bases__[0].__name__)
print(s)
```
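A quick worked check of the sqrt(n) pairing above, assuming the Calculator class is in scope:
```python
# Worked check of divisorSum (assumes the Calculator class above is in scope):
# 12 -> pairs (1, 12), (2, 6), (3, 4)               => 1 + 12 + 2 + 6 + 3 + 4 = 28
# 16 -> pairs (1, 16), (2, 8), square root 4 once   => 1 + 16 + 2 + 8 + 4     = 31
assert Calculator().divisorSum(12) == 28
assert Calculator().divisorSum(16) == 31
```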
#### File: JonatasFontele/30-Days-of-Code-Challenges/day_3_intro_to_conditional_statements.py
```python
def is_weird(n):
if n % 2 != 0:
print("Weird")
elif 2 <= n <= 5 and (n % 2 == 0):
print("Not Weird")
elif 6 <= n <= 20 and (n % 2 == 0):
print("Weird")
elif n > 20 and (n % 2 == 0):
print("Not Weird")
def main():
n = int(input())
if 1 <= n <= 100:
is_weird(n)
if __name__ == "__main__":
main()
```
#### File: JonatasFontele/30-Days-of-Code-Challenges/day_5_loops.py
```python
def first_ten_multiples(n):
return [print(f"{n} x {multiple} = {n * multiple}") for multiple in range(1, 11)]
def main():
n = int(input())
if 2 <= n <= 20:
first_ten_multiples(n)
if __name__ == "__main__":
main()
```
#### File: JonatasFontele/30-Days-of-Code-Challenges/day_8_dictionaries_and_maps.py
```python
def query(contacts):
queries = 0
name_query = input()
# Read unknown number of lines of queries (name)
    while name_query != "" and queries <= 100000:
        queries += 1
        try:
found_number = contacts.get(name_query, "Not found")
if found_number != "Not found":
print(f"{name_query}={found_number}")
else:
print(found_number)
name_query = input()
except EOFError:
break
def main():
n = int(input()) # Number of entries
if 1 <= n <= 100000:
# for i in range(n):
# name_number = input().split() name number (per line)
# contacts[name_number[0]] = name_number[1] {name: number}
contacts = dict([input().split() for _ in range(n)]) # Pythonic way
query(contacts)
if __name__ == "__main__":
main()
``` |
{
"source": "JonatasFontele/Algorithms_Structured-and-OOP-Exercises",
"score": 3
} |
#### File: JonatasFontele/Algorithms_Structured-and-OOP-Exercises/coin_trial.py
```python
from random import random
def coin_trial():
heads = sum([1 for _ in range(100) if random() <= 0.5])
return heads
def simulate(n):
trials = [coin_trial() for _ in range(n)]
return sum(trials)/n
print(simulate(10))
print(simulate(100))
print(simulate(1000))
print(simulate(1000000))
```
#### File: JonatasFontele/Algorithms_Structured-and-OOP-Exercises/inverted_pyramid_numbers.py
```python
def piramide_invertida(n):
lista = []
for i in range(1, n + 1):
lista.append(i)
while len(lista) >= 1:
print(str(lista).strip('[]'))
del(lista[-1])
n = int(input())
piramide_invertida(n)
```
#### File: JonatasFontele/Algorithms_Structured-and-OOP-Exercises/multiplication_function.py
```python
def sum(a=0, b=0):  # add
return a + b
def multiply(a=1, b=1):  # multiply
summation = 0
if a < b:
for i in range(a):
summation = sum(summation, b)
return summation
else:
for i in range(b):
summation = sum(summation, a)
return summation
try:
a, b = input("Input two numbers in the same line:").split()
a = int(a)
b = int(b)
except ValueError:
a, b = input("You broke the line, please, try again:").split()
a = int(a)
b = int(b)
print(sum(a, b))
print(multiply(a, b))
```
#### File: JonatasFontele/Algorithms_Structured-and-OOP-Exercises/progress_bars.py
```python
from tqdm import tqdm #pip install tqdm
import time
def complicated_function():
time.sleep(2) #Simulating some complicated processing
for i in tqdm(range(100)):
complicated_function()
```
#### File: JonatasFontele/Algorithms_Structured-and-OOP-Exercises/shopping_cart_example.py
```python
from collections import Counter
def main():
prices = {"course": 97.99, "book": 54.99, "wallpaper": 4.99}
cart = Counter(course=1, book=3, wallpaper=2)
total = 0.0
for product, units in cart.items():
subtotal = units * prices[product]
price = prices[product]
        total += subtotal
print(f"{product:9}: ${price:7.2f} × {units} = ${subtotal:7.2f}")
print(total)
if __name__ == "__main__":
main()
```
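With the prices and cart above, the loop prints course 1 × 97.99 = 97.99, book 3 × 54.99 = 164.97 and wallpaper 2 × 4.99 = 9.98, so the final total printed is 272.94 (up to floating-point rounding).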
#### File: JonatasFontele/Algorithms_Structured-and-OOP-Exercises/timeit_reverse.py
```python
import operator
from time import time
# Precise
from timeit import timeit
# Here we want to invert digits
number = ''.join([str(i) for i in range(100)]) # input("Input a number to invert its order:")
# Slightly better for a comparatively short string
def slicing_method(number1):
return "Printing using string: ", number1[::-1]
inicio = time()
slicing_method(number)
fim = time()
print("Duracao time slicing_method: %f" % (fim - inicio))
print("Duracao timeit slicing_method: ", timeit('slicing_method(number)', 'from __main__ import slicing_method, number',
number=100))
print()
# Elegant but slow
def math_method(number2):
inverted = 0
exponent = len(number2)
number2 = int(number2)
while number2 >= 1:
inverted += (number2 % 10) * (10 ** (exponent - 1))
exponent -= 1
number2 = number2 // 10 # the floor division // rounds the result down to the nearest whole number
return "Printing using elegant math: {}".format(inverted)
inicio2 = time()
math_method(number)
fim2 = time()
print("Duracao time math_method: ", (fim2 - inicio2))
print("Duracao timeit math_method: ", timeit('math_method(number)', 'from __main__ import math_method, number',
number=100))
print()
# Better for a comparatively large list (~10*6)
def reversed_method(number3):
return f"Printing using string: {reversed(number3)}."
inicio3 = time()
reversed_method(number)
fim3 = time()
print(f"Duracao time reversed_method: {fim3 - inicio3}")
print("Duracao timeit reversed_method: ", timeit('reversed_method(number)',
'from __main__ import reversed_method, number', number=100))
print()
# Here we want to invert a list
number_list = [str(i) for i in range(100)]
print("Duracao timeit slicing_method em lista: ", timeit('slicing_method(number_list)',
'from __main__ import slicing_method, number_list',
number=100))
print("Duracao timeit reversed_method em lista: ", timeit('reversed_method(number_list)',
'from __main__ import reversed_method, number_list',
number=100))
# Slightly better for a comparatively short list
def reverse_method(number4):
return f"Printing using string: {number4.reverse()}."
print("Duracao timeit reverse_method em lista: ", timeit('reverse_method(number_list)',
'from __main__ import reverse_method, number_list',
number=100))
print()
def add(x, y):
return x + y
def operators_add(x, y):
return operator.add(x, y)
# Strings
a = '1'
b = '2'
print(timeit('add(a, b)', 'from __main__ import add, a, b'))
# 0.16069997000158764
print(timeit('operators_add(a, b)', 'from __main__ import operators_add, a, b'))
# Integers
a2 = 1
b2 = 2
print(timeit('add(a2, b2)', 'from __main__ import add, a2, b2'))
# 0.10841095799696632
print(timeit('operators_add(a2, b2)', 'from __main__ import operators_add, a2, b2'))
``` |
{
"source": "jonatasleon/cookiecutter-qmd-flask",
"score": 2
} |
#### File: {{cookiecutter.project_slug}}/tests/test_{{cookiecutter.project_core_dir}}.py
```python
from flask import url_for
def test_index(client):
assert client.get(url_for('site.index')).status_code == 200
def test_admin(client):
assert client.get(url_for('admin.index')).status_code == 200
``` |
{
"source": "jonatasleon/killer-roms-cli",
"score": 3
} |
#### File: killer-roms-cli/kr/__init__.py
```python
import os
import click
from tabulate import tabulate
from kr.consoles import list_consoles
from .download import download, retrieve_file_url
from .search import search, verify_console_name
def validate_console(ctx, param, value):
try:
console_code = verify_console_name(value)
except KeyError:
raise click.BadParameter(f"Console {value} is not available.")
return console_code
def validate_query(ctx, param, value):
return value or ("",)
order_by_choices = click.Choice(
[
"title",
"genre",
"rating",
"downloads",
"size",
]
)
def validate_order_by(ctx, param, value):
replace_mapper = {
"title": "file_name",
"size": "file_size",
}
return replace_mapper.get(value, value)
def validate_urls(ctx, param, value):
if not value and not click.get_text_stream("stdin").isatty():
return tuple(click.get_text_stream("stdin").read().strip().split("\n")) or ("",)
else:
return value
@click.group()
def cli():
pass
@cli.command("consoles")
def _consoles():
"""List available consoles to search and download from."""
click.echo("\n".join(list_consoles()))
@cli.command("search")
@click.argument("console", callback=validate_console)
@click.argument("query", nargs=-1, type=click.UNPROCESSED, callback=validate_query)
@click.option(
"--quiet",
"-q",
type=bool,
default=False,
is_flag=True,
help="Only print link output",
)
@click.option(
"--order-by",
"-o",
type=order_by_choices,
default="downloads",
callback=validate_order_by,
help="Defines criteria order",
)
@click.option(
"--asc/--desc",
"-a/-d",
"ascending",
default=False,
help="Defines ascending or descending order",
)
@click.option(
"--page",
"-p",
type=int,
default=1,
callback=lambda ctx, param, value: value - 1,
help="Page number",
show_default=True,
)
def _search(console, query, quiet, order_by, ascending, page):
"""Search roms."""
for q in query:
result = search(console, q, order_by=order_by, asc=ascending, page=page)
if quiet:
output = "\n".join([r["link"] for r in result])
else:
output = tabulate(result, headers="keys")
click.echo(output)
@cli.command("download")
@click.argument(
"urls", nargs=-1, type=click.UNPROCESSED, required=False, callback=validate_urls
)
@click.option("--output_dir", "-d", default=os.path.abspath("."))
def _download(urls, output_dir):
"""Download roms."""
for url in urls:
file_url = retrieve_file_url(url)
click.echo(file_url)
chunks = download(file_url, output_dir=output_dir)
with click.progressbar(length=next(chunks)) as bar:
for size in chunks:
bar.update(size)
```
#### File: killer-roms-cli/kr/search.py
```python
import requests
from bs4 import BeautifulSoup
from .config import BASE_URL
from .consoles import consoles
def verify_console_name(console_name):
try:
console_code = consoles[console_name]
except KeyError:
raise KeyError(f"Console {console_name} is not available.")
return console_code
def search(console_code, query, order_by="downloads", asc=False, page=0):
order = f"{order_by}${'ASC' if asc else 'DESC'}"
response = requests.post(
f"{BASE_URL}/ajax.php?m=roms_j",
data={
"sort": order,
"page": page,
"search": query,
"rom_concole": console_code,
},
)
soup = BeautifulSoup(response.content, "html.parser")
table = soup.select("table > tbody tr")
result = [parse_row(row) for row in table]
return result
def parse_row(row):
columns = row.select("td")
parsed_row = dict(
title=columns[0].text.strip("\n"),
genre=columns[1].text.strip("\n"),
rating=columns[2].text.strip("\n"),
downloads=columns[3].text.strip("\n"),
size=columns[4].text.strip("\n"),
link=f"{BASE_URL}{columns[0].find('a')['href']}",
)
return parsed_row
``` |
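A sketch that exercises the click commands above with click's test runner, so no installed console-script entry point is needed; the console name passed to search is only an assumption about what list_consoles() returns.
```python
# Sketch: drive the click CLI above in-process (console name "gba" is an assumption).
from click.testing import CliRunner
from kr import cli

runner = CliRunner()
print(runner.invoke(cli, ["consoles"]).output)                           # available consoles
print(runner.invoke(cli, ["search", "gba", "zelda", "--quiet"]).output)  # one download link per line
```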
{
"source": "jonatasleon/sqlalchemy-mixins",
"score": 2
} |
#### File: sqlalchemy-mixins/examples/smartquery.py
```python
from __future__ import print_function
import os
import datetime
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_method
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import Query, scoped_session, sessionmaker
from sqlalchemy_mixins import SmartQueryMixin, ReprMixin, JOINED, smart_query
def log(msg):
print('\n{}\n'.format(msg))
#################### setup ######################
Base = declarative_base()
# we also use ReprMixin which is optional
class BaseModel(Base, SmartQueryMixin, ReprMixin):
__abstract__ = True
__repr__ = ReprMixin.__repr__
pass
class User(BaseModel):
__tablename__ = 'user'
__repr_attrs__ = ['name']
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
# to smart query relationship, it should be explicitly set,
# not to be a backref
posts = sa.orm.relationship('Post')
comments = sa.orm.relationship('Comment')
# below relationship will just return query (without executing)
# this query can be customized
# see http://docs.sqlalchemy.org/en/latest/orm/collections.html#dynamic-relationship
#
# we will use this relationship for demonstrating real-life example
# of how smart_query() function works (see 3.2.2)
comments_ = sa.orm.relationship('Comment', lazy="dynamic") # this will return query
class Post(BaseModel):
__tablename__ = 'post'
id = sa.Column(sa.Integer, primary_key=True)
body = sa.Column(sa.String)
user_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
archived = sa.Column(sa.Boolean, default=False)
# to smart query relationship, it should be explicitly set,
# not to be a backref
user = sa.orm.relationship('User')
comments = sa.orm.relationship('Comment')
@hybrid_property
def public(self):
return not self.archived
@public.expression
def public(cls):
return ~cls.archived
@hybrid_method
def is_commented_by_user(cls, user, mapper=None):
# in real apps, Comment class can be obtained from relation
# to avoid cyclic imports like so:
# Comment = cls.comments.property.argument()
mapper = mapper or cls
# from sqlalchemy import exists
# return exists().where((Comment.post_id == mapper.id) & \
# (Comment.user_id == user.id))
return mapper.comments.any(Comment.user_id == user.id)
@hybrid_method
def is_public(cls, value, mapper=None):
# in real apps, Comment class can be obtained from relation
# to avoid cyclic imports like so:
# Comment = cls.comments.property.argument()
mapper = mapper or cls
return mapper.public == value
class Comment(BaseModel):
__tablename__ = 'comment'
__repr_attrs__ = ['body']
id = sa.Column(sa.Integer, primary_key=True)
body = sa.Column(sa.String)
user_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
post_id = sa.Column(sa.Integer, sa.ForeignKey('post.id'))
rating = sa.Column(sa.Integer)
created_at = sa.Column(sa.DateTime)
# to smart query relationship, it should be explicitly set,
# not to be a backref
user = sa.orm.relationship('User')
post = sa.orm.relationship('Post')
#################### setup ORM ######################
db_file = os.path.join(os.path.dirname(__file__), 'test.sqlite')
engine = create_engine('sqlite:///{}'.format(db_file), echo=True)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
session = scoped_session(sessionmaker(bind=engine))
BaseModel.set_session(session)
#################### setup some data ######################
u1 = User(name='Bill u1')
session.add(u1)
session.commit()
u2 = User(name='<NAME>')
session.add(u2)
session.commit()
u3 = User(name='<NAME>')
session.add(u3)
session.commit()
session.commit()
p11 = Post(
id=11,
body='1234567890123',
archived=True,
user=u1
)
session.add(p11)
session.commit()
p12 = Post(
id=12,
body='1234567890',
user=u1
)
session.add(p12)
session.commit()
p21 = Post(
id=21,
body='p21',
user=u2
)
session.add(p21)
session.commit()
p22 = Post(
id=22,
body='p22',
user=u2
)
session.add(p22)
session.commit()
cm11 = Comment(
id=11,
body='cm11',
user=u1,
post=p11,
rating=1,
created_at=datetime.datetime(2014, 1, 1)
)
session.add(cm11)
session.commit()
cm12 = Comment(
id=12,
body='cm12',
user=u2,
post=p12,
rating=2,
created_at=datetime.datetime(2015, 10, 20)
)
session.add(cm12)
session.commit()
cm21 = Comment(
id=21,
body='cm21',
user=u1,
post=p21,
rating=1,
created_at=datetime.datetime(2015, 11, 21)
)
session.add(cm21)
session.commit()
cm22 = Comment(
id=22,
body='cm22',
user=u3,
post=p22,
rating=3,
created_at=datetime.datetime(2016, 11, 20)
)
session.add(cm22)
session.commit()
cm_empty = Comment(
id=29,
# no body
# no user
# no post
# no rating
)
session.add(cm_empty)
session.commit()
#################### Demo ######################
# ['id', 'body', 'user_id', 'archived', # normal columns
# 'user', 'comments', # relations
# 'public', # hybrid attributes
# 'is_public', 'is_commented_by_user' # hybrid methods
# ]
log(Post.filterable_attributes)
#### 1. Filters ####
##### 1.1 filter by hybrid_property 'public' #####
# low-level filter_expr()
log(session.query(Post).filter(*Post.filter_expr(user=u1, public=True)).all())
# high-level SmartQueryMixin.where() method
log(Post.where(user=u1, public=True).all())
# you can unpack dict (in real world app you will do this)
filters = {'user': u1, 'public': True}
log(Post.where(**filters).all())
##### 1.2 filter by hybrid_method 'is_commented_by_user' #####
# low-level filter_expr()
log(session.query(Post).filter(
*Post.filter_expr(is_commented_by_user=u1)).all())
# high-level SmartQueryMixin.where() method
log(Post.where(is_commented_by_user=u1).all())
##### 1.3 operators #####
# rating == None
log(Comment.where(rating=None).all()) # cm_empty
log(Comment.where(rating__isnull=2).all()) # cm_empty
# rating == 2
# when no operator, 'exact' operator is assumed
log(Comment.where(rating=2).all()) # cm12
# assumed
log(Comment.where(rating__exact=2).all()) # cm12
# rating > 2
log(Comment.where(rating__gt=2).all()) # cm22
# rating >= 2
log(Comment.where(rating__ge=2).all()) # cm12, cm22
# rating < 2
log(Comment.where(rating__lt=2).all()) # cm11, cm21
# rating <= 2
log(Comment.where(rating__le=2).all()) # cm11, cm12, cm21
# rating in [1,3]
log(Comment.where(rating__in=[1, 3]).all()) # cm11, cm21, cm22
log(Comment.where(rating__in=(1, 3)).all()) # cm11, cm21, cm22
log(Comment.where(rating__in={1, 3}).all()) # cm11, cm21, cm22
# rating between 2 and 3
log(Comment.where(rating__between=[2, 3]).all()) # cm12, cm22
log(Comment.where(rating__between=(2, 3)).all()) # cm12, cm22
# likes
log(Comment.where(body__like=u'cm12 to p12').all()) # cm12
log(Comment.where(body__like='%cm12%').all()) # cm12
log(Comment.where(body__ilike='%CM12%').all()) # cm12
log(Comment.where(body__startswith='cm1').all()) # cm11, cm12
log(Comment.where(body__istartswith='CM1').all()) # cm11, cm12
log(Comment.where(body__endswith='to p12').all()) # cm12
log(Comment.where(body__iendswith='TO P12').all()) # cm12
# dates
# year
log(Comment.where(created_at__year=2014).all()) # cm11
log(Comment.where(created_at__year=2015).all()) # cm12, cm21
# month
log(Comment.where(created_at__month=1).all()) # cm11
log(Comment.where(created_at__month=11).all()) # cm21, cm22
# day
log(Comment.where(created_at__day=1).all()) # cm11
log(Comment.where(created_at__day=20).all()) # cm12, cm22
# whole date
log(Comment.where(created_at__year=2014, created_at__month=1,
created_at__day=1).all()) # cm11
##### 1.4 where() with auto-joined relations #####
# when have no joins, where() is a shortcut for filter_expr
log(session.query(Comment).filter(
*Comment.filter_expr(rating__gt=2, body__startswith='cm1')).all())
log(Comment.where(rating__gt=2, body__startswith='cm1').all())
# but where() can automatically join relations
# users having posts which are commented by user 2
log(User.where(posts___comments___user_id=u2.id).all())
# comments where user name starts with 'Bi'
# !! ATTENTION !!
# about Comment.post:
# although we have Post.comments relationship,
# it's important to **add relationship Comment.post** too,
# not just use backref !!!
log(Comment.where(user___name__startswith='Bi').all())
# non-public posts commented by user 1
log(Post.where(public=False, is_commented_by_user=u1).all())
#### 2. sort ####
#### 2.1 simple demo ####
##### 2.1.1 low-level order_expr()
# '-rating', 'created_at' means 'ORDER BY rating DESC, created_at ASC'
log(session.query(Comment).order_by(
*Comment.order_expr('-rating', 'created_at')).all())
##### 2.1.2 high-level sort()
log(Comment.sort('-rating', 'created_at'))
# in real world apps, you will keep attrs in list
sort_attrs = ['-rating', 'created_at']
log(Comment.sort(*sort_attrs))
##### 2.1.3 hybrid properties
log(session.query(Post).order_by(*Post.order_expr('-public')).all())
log(Post.sort('-public').all())
#### 2.2 sort() with auto-joined relations ####
# sort by name of user ASC (user relation will be auto-joined), then by
# created_at DESC
log(Comment.sort('user___name', '-created_at').all())
# get comments on public posts first, then order by post user name
# Post and User tables will be auto-joined
log(Comment.sort('-post___public', 'post___user___name').all())
#### 3. smart_query() : combination of where(), sort() and eager load ####
schema = {
'post': {
'user': JOINED
}
}
# schema can use class properties too (see EagerLoadMixin):
# schema = {
# Comment.post: {
# Post.user: JOINED
# }
# }
##### 3.1 high-level smart_query() class method #####
res = Comment.smart_query(
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm12, cm21, cm22
##### 3.2 more flexible smart_query() function #####
##### 3.2.1. The same as 3.1
query = Comment.query # could be any query you want
res = smart_query(query,
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm12, cm21, cm22
##### 3.2.2. Real-life example with lazy='dynamic' relationship
# let's imagine we want to display some user relations
# and flexibly filter, sort and eagerload them
# like this http://www.qopy.me/LwfSCu_ETM6At6el8wlbYA
# (no sort on screenshot, but you've got the idea)
# so we have a user
user = session.query(User).first()
# and we have initial query for his/her comments
# (see User.comments_ relationship)
query = user.comments_
# now we just smartly apply all filters, sorts and eagerload. Perfect!
res = smart_query(query,
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm21
##### 3.3 auto eager load in where() and sort() with auto-joined relations ####
"""
smart_query() does auto-joins for filtering/sorting,
so it makes sense to tell SQLAlchemy that we already joined that relation.
So we test that relations are set to joinedload
if they were used in smart_query()
"""
##### 3.3.1 where()
# comments on public posts where posted user name like ...
res = Comment.where(post___public=True, post___user___name__like='Bi%').all()
log(res)
# no additional query needed: we used 'post' and 'post__user'
# relations in smart_query()
log(res[0].post)
log(res[0].post.user)
# we didn't use post___comments in filters, so additional query is needed
log(res[0].post.comments)
##### 3.3.2 sort()
res = Comment.sort('-post___public', 'post___user___name').all()
log(res)
# no additional query needed: we used 'post' and 'post__user'
# relations in smart_query()
log(res[0].post)
log(res[0].post.user)
# we didn't use post___comments in filters, so additional query is needed
log(res[0].post.comments)
``` |
{
"source": "jonatasleon/tdd-with-python",
"score": 2
} |
#### File: tdd-with-python/functional_tests/tests.py
```python
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class NewVisitorTest(LiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def check_for_row_in_list_table(self, row_text):
table = self.browser.find_element_by_id('id_table_list')
rows = table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
def test_can_start_a_list_and_retrieve_it_later(self):
        # Edith has heard about a cool new to-do list app.
        # She goes to check out its homepage
self.browser.get(self.live_server_url)
        # She notices the page title and header mention To-Do lists
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
        # She is invited to enter a to-do item straight away
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(
inputbox.get_attribute('placeholder'),
'Enter a to-do item'
)
        # She types "Buy peacock feathers" into a text box (her hobby is
        # tying fly-fishing lures)
inputbox.send_keys('Buy peacock feathers')
        # When she hits enter, the page updates, and now the page lists
        # "1: Buy peacock feathers" as an item in a to-do list
inputbox.send_keys(Keys.ENTER)
edith_list_url = self.browser.current_url
self.assertRegex(edith_list_url, '/lists/.+')
self.check_for_row_in_list_table('1: Buy peacock feathers')
        # There is still a text box inviting her to add another item.
        # She enters "Use peacock feathers to make a fly" (Edith is methodical)
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Use peacocks to make a fly')
inputbox.send_keys(Keys.ENTER)
        # The page updates again, and now shows both items on her list
self.check_for_row_in_list_table('2: Use peacocks to make a fly')
self.check_for_row_in_list_table('1: Buy peacock feathers')
        # Now a new user, Francis, comes along to the site
        # We use a new browser session to make sure that no information #
        # of Edith's is coming through from cookies etc #
self.browser.quit()
self.browser = webdriver.Firefox()
        # Francis visits the home page. There is no sign of Edith's list
self.browser.get(self.live_server_url)
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertNotIn('make a fly', page_text)
        # Francis starts a new list by entering a new item. He is less
        # interesting than Edith...
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Buy milk')
inputbox.send_keys(Keys.ENTER)
        # Francis gets his own URL
francis_list_url = self.browser.current_url
self.assertRegex(francis_list_url, '/lists/.+')
self.assertNotEqual(francis_list_url, edith_list_url)
        # Again, there is no trace of Edith's list
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertIn('Buy milk', page_text)
        # Satisfied, they both go back to sleep
def test_layout_and_styling(self):
        # She goes to check out the homepage
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
        # She notices the input box is nicely centered
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertAlmostEqual(
inputbox.location['x'] + inputbox.size['width'] / 2,
512,
delta=5
)
``` |
{
"source": "jonatasoli/fastapi-design-api-example",
"score": 2
} |
#### File: order/locust/locust_order_endpoints.py
```python
from locust import HttpUser, TaskSet, task
from order.schemas.schemas_order import orderModelBase
HEADERS = {"Content-Type": "application/json"}
class orderModelTask(TaskSet):
@task
def post(self):
_data = ()
self.client.post("/orders", data=_data.dict(), headers=HEADERS)
class stressTest(HttpUser):
tasks = [orderModelTask]
host = "http://localhost:8888"
min_wait = 5000
max_wait = 10000
```
#### File: order/service/business_rules.py
```python
from enum import Enum
from fastapi import status, HTTPException
from order.adapters.orm_adapter import ordermodel
class Order:
async def create(self, data):
return await ordermodel.create(obj_in=data)
async def update(self, id, data):
_order = await ordermodel.get(id)
if not hasattr(_order, "status")\
or _order.status != OrderStatus.WAITING.value:
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail="Current Status in payment status is not permitted"
)
_order.product_name = data.product_name
_order.total_amount = data.total_amount
return await ordermodel.update(obj_id=id, obj_in=_order)
async def cancel(self, id):
_order = await ordermodel.get(id)
if not hasattr(_order, "status")\
or _order.status != OrderStatus.WAITING.value:
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail="Update payment status is not permitted"
)
_order.status = OrderStatus.CANCELLED.value
return await ordermodel.update(obj_id=id, obj_in=_order)
async def status(self, id):
return await ordermodel.get(id)
class Payment:
async def process(self, id):
_order = await ordermodel.get(id)
if not hasattr(_order, "status")\
or _order.status != OrderStatus.WAITING.value:
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail="Update payment status is not permitted"
)
_order.status = OrderStatus.PAID.value
return await ordermodel.update(obj_id=id, obj_in=_order)
class Receipt:
async def delivery(self, id):
_order = await ordermodel.get(id)
if not hasattr(_order, "status")\
or _order.status != OrderStatus.READY.value:
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail="Update payment status is not permitted"
)
_order.status = OrderStatus.DELIVERY.value
return await ordermodel.update(obj_id=id, obj_in=_order)
class OrderStatus(Enum):
WAITING="Waiting Payment"
CANCELLED="Canceled"
PAID="Paid"
READY="Ready"
DELIVERY="Delivery"
``` |
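The guards above imply a small state machine over OrderStatus; a hypothetical summary of the permitted transitions (not part of the source):
```python
# Hypothetical summary of the transitions permitted by the guards above.
ALLOWED_TRANSITIONS = {
    OrderStatus.WAITING: {OrderStatus.PAID, OrderStatus.CANCELLED},  # Payment.process / Order.cancel
    OrderStatus.READY: {OrderStatus.DELIVERY},                       # Receipt.delivery
}
# Order.update is likewise only allowed while the order is still WAITING.
```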
{
"source": "jonatasoli/fastapi",
"score": 3
} |
#### File: fastapi/fastapi/cli.py
```python
import os
import typer
import uvicorn
from typing import Optional
from fastapi import __cliname__, __version__
app = typer.Typer()
@app.command()
def name():
typer.echo(__cliname__)
@app.command()
def version():
typer.echo(__version__)
@app.command("run", short_help="Run a development server.")
def run_command(
app: Optional[str] = typer.Argument("main:app"),
host: str = typer.Option("0.0.0.0", help="Put specific host"),
port: int = typer.Option(8000, help="Put specific port"),
level: str = typer.Option("info", help="Put specific level"),
reload: bool = typer.Option(True, help="Reload option"),
factory: bool = typer.Option(False, help="Factory Mode"),
):
uvicorn.run(app, host=host, port=port, log_level=level, reload=reload)
@app.command("shell", short_help="Run a shell in the app context.")
def shell_command(
app: str = typer.Option("main.py", help="Put specific app file"),
):
os.system(f"python -i {app}")
def main():
app()
if __name__ == "__main__":
main()
``` |
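A sketch that invokes the Typer commands above in-process; the import path mirrors the file header and is an assumption.
```python
# Sketch: invoke the Typer CLI above in-process (import path is an assumption).
from typer.testing import CliRunner
from fastapi.cli import app

runner = CliRunner()
print(runner.invoke(app, ["version"]).output)  # prints __version__
print(runner.invoke(app, ["name"]).output)     # prints __cliname__
```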
{
"source": "jonatasoli/fastapi-order-example",
"score": 2
} |
#### File: api/adapters/publisher.py
```python
from loguru import logger
from tenacity import retry, stop_after_delay, wait_random_exponential
from config import settings
from ext.broker import create_broker_connection
@retry(
reraise=True,
stop=stop_after_delay(settings.BROKER_STOP_DELAY),
wait=wait_random_exponential(multiplier=1, max=settings.BROKER_MAX_DELAY),
)
async def publish_queue(
broker_queue,
broker_exchange,
body_queue,
exchange_type="direct"
):
try:
connection = await create_broker_connection()
channel = await connection.channel()
await channel.exchange_declare(
exchange=broker_exchange, exchange_type=exchange_type
)
_routing_key = broker_queue
response = await channel.basic_publish(
body=body_queue,
routing_key=_routing_key,
exchange=broker_exchange,
)
await connection.close()
return response
except Exception as e:
logger.error(f"Error in publisher adapter.\n{e}")
raise e
```
#### File: api/adapters/user.py
```python
from config import settings
from order.schemas.schemas_user import userBase
from ext.fetcher import fetcher
async def get_user(user_id):
data = await fetcher(
base_url=settings.BASE_URL_USER,
method="GET",
query=f"users/{user_id}"
)
return userBase(
user_id=data["id"],
firstName=data["firstName"],
lastName=data["lastName"],
customer_fullname=data["firstName"] + data["lastName"]
)
```
#### File: order/services/services_order.py
```python
from datetime import datetime
from loguru import logger
from config import settings
from order.dao import ordermodel
from order.schemas.schemas_order import (
orderMessage,
orderPayload,
orderModelBrokerMessage,
orderModelCreate,
messageBaseResponse,
)
from order.api.adapters.user import get_user
from order.api.adapters.product import get_product
from order.api.adapters.publisher import publish_queue
class OrderService():
async def add_ordermodel(self, ordermodel_data):
try:
_order_message = await self._pub_message(
await ordermodel.create(
obj_in=await self._create_model_obj(ordermodel_data)
)
)
return _order_message
except Exception as e:
logger.error(f"Error in add ordermodel {e}")
raise e
@staticmethod
async def _create_model_obj(data):
try:
_user = await get_user(user_id=data.user_id)
_product = await get_product(product_code=data.product_code)
if not _user or not _product:
raise ValueError(f"User or Product not found.\n User {_user} - Product {_product}")
return orderModelCreate(
user_id=_user.user_id,
product_code=_product.product_code,
customer_fullname=_user.customer_fullname,
product_name=_product.product_name,
total_amount=_product.price
)
except Exception as e:
logger.error(f"Error in create_model_object {e}")
raise e
@staticmethod
async def _pub_message(message):
try:
_order_message = orderMessage(
order_id=message.id,
product_code=message.product_code,
customer_fullname=message.customer_fullname,
product_name=message.product_name,
total_amount=message.total_amount,
created_at=message.created_at
)
_order_payload = orderPayload(order=_order_message)
_order_broker_message = orderModelBrokerMessage(
producer="service-order",
sent_at=datetime.now(),
type="created-order",
payload=_order_payload,
)
_output = await publish_queue(
broker_queue=settings.BROKER_QUEUE_CREATE_ORDER,
broker_exchange=settings.BROKER_EXCHANGE_ORDERS,
body_queue=_order_broker_message.json().encode("utf-8")
)
if not hasattr(_output, "index"):
raise Exception("Order not queue")
return messageBaseResponse(
queue_index=_output.index,
order_id=message.id,
user_id=message.user_id,
product_code=message.product_code,
customer_fullname=message.customer_fullname,
product_name=message.product_name,
total_amount=message.total_amount,
created_at=message.created_at,
)
except Exception as e:
logger.error(f"Error in send message to broker {e}")
raise e
```
#### File: order/tests/test_adapter_product.py
```python
import pytest
from fastapi import status, HTTPException
from order.api.adapters.product import get_product
from order.schemas.schemas_product import productBase
@pytest.mark.container
@pytest.mark.asyncio
async def test_get_product_success(mocker):
response_obj = productBase(
product_code="classic-box",
product_name="Classic Box",
price=9.99
)
response = await get_product(product_code="classic-box")
assert response.dict() == response_obj
@pytest.mark.container
@pytest.mark.asyncio
async def test_get_product_not_found(mocker):
with pytest.raises(HTTPException) as exc_info:
await get_product(product_code="error-box")
assert exc_info.value.status_code == status.HTTP_404_NOT_FOUND
# assert exc_info.value.detail == "Internal Error\n 404 Client Error: Not Found for url: http://localhost:8081/products/error-box\nFor more information check: https://httpstatuses.com/404"
@pytest.mark.container
@pytest.mark.asyncio
async def test_get_product_request_timeout(mocker):
with pytest.raises(HTTPException) as exc_info:
await get_product(product_code="family-box")
assert exc_info.value.status_code == status.HTTP_408_REQUEST_TIMEOUT
@pytest.mark.container
@pytest.mark.asyncio
async def test_get_product_internal_error(mocker):
with pytest.raises(HTTPException) as exc_info:
await get_product(product_code="veggie-box")
assert exc_info.value.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
``` |
{
"source": "jonatasoli/fastapi-simple-test",
"score": 2
} |
#### File: jonatasoli/fastapi-simple-test/conftest.py
```python
import pytest
from fastapi.testclient import TestClient
from main import app
@pytest.fixture
def client():
with TestClient(app) as client:
yield client
```
#### File: jonatasoli/fastapi-simple-test/test_client.py
```python
def test_index(client):
response = client.get("/")
assert response.json() == dict(message="Hello Qredo")
``` |
{
"source": "jonatasoli/fastapi-template-cookiecutter",
"score": 2
} |
#### File: {{cookiecutter.app_slug_snakecase}}/api/endpoints.py
```python
from fastapi import APIRouter, status
from loguru import logger
from {{cookiecutter.app_slug_snakecase}}.schemas.schemas_{{cookiecutter.app_slug_snakecase}} import {{cookiecutter.model_name}}Endpoint
from {{cookiecutter.app_slug_snakecase}}.services import services_{{cookiecutter.app_slug_snakecase}}
{{cookiecutter.app_slug_snakecase}}_router = APIRouter()
@{{cookiecutter.app_slug_snakecase}}_router.post("/add", status_code=status.HTTP_201_CREATED)
async def add_{{cookiecutter.model_slug_snakecase}}(*, data: {{cookiecutter.model_name}}Endpoint):
try:
return await services_{{cookiecutter.app_slug_snakecase}}.add_{{cookiecutter.model_slug_snakecase}}(data)
except Exception as e:
logger.error(f"Error return endpoint {e}")
raise e
@{{cookiecutter.app_slug_snakecase}}_router.put("/update/{id}", status_code=status.HTTP_200_OK)
async def update_{{cookiecutter.model_slug_snakecase}}(*, id: int, data: {{cookiecutter.model_name}}Endpoint):
try:
return await services_{{cookiecutter.app_slug_snakecase}}.update_{{cookiecutter.model_slug_snakecase}}(id, data)
except Exception as e:
logger.error(f"Error return endpoint {e}")
raise e
@{{cookiecutter.app_slug_snakecase}}_router.get("/get/{id}", status_code=status.HTTP_200_OK)
async def get_{{cookiecutter.model_slug_snakecase}}(*, id: int):
try:
return await services_{{cookiecutter.app_slug_snakecase}}.get_{{cookiecutter.model_slug_snakecase}}(id)
except Exception as e:
logger.error(f"Error return endpoint {e}")
raise e
```
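A minimal sketch of how this router could be mounted; the template's application entrypoint is not shown in this excerpt, so the import path and the route prefix below are assumptions:
```python
# Hypothetical wiring, assuming the generated project exposes a FastAPI app elsewhere.
from fastapi import FastAPI
from {{cookiecutter.app_slug_snakecase}}.api.endpoints import {{cookiecutter.app_slug_snakecase}}_router
app = FastAPI()
app.include_router({{cookiecutter.app_slug_snakecase}}_router, prefix="/{{cookiecutter.model_slug_snakecase}}")
```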
#### File: {{cookiecutter.app_slug_snakecase}}/tests/test_{{cookiecutter.app_slug_snakecase}}_services.py
```python
import pytest
from unittest import mock
from {{cookiecutter.app_slug_snakecase}}.services.services_{{cookiecutter.app_slug_snakecase}} import add_{{cookiecutter.model_slug_snakecase}}
from {{cookiecutter.app_slug_snakecase}}.schemas.schemas_{{cookiecutter.app_slug_snakecase}} import {{cookiecutter.model_name}}CreateResponse, {{cookiecutter.model_name}}Endpoint, {{cookiecutter.model_name}}Create
response_obj = {{cookiecutter.model_name}}CreateResponse(id=1, name="{{cookiecutter.model_name}} 1", completed=False)
@pytest.mark.asyncio
@mock.patch("{{cookiecutter.app_slug_snakecase}}.dao.{{cookiecutter.model_slug_snakecase}}.create", return_value=response_obj)
async def test_add_{{cookiecutter.model_slug_snakecase}}(mocker):
data = {{cookiecutter.model_name}}Endpoint(name="{{cookiecutter.model_name}} 1", completed=False, current_user_id=1)
response = await add_{{cookiecutter.model_slug_snakecase}}({{cookiecutter.model_slug_snakecase}}_data=data)
assert response == response_obj
``` |
{
"source": "jonatasoli/reguleque-core",
"score": 2
} |
#### File: reguleque-core/app/test_integration_service_layer_user.py
```python
import pytest
from config import settings
from loguru import logger
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from user.unit_of_work import AbstractUnitOfWork, SqlAlchemyUnitOfWork
from user.repository import AbstractRepository, SqlAlchemyRepository
from domain import SignUp, User, Role
from user.service_layer import Auth
from user.orm import get_session
@pytest.mark.asyncio
async def test_database(apply_migrations):
dir(apply_migrations)
assert True == True
@pytest.mark.db
@pytest.mark.asyncio
async def test_create_user(apply_migrations, postgres_session):
uow = SqlAlchemyUnitOfWork(session_factory=postgres_session)
db_user = SignUp(
name="<NAME>",
email="<EMAIL>",
password="<PASSWORD>",
)
output = await Auth.signup(uow=uow, user_in=db_user)
assert output is not None
    assert await uow.users.get("<EMAIL>") is not None
@pytest.mark.skip
@pytest.mark.db
@pytest.mark.asyncio
async def test_check_existent_user(apply_migrations, postgres_session):
uow = SqlAlchemyUnitOfWork(session_factory=postgres_session)
# db_user = User(
# name="<NAME>",
# email="<EMAIL>",
# password="<PASSWORD>",
# )
# await Auth.signup(uow=uow, user_in=db_user)
db_user = User(
name="<NAME>",
email="<EMAIL>",
password="<PASSWORD>",
)
output = Auth.check_existent_user(uow=uow, email="<EMAIL>")
import ipdb; ipdb.set_trace()
assert uow.users.get("<EMAIL>") is not None
assert output is not None
assert db_user == output
```
#### File: app/user/repository.py
```python
import abc
from sqlalchemy import select, between
from user.orm import User, Subscribe, SubscribePlan
from user.schemas import SubscribePlanDB, SubscribeDB, UserDB
from user.adapters.db_obj_converter import obj_in_to_db_obj
class AbstractRepository(abc.ABC):
@abc.abstractmethod
def add(self, user: User):
raise NotImplementedError
@abc.abstractmethod
def get(self, reference) -> User:
raise NotImplementedError
class SqlAlchemyRepository(AbstractRepository):
def __init__(self, session):
self.session = session
async def add(self, user):
db_user = User(
name=user.name,
password=<PASSWORD>_<PASSWORD>(),
email=user.email,
)
return self.session.add(db_user)
    async def get(self, email):
        stmt = select(User).where(User.email == email)
        _result = await self.session.execute(stmt)
        return _result.scalars().first()
    async def get_subscribe(self, id):
        stmt = select(Subscribe).where(Subscribe.id == id)
        _result = await self.session.execute(stmt)
        return SubscribeDB.from_orm(_result.scalars().first())
    async def get_subscribe_plan(self, id):
        stmt = select(SubscribePlan).where(SubscribePlan.id == id)
        _result = await self.session.execute(stmt)
        return SubscribePlanDB.from_orm(_result.scalars().first())
    def list(self):
        return self.session.query(User).all()
``` |
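A minimal usage sketch for `SqlAlchemyRepository`; the async engine URL and session setup below are assumptions, since `user.orm.get_session` is not shown in this excerpt:
```python
# Illustrative only -- session construction is assumed, not taken from the repository.
import asyncio
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from user.repository import SqlAlchemyRepository
async def main():
    engine = create_async_engine("postgresql+asyncpg://localhost/reguleque")  # assumed URL
    session_factory = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
    async with session_factory() as session:
        repo = SqlAlchemyRepository(session)
        print(await repo.get("<EMAIL>"))
asyncio.run(main())
```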
{
"source": "jonatasoli/sapu",
"score": 2
} |
#### File: app/ext/database.py
```python
from tortoise import Tortoise
from dynaconf import settings
async def init():
# Here we connect to a Postgres.
# also specify the app name of "models"
# which contain models from "app.models"
await Tortoise.init(
db_url=settings.DATABASE_URL,
modules={'models': ['app.models', "aerich.models"]}
)
TORTOISE_ORM = {
"connections": {"default": settings.DATABASE_URL},
"apps": {
"models": {
"models": ["models", "aerich.models"],
"default_connection": "default",
},
},
}
``` |
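A hedged bootstrap sketch for the `init()` helper above; whether the project generates schemas at startup is an assumption, not something the excerpt shows:
```python
# Illustrative async entrypoint; not part of the source above.
import asyncio
from tortoise import Tortoise
from app.ext.database import init
async def bootstrap():
    await init()
    await Tortoise.generate_schemas(safe=True)  # creates missing tables only
    await Tortoise.close_connections()
asyncio.run(bootstrap())
```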
{
"source": "jonatasoli/the-eye",
"score": 3
} |
#### File: events/services/unit_of_work.py
```python
from __future__ import annotations
import abc
from sqlalchemy.orm.session import Session
from src.events.adapters import repository, database
from src.config import settings
class AbstractUnitOfWork(abc.ABC):
events: repository.AbstractRepository
def __enter__(self) -> AbstractUnitOfWork:
return self
def __exit__(self, *args):
self.rollback()
def commit(self):
self._commit()
@abc.abstractmethod
def _commit(self):
raise NotImplementedError
@abc.abstractmethod
def rollback(self):
raise NotImplementedError
class SqlAlchemyUnitOfWork(AbstractUnitOfWork):
def __init__(self, session_factory=database.session_factory()):
self.session_factory = session_factory
def __enter__(self):
self.session = self.session_factory() # type: Session
self.events = repository.SqlAlchemyRepository(self.session)
return super().__enter__()
def __exit__(self, *args):
super().__exit__(*args)
self.session.close()
def _commit(self):
self.session.commit()
def rollback(self):
self.session.rollback()
``` |
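A usage sketch of the unit of work above; the `add` method on the events repository is assumed from the `AbstractRepository` interface, which is not shown in this excerpt:
```python
# Illustrative only: typical context-manager usage of SqlAlchemyUnitOfWork.
from src.events.services.unit_of_work import SqlAlchemyUnitOfWork
def record_event(event):
    with SqlAlchemyUnitOfWork() as uow:
        uow.events.add(event)  # assumed repository method
        uow.commit()           # without an explicit commit, __exit__ rolls back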
{
"source": "jonatasrenan/mascavo",
"score": 3
} |
#### File: mascavo/mascavo/parallel.py
```python
def tmap(func, args, workers=16):
"""
Redefinição da função map, multithread, aguarda threads no final e retorna resultado expandido em lista.
:param func: função
:param args: lista
:param workers: número de threads máximo
:return: resultado do mapeamento de fn em l expandido em lista
"""
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor(workers) as ex:
res = ex.map(func, args)
ex.shutdown(wait=True)
return list(res)
def pmap(func, args, workers=None):
"""
Redefinição da função map, multiprocessos, aguarda processos no final e retorna resultado expandido em lista.
:param func: função
:param args: lista
:param workers: número de processos máximo
:return: resultado do mapeamento de fn em l expandido em lista
"""
import multiprocessing
if not workers:
workers = multiprocessing.cpu_count()
if workers == 1:
return list(map(func, args))
import concurrent.futures
with concurrent.futures.ProcessPoolExecutor(workers) as ex:
res = ex.map(func, args)
ex.shutdown(wait=True)
return list(res)
``` |
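A small usage sketch for `tmap` and `pmap`. Because `pmap` uses a process pool, the mapped function must be picklable (a module-level function rather than a lambda), and on platforms that spawn worker processes the calls should sit under a `__main__` guard; the import path is an assumption:
```python
# Illustrative usage; assumes the module is importable as mascavo.parallel.
from mascavo.parallel import tmap, pmap
def square(x):
    return x * x
if __name__ == "__main__":
    print(tmap(lambda x: x + 1, [1, 2, 3], workers=4))  # threads: lambdas are fine
    print(pmap(square, [1, 2, 3]))                      # processes: needs a picklable function
```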
{
"source": "Jonatas-Soares-Alves/Exercicios-do-Curso-de-Python",
"score": 4
} |
#### File: Exercicios-do-Curso-de-Python/MUNDO 3/Aula 20.py
```python
def mensagem(msg):
print('-'*30)
print(f'{msg:^30}')
print('-'*30)
def soma(a, b):
print(f'A = {a} e B = {b}')
print(f'A soma A + B = {a+b}\n')
def contador(*num):
print(f'Recebi os valores {num} e são ao todo {len(num)} números')
def dobra(lst):
pos = 0
while pos < len(lst):
lst[pos] *= 2
pos += 1
mensagem('Olá!')
mensagem('Esse é um teste')
mensagem('Vamos ver como isso funciona')
soma(3, 2)
soma(a=5, b=7)
soma(b=10, a=4)
contador(2, 1, 7)
contador(8, 0)
contador(4, 4, 7, 6, 2)
valores = [6, 3, 9, 1, 0, 2]
print('\n', valores)
dobra(valores)
print('\n', valores)
esc = 'O ponto tem de . estar em alguma parte'
print(esc)
esc = esc.replace('.', ',')
print(esc)
help(list.insert)
``` |
{
"source": "Jonatas-Soares-Alves/PDF-Converter-Discord-Bot-in-Python",
"score": 3
} |
#### File: PDF-Converter-Discord-Bot-in-Python/Bot/PDF_Reader.py
```python
import discord
from discord.ext import commands
import requests
import os
import fitz
client = commands.Bot(command_prefix='.')
@client.command(name='up')
async def up(ctx, name=''):
await ctx.send('Converting...')
try:
attachment_url = ctx.message.attachments[0].url
url = attachment_url
r = requests.get(url, allow_redirects=True)
open(f'{name}.pdf', 'wb').write(r.content)
except:
await ctx.send("I'm sorry... Something went wrong saving the PDF file. = (")
return
try:
# Open a PDF file and generate an object
images = fitz.open(f'{name}.pdf')
for pg in range(images.pageCount):
page = images[pg]
rotate = int(0)
            # Each dimension is scaled by a factor of 2, which gives an image with quadrupled resolution.
zoom_x = 2.0
zoom_y = 2.0
trans = fitz.Matrix(zoom_x, zoom_y).preRotate(rotate)
pm = page.getPixmap(matrix=trans, alpha=False)
pm.writePNG(f'{name}{pg + 1}.png')
except:
await ctx.send("I'm sorry... Something went wrong trying to convert. = (")
return
for pg in range(images.pageCount):
await ctx.send(f'{name} Page {pg+1}')
await ctx.send(file=discord.File(f'{name}{pg+1}.png'))
os.remove(f'{name}{pg+1}.png')
images.close()
os.remove(f'{name}.pdf')
#==========================================================
# Down here is the channel ID and the Bot Token to test it.
#V=========================VVV============================V
@client.event
async def on_ready():
canalgeral = client.get_channel() # <- Put the ID of a channel you want to receive the images here.
    myEmbed = discord.Embed(title='Hello! = D', description="My job is basically to convert your PDF files to PNG images and send them one by one to ease the reading of their content for the users.", color=0x8000ff)
myEmbed.add_field(name='Command:', value='.up "Optional name in quotes" [Uploaded File]', inline=False)
await canalgeral.send(embed=myEmbed)
# Run the client on the server
client.run('token here')# <- Put a Discord Bot Token here.
``` |
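The `fitz` calls above (`pageCount`, `preRotate`, `getPixmap`, `writePNG`) are the older camelCase PyMuPDF spellings; recent releases deprecate them in favour of snake_case names. A rough equivalent of the conversion loop under a newer PyMuPDF (assumed >= 1.19) would be:
```python
# Sketch only -- same logic as the bot's loop, using the newer PyMuPDF method names.
import fitz
def pdf_to_pngs(name):
    with fitz.open(f"{name}.pdf") as images:
        for pg in range(images.page_count):
            trans = fitz.Matrix(2.0, 2.0).prerotate(0)  # 2x zoom on each axis, no rotation
            pm = images[pg].get_pixmap(matrix=trans, alpha=False)
            pm.save(f"{name}{pg + 1}.png")
```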
{
"source": "jonatas-valete/sistema-locadora-carros",
"score": 3
} |
#### File: jonatas-valete/sistema-locadora-carros/main.py
```python
import pickle
from funcoes_carro import *
from funcoes import *
def load():
try:
with open('database.db', 'rb') as file:
lista_cliente = pickle.load(file)
return lista_cliente
except Exception as e:
print('Erro ao carregar')
print(e)
def menu():
print(
'################################\n'
'1 - Cadastrar Cliente\n'
'2 - Ver Clientes Cadastrados\n'
'3 - Cadastrar Carros\n'
'4 - Ver Carros\n'
'5 - Ver Histórico\n'
'6 - Deletar Cliente\n'
'0 - Desligar Sistema\n'
'################################'
)
if __name__=="__main__":
while True:
        lista_cliente = load()  # list-type variable 'lista_cliente'
lista_carro = load_car()
        menu()  # show the menu
opcao = input('Digite uma opção: ')
if opcao == '1':
nome = input('Digite o nome:')
cpf = input('Digite o cpf: ')
rg = input('Digite o Rg: ')
telefone = input('Insira o telefone: ')
endereco = input('Insira o endereço')
cad = cadastrar_cliente(nome, cpf, rg, telefone,
endereco, lista_cliente)
save(lista_cliente)
elif opcao == '2':
while True:
for i in lista_cliente:
print(i)
print(
'1 - ver dados do cliente\n'
'2 - Voltar ao menu principal'
)
escolha = input('Digite uma opção: ')
if escolha == '1':
pos = input('Escolha um cliente: ')
dados_cliente = ver_cadastro_cliente(int(pos), lista_cliente)
print(dados_cliente)
break
elif escolha == '2':
break
else:
print('Digito inválido tente um digito valido 1 ou 2')
elif opcao == '3':
carro = input('Insira um modelo: ')
portas = input('Insira a quantidade de portas: ')
ar_condicionado = input('Insira se há ar condicionado: ')
direcao = input('Insira o tipo de direção: ')
cor = input('Insira a cor do veiculo: ')
ano = input('Insira o ano: ')
placa = input('Insira a placa: ')
cadastrar_carro(carro, portas, ar_condicionado, direcao,
cor, ano, placa, lista_carro)
save_car(lista_carro)
elif opcao == '4':
while True:
ver_carros(lista_carro)
print(
'1 - Ver detalhes do carro\n'
'2 - Alugar carro\n'
'3 - Voltar ao menu principal\n'
)
opcao = input('Escolha uma opção: ')
if opcao == '1':
ver_detalhes = input('Escolha um carro'
' para ver os detalhes: ')
posicao = (int(ver_detalhes))
print(ver_detalhes_carro(lista_carro, posicao))
elif opcao == '2':
posicao = input('cliente a alugar: ')
cliente = (int(posicao))
carro_a_ser_alugado = input('Digite o carro a ser alugado')
carro = (int(carro_a_ser_alugado))
carro_alugado = lista_carro[carro]
lista_cliente[cliente].alugar(carro_alugado)
rel = relatorio(lista_cliente[cliente], carro_alugado)
geradorRelatorio(lista_cliente[cliente], rel)
save(lista_cliente)
break
elif opcao == '3':
break
else:
                    print('Digito inválido tente um digito valido 1 ou 2')
elif opcao == '5':
while True:
try:
ver_clientes_cadastrados(lista_cliente)
escolha = input('Escolha o Cliente: ')
cliente = lista_cliente[int(escolha)]
print(cliente.historico())
break
except:
print('Cliente inexistente. Tente novamente')
elif opcao == '6':
escolha_cliente = input('Qual cliente deletar?: ')
lista_cliente.pop(int(escolha_cliente))
elif opcao == '0':
print('Fechando Sistema...')
break
else:
print('Digito invalido\n Tente novamente.')
``` |
{
"source": "JonatDenner/discord-crypto-bot",
"score": 3
} |
#### File: JonatDenner/discord-crypto-bot/bot.py
```python
import discord
import pandas as pd
import requests
from discord.ext import commands
#gets the bot token and coinranking api key
key = open('.login', 'r').read()
api = open('.api', 'r').read()
intents = discord.Intents.default()
#set prefix and other rules, full list of attributes at https://discordpy.readthedocs.io/en/stable/ext/commands/api.html?#bot
bot = commands.Bot(command_prefix='$', intents=intents, case_insensitive='true')
#opens the coins csv as a pandas dataframe
df = pd.read_csv('coins.csv', delimiter=',')
#sends the dataframe to a 2*x value list
coins = [[row[col] for col in df.columns] for row in df.to_dict('records')]
#actions to do when the bot successfully logs in
@bot.event
async def on_ready():
print('\n------------------------------')
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------------------------------')
await bot.change_presence(activity=discord.Game(name='$help for a list of available commands.'))
#defines one bot command
@bot.command()
async def v(ctx, crypto:str):
    '''Display given crypto value\nUsage: $v [crypto symbol], e.g. $v btc\nCase insensitive'''
try:
crypto = crypto.upper()
match = [s for s in coins if crypto == s[1]]
if match:
#some cryptocurrencies share the same symbol, such as SOL, requiring loops
for x in match:
r=requests.get("https://api.coinranking.com/v2/coin/" + x[0], headers={"x-access-token":api})
data = r.json()
                # first catches unconfirmed coins, second catches dead/inactive ones
if not data['data']['coin']['supply']['confirmed']:
await ctx.send(data['data']['coin']['name'] + " supply is currently unconfirmed, so there is no price to display.")
elif str(data['data']['coin']['price']) == "0":
await ctx.send(data['data']['coin']['name'] + " is currently worth zero USD.")
else:
                    # displays a certain amount of digits depending on coin value, to avoid having large numbers every time
if float(data['data']['coin']['price']) > 10000:
output = data['data']['coin']['name'] + ': $' + (data['data']['coin']['price'])[:8] + ' | 24 Hour Change: ' + (data['data']['coin']['change'])[:6] + '%'
elif float(data['data']['coin']['price']) > 0.00001:
output = data['data']['coin']['name'] + ': $' + (data['data']['coin']['price'])[:10] + ' | 24 Hour Change: ' + (data['data']['coin']['change'])[:6] + '%'
else:
output = data['data']['coin']['name'] + ': $' + data['data']['coin']['price'] + ' | 24 Hour Change: ' + (data['data']['coin']['change'])[:6] + '%'
await ctx.send(output)
else:
#if the symbol isn't in the csv file it most likely doesn't exist
await ctx.send("Crypto doesn't exist.")
#for other unknown errors
except Exception as err:
await ctx.send("An error occurred...")
print(err)
#start the bot
bot.run(key)
``` |
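From the indexing above (`s[0]` as the coinranking UUID passed to `/v2/coin/`, `s[1]` as the ticker symbol), `coins.csv` presumably holds at least those two columns in that order; the sketch below shows the lookup in isolation, with illustrative UUID values that are not taken from the repository:
```python
# Illustrative only: the symbol-to-UUID lookup the bot performs on its CSV-derived list.
coins = [["Qwsogvtv82FCd", "BTC"], ["razxDUgYGNAdQ", "ETH"]]  # values are illustrative
match = [s for s in coins if "BTC" == s[1]]
print(match[0][0])  # UUID used to build https://api.coinranking.com/v2/coin/<uuid>
```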
{
"source": "JonathaCnB/desafio-backend",
"score": 2
} |
#### File: desafio-backend/questions/models.py
```python
from django.db import models
class Category(models.Model):
name = models.CharField(
max_length=255,
verbose_name="Nome da categoria",
unique=True,
)
is_active = models.BooleanField(default=False)
class Meta:
verbose_name = "Categoria"
verbose_name_plural = "Categorias"
db_table = "category"
def __str__(self) -> str:
return self.name
class Question(models.Model):
question = models.CharField(
max_length=255,
verbose_name="Pergunta",
null=True,
)
category = models.ForeignKey(
"questions.Category",
verbose_name="Categoria",
on_delete=models.PROTECT,
null=True,
)
registered_by = models.ForeignKey(
"users.User",
verbose_name="<NAME>",
on_delete=models.DO_NOTHING,
)
first_answer = models.CharField(
max_length=255,
verbose_name="1ª Resposta",
null=True,
)
second_answer = models.CharField(
max_length=255,
verbose_name="2ª Resposta",
null=True,
)
third_answer = models.CharField(
max_length=255,
verbose_name="3ª Resposta",
null=True,
)
CHOICE_CORRECT_ANSWER = (
("1", "Primeira Resposta"),
("2", "Segunda Resposta"),
("3", "Terceira Resposta"),
)
correct_answer = models.CharField(
max_length=1,
verbose_name="Resposta Correta",
choices=CHOICE_CORRECT_ANSWER,
null=False,
blank=False,
)
is_active = models.BooleanField(default=False)
class Meta:
verbose_name = "Pergunta"
verbose_name_plural = "Perguntas"
db_table = "question"
def __str__(self) -> str:
return self.question
```
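A hedged Django-shell sketch for the `Category`/`Question` models above; the `users.models.User` import path and all field values are assumptions:
```python
# Illustrative only; assumes the custom user model is importable as users.models.User.
from users.models import User
from questions.models import Category, Question
user = User.objects.first()  # assumes at least one user already exists
cat = Category.objects.create(name="Python", is_active=True)
Question.objects.create(
    question="What does len([]) return?",
    category=cat,
    registered_by=user,
    first_answer="0",
    second_answer="1",
    third_answer="None",
    correct_answer="1",  # "1" marks the first answer as correct
    is_active=True,
)
```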
#### File: desafio-backend/rank/models.py
```python
from django.db import models
class Rank(models.Model):
score = models.PositiveIntegerField(
verbose_name="Pontuação",
null=True,
)
category = models.ForeignKey(
"questions.Category",
verbose_name="Categoria",
on_delete=models.PROTECT,
null=True,
)
profile = models.ForeignKey(
"users.User",
verbose_name="Usuário",
on_delete=models.DO_NOTHING,
)
class Meta:
verbose_name = "Rank"
verbose_name_plural = "Rank"
db_table = "rank"
def __str__(self) -> str:
return f"{self.profile} - {self.score}"
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.